text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
<SYSTEM_TASK:> Return a float number representing the Julian Date for the given <END_TASK> <USER_TASK:> Description: def struct_time_to_jd(st): """ Return a float number representing the Julian Date for the given `struct_time`. NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored. """
year, month, day = st[:3] hours, minutes, seconds = st[3:6] # Convert time of day to fraction of day day += jdutil.hmsm_to_days(hours, minutes, seconds) return jdutil.date_to_jd(year, month, day)
<SYSTEM_TASK:> Return a `struct_time` converted from a Julian Date float number. <END_TASK> <USER_TASK:> Description: def jd_to_struct_time(jd): """ Return a `struct_time` converted from a Julian Date float number. WARNING: Conversion to then from Julian Date value to `struct_time` can be inaccurate and lose or gain time, especially for BC (negative) years. NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default values, not real ones. """
year, month, day = jdutil.jd_to_date(jd) # Convert time of day from fraction of day day_fraction = day - int(day) hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction) day = int(day) # This conversion can return negative values for items we do not want to be # negative: month, day, hour, minute, second. year, month, day, hour, minute, second = _roll_negative_time_fields( year, month, day, hour, minute, second) return struct_time( [year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS )
<SYSTEM_TASK:> Get example by schema object <END_TASK> <USER_TASK:> Description: def get_example_by_schema(cls, schema, ignored_schemas=None, paths=None, name=''): """ Get example by schema object :param Schema schema: current schema :param list ignored_schemas: list of previous schemas for avoid circular references :param list paths: list object paths (ex. #/definitions/Model.property) If nested schemas exists, custom examples checks in order from paths :param str name: name of property schema object if exists :return: dict or list (if schema is array) """
if schema.schema_example: return schema.schema_example if ignored_schemas is None: ignored_schemas = [] if paths is None: paths = [] if name: paths = list(map(lambda path: '.'.join((path, name)), paths)) if schema.ref_path: paths.append(schema.ref_path) if schema.schema_id in ignored_schemas: result = [] if schema.is_array else {} else: schemas = ignored_schemas + [schema.schema_id] kwargs = dict( ignored_schemas=schemas, paths=paths ) if schema.is_array: result = cls.get_example_for_array( schema.item, **kwargs) elif schema.type in PRIMITIVE_TYPES: result = cls.get_example_value_for_primitive_type( schema.type, schema.raw, schema.type_format, paths=paths ) elif schema.all_of: result = {} for _schema_id in schema.all_of: schema = SchemaObjects.get(_schema_id) result.update(cls.get_example_by_schema(schema, **kwargs)) else: result = cls.get_example_for_object( schema.properties, nested=schema.nested_schemas, **kwargs) return result
<SYSTEM_TASK:> Get example for body parameter example by operation <END_TASK> <USER_TASK:> Description: def get_body_example(cls, operation): """ Get example for body parameter example by operation :param Operation operation: operation object """
path = "#/paths/'{0.path}'/{0.method}/parameters/{name}".format( operation, name=operation.body.name or 'body') return cls.get_example_by_schema(operation.body, paths=[path])
<SYSTEM_TASK:> Get example for response object by operation object <END_TASK> <USER_TASK:> Description: def get_response_example(cls, operation, response): """ Get example for response object by operation object :param Operation operation: operation object :param Response response: response object """
path = "#/paths/'{}'/{}/responses/{}".format( operation.path, operation.method, response.name) kwargs = dict(paths=[path]) if response.type in PRIMITIVE_TYPES: result = cls.get_example_value_for_primitive_type( response.type, response.properties, response.type_format, **kwargs) else: schema = SchemaObjects.get(response.type) result = cls.get_example_by_schema(schema, **kwargs) return result
<SYSTEM_TASK:> Get example for header object <END_TASK> <USER_TASK:> Description: def get_header_example(cls, header): """ Get example for header object :param Header header: Header object :return: example :rtype: dict """
if header.is_array: result = cls.get_example_for_array(header.item) else: example_method = getattr(cls, '{}_example'.format(header.type)) result = example_method(header.properties, header.type_format) return {header.name: result}
<SYSTEM_TASK:> Get example for property <END_TASK> <USER_TASK:> Description: def get_property_example(cls, property_, nested=None, **kw): """ Get example for property :param dict property_: :param set nested: :return: example value """
paths = kw.get('paths', []) name = kw.get('name', '') result = None if name and paths: paths = list(map(lambda path: '.'.join((path, name)), paths)) result, path = cls._get_custom_example(paths) if result is not None and property_['type'] in PRIMITIVE_TYPES: cls._example_validate( path, result, property_['type'], property_['type_format']) return result if SchemaObjects.contains(property_['type']): schema = SchemaObjects.get(property_['type']) if result is not None: if schema.is_array: if not isinstance(result, list): result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT else: if isinstance(result, list): cls.logger.warning( 'Example type mismatch in path {}'.format(schema.ref_path)) else: result = cls.get_example_by_schema(schema, **kw) if (not result) and schema.nested_schemas: for _schema_id in schema.nested_schemas: _schema = SchemaObjects.get(_schema_id) if _schema: if isinstance(_schema, SchemaMapWrapper): result[_schema.name] = cls.get_example_by_schema(_schema, **kw) elif _schema.nested_schemas: for _schema__id in _schema.nested_schemas: _schema_ = SchemaObjects.get(_schema__id) if isinstance(_schema_, SchemaMapWrapper): result[_schema.name] = cls.get_example_by_schema(_schema_, **kw) else: result = cls.get_example_value_for_primitive_type( property_['type'], property_['type_properties'], property_['type_format'], **kw ) return result
<SYSTEM_TASK:> Create a directory tree for the resized assets <END_TASK> <USER_TASK:> Description: def mkres(self): """ Create a directory tree for the resized assets """
for d in DENSITY_TYPES: if d == 'ldpi' and not self.ldpi: continue # skip ldpi if d == 'xxxhdpi' and not self.xxxhdpi: continue # skip xxxhdpi try: path = os.path.join(self.out, 'res/drawable-%s' % d) os.makedirs(path, 0o755) except OSError: pass
<SYSTEM_TASK:> Return the new image size for the target density <END_TASK> <USER_TASK:> Description: def get_size_for_density(self, size, target_density): """ Return the new image size for the target density """
current_size = size current_density = DENSITY_MAP[self.source_density] target_density = DENSITY_MAP[target_density] return int(current_size * (target_density / current_density))
<SYSTEM_TASK:> Generate assets from the given image and path in case you've already <END_TASK> <USER_TASK:> Description: def resize_image(self, path, im): """ Generate assets from the given image and path in case you've already called Image.open """
# Get the original filename _, filename = os.path.split(path) # Generate the new filename filename = self.get_safe_filename(filename) filename = '%s%s' % (self.prefix if self.prefix else '', filename) # Get the original image size w, h = im.size # Generate assets from the source image for d in DENSITY_TYPES: if d == 'ldpi' and not self.ldpi: continue # skip ldpi if d == 'xxxhdpi' and not self.xxxhdpi: continue # skip xxxhdpi out_file = os.path.join(self.out, self.get_out_for_density(d), filename) if d == self.source_density: im.save(out_file, quality=self.image_quality) else: size = (self.get_size_for_density(w, d), self.get_size_for_density(h, d)) im.resize(size, self.image_filter).save(out_file, quality=self.image_quality)
<SYSTEM_TASK:> message should be a dict recognized by the Stitch Import API. <END_TASK> <USER_TASK:> Description: def push(self, message, callback_arg=None): """message should be a dict recognized by the Stitch Import API. See https://www.stitchdata.com/docs/integrations/import-api. """
if message['action'] == 'upsert': message.setdefault('key_names', self.key_names) message['client_id'] = self.client_id message.setdefault('table_name', self.table_name) self._add_message(message, callback_arg) batch = self._take_batch(self.target_messages_per_batch) if batch: self._send_batch(batch)
<SYSTEM_TASK:> Get parameters list by location <END_TASK> <USER_TASK:> Description: def get_parameters_by_location(self, locations=None, excludes=None): """ Get parameters list by location :param locations: list of locations :type locations: list or None :param excludes: list of excludes locations :type excludes: list or None :return: list of Parameter :rtype: list """
result = self.parameters if locations: result = filter(lambda x: x.location_in in locations, result) if excludes: result = filter(lambda x: x.location_in not in excludes, result) return list(result)
<SYSTEM_TASK:> Generate EDTF string equivalent of a given natural language date string. <END_TASK> <USER_TASK:> Description: def text_to_edtf(text): """ Generate EDTF string equivalent of a given natural language date string. """
if not text: return t = text.lower() # try parsing the whole thing result = text_to_edtf_date(t) if not result: # split by list delims and move fwd with the first thing that returns a non-empty string. # TODO: assemble multiple dates into a {} or [] structure. for split in [",", ";", "or"]: for list_item in t.split(split): # try parsing as an interval - split by '-' toks = list_item.split("-") if len(toks) == 2: d1 = toks[0].strip() d2 = toks[1].strip() # match looks from the beginning of the string, search # looks anywhere. if re.match(r'\d\D\b', d2): # 1-digit year partial e.g. 1868-9 if re.search(r'\b\d\d\d\d$', d1): # TODO: evaluate it and see if it's a year d2 = d1[-4:-1] + d2 elif re.match(r'\d\d\b', d2): # 2-digit year partial e.g. 1809-10 if re.search(r'\b\d\d\d\d$', d1): d2 = d1[-4:-2] + d2 else: century_range_match = re.search(r'\b(\d\d)(th|st|nd|rd|)-(\d\d)(th|st|nd|rd) [cC]', "%s-%s" % (d1,d2)) if century_range_match: g = century_range_match.groups() d1 = "%sC" % g[0] d2 = "%sC" % g[2] r1 = text_to_edtf_date(d1) r2 = text_to_edtf_date(d2) if r1 and r2: result = r1 + "/" + r2 return result # is it an either/or year "1838/1862" - that has a different # representation in EDTF. If it's 'both', then we use {}. If # it's 'or' then we use []. Assuming the latter for now. # This whole section could be more friendly. else: int_match = re.search(r"(\d\d\d\d)\/(\d\d\d\d)", list_item) if int_match: return "[%s, %s]" % (int_match.group(1), int_match.group(2)) result = text_to_edtf_date(list_item) if result: break if result: break is_before = re.findall(r'\bbefore\b', t) is_before = is_before or re.findall(r'\bearlier\b', t) is_after = re.findall(r'\bafter\b', t) is_after = is_after or re.findall(r'\bsince\b', t) is_after = is_after or re.findall(r'\blater\b', t) if is_before: result = u"unknown/%s" % result elif is_after: result = u"%s/unknown" % result return result
<SYSTEM_TASK:> Find current canonical representative equivalent to node. <END_TASK> <USER_TASK:> Description: def find(node): """Find current canonical representative equivalent to node. Adjust the parent pointer of each node along the way to the root to point directly at the root for inverse-Ackerman-fast access. """
if node.parent is None: return node root = node while root.parent is not None: root = root.parent parent = node while parent.parent is not root: grandparent = parent.parent parent.parent = root parent = grandparent return root
<SYSTEM_TASK:> Compute mapping from element to list of equivalent elements. <END_TASK> <USER_TASK:> Description: def classes(equivalences): """Compute mapping from element to list of equivalent elements. `equivalences` is an iterable of (x, y) tuples representing equivalences x ~ y. Returns an OrderedDict mapping each x to the list of elements equivalent to x. """
node = OrderedDict() def N(x): if x in node: return node[x] n = node[x] = Node(x) return n for x, y in equivalences: union(N(x), N(y)) eqclass = OrderedDict() for x, n in node.iteritems(): x_ = find(n).element if x_ not in eqclass: eqclass[x_] = [] eqclass[x_].append(x) eqclass[x] = eqclass[x_] return eqclass
<SYSTEM_TASK:> Change a string's characters from one base to another. <END_TASK> <USER_TASK:> Description: def changebase(string, frm, to, minlen=0): """ Change a string's characters from one base to another. Return the re-encoded string """
if frm == to: return lpad(string, get_code_string(frm)[0], minlen) return encode(decode(string, frm), to, minlen)
<SYSTEM_TASK:> Helper which instantiates the appropriate Engine and returns a Client <END_TASK> <USER_TASK:> Description: def get_CrossCatClient(client_type, **kwargs): """Helper which instantiates the appropriate Engine and returns a Client"""
client = None if client_type == 'local': import crosscat.LocalEngine as LocalEngine le = LocalEngine.LocalEngine(**kwargs) client = CrossCatClient(le) elif client_type == 'multiprocessing': import crosscat.MultiprocessingEngine as MultiprocessingEngine me = MultiprocessingEngine.MultiprocessingEngine(**kwargs) client = CrossCatClient(me) else: raise Exception('unknown client_type: %s' % client_type) return client
<SYSTEM_TASK:> Build the bigfloat library for in-place testing. <END_TASK> <USER_TASK:> Description: def build(python=PYTHON): """Build the bigfloat library for in-place testing."""
clean() local( "LIBRARY_PATH={library_path} CPATH={include_path} {python} " "setup.py build_ext --inplace".format( library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python, ))
<SYSTEM_TASK:> Synchronize the virtual blockchain state up until a given block. <END_TASK> <USER_TASK:> Description: def sync_virtualchain(blockchain_opts, last_block, state_engine, expected_snapshots={}, tx_filter=None ): """ Synchronize the virtual blockchain state up until a given block. Obtain the operation sequence from the blockchain, up to and including last_block. That is, go and fetch each block we haven't seen since the last call to this method, extract the operations from them, and record in the given working_dir where we left off while watching the blockchain. Store the state engine state, consensus snapshots, and last block to the working directory. Return True on success Return False if we're supposed to stop indexing Abort the program on error. The implementation should catch timeouts and connection errors """
rc = False start = datetime.datetime.now() while True: try: # advance state rc = indexer.StateEngine.build(blockchain_opts, last_block + 1, state_engine, expected_snapshots=expected_snapshots, tx_filter=tx_filter ) break except Exception, e: log.exception(e) log.error("Failed to synchronize chain; exiting to safety") os.abort() time_taken = "%s seconds" % (datetime.datetime.now() - start).seconds log.info(time_taken) return rc
<SYSTEM_TASK:> Pass along virtualchain-reserved fields to a virtualchain operation. <END_TASK> <USER_TASK:> Description: def virtualchain_set_opfields( op, **fields ): """ Pass along virtualchain-reserved fields to a virtualchain operation. This layer of indirection is meant to help with future compatibility, so virtualchain implementations do not try to set operation fields directly. """
# warn about unsupported fields for f in fields.keys(): if f not in indexer.RESERVED_KEYS: log.warning("Unsupported virtualchain field '%s'" % f) # propagate reserved fields for f in fields.keys(): if f in indexer.RESERVED_KEYS: op[f] = fields[f] return op
<SYSTEM_TASK:> Converts from a directory of tarballed ASCII ".samp" files to a single <END_TASK> <USER_TASK:> Description: def ascii2h5(dirname, output_fname): """ Converts from a directory of tarballed ASCII ".samp" files to a single HDF5 file. Essentially, converts from the original release format to a single HDF5 file. """
import tarfile import sys from glob import glob from contextlib import closing # The datatype that will be used to store extinction, A0 A0_dtype = 'float16' def load_samp_file(f, fname): # Parse filename fname_chunks = os.path.split(fname)[1].split('_') l = float(fname_chunks[0]) b = float(fname_chunks[1]) # Load ASCII data data_raw = np.loadtxt(f, dtype='float64') n_samples = data_raw.shape[1] - 1 n_dists = data_raw.shape[0] # Construct output dtype = [ ('dist', 'int32'), ('A0', A0_dtype, (n_samples,))] data = np.empty(n_dists, dtype=dtype) data['dist'][:] = data_raw[:,0] data['A0'][:,:] = data_raw[:,1:] return (l,b), data def process_tarball(tarball_fname): # Write to the progress bar print('.', end='') sys.stdout.flush() with closing(tarfile.open(tarball_fname, mode='r:gz')) as f_tar: fnames = f_tar.getnames() f = f_tar.extractfile(fnames[0]) (l,b), data = load_samp_file(f, fnames[0]) n_dists, n_samples = data['A0'].shape n_coords = len(fnames) dtype = [ ('l', 'float32'), ('b', 'float32'), ('dist', 'int32', (n_dists,)), ('A0', A0_dtype, (n_dists, n_samples))] data_combined = np.empty(n_coords, dtype=dtype) for k,fn in enumerate(fnames): # print('File {: >4d} of {:d}'.format(k+1, n_coords)) f = f_tar.extractfile(fn) (l,b), data = load_samp_file(f, fn) data_combined['l'][k] = l data_combined['b'][k] = b data_combined['dist'][k] = data['dist'] data_combined['A0'][k] = data['A0'] return data_combined def save_data(data, fname): with closing(h5py.File(fname, 'w')) as f: f.create_dataset( 'samples', data=data, chunks=True, compression='gzip', compression_opts=3) print('Progress: ', end='') sys.stdout.flush() tar_fname_list = glob(os.path.join(dirname, 'A_samp_*.tar.gz')) d = np.hstack([process_tarball(fn) for fn in tar_fname_list]) print('+', end='') sys.stdout.flush() save_data(d, output_fname) print('')
<SYSTEM_TASK:> Checks, if the dihedral defining atom is colinear. <END_TASK> <USER_TASK:> Description: def check_dihedral(self, construction_table): """Checks, if the dihedral defining atom is colinear. Checks for each index starting from the third row of the ``construction_table``, if the reference atoms are colinear. Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices. """
c_table = construction_table angles = self.get_angle_degrees(c_table.iloc[3:, :].values) problem_index = np.nonzero((175 < angles) | (angles < 5))[0] rename = dict(enumerate(c_table.index[3:])) problem_index = [rename[i] for i in problem_index] return problem_index
<SYSTEM_TASK:> Reindexe the dihedral defining atom if linear reference is used. <END_TASK> <USER_TASK:> Description: def correct_dihedral(self, construction_table, use_lookup=None): """Reindexe the dihedral defining atom if linear reference is used. Uses :meth:`~Cartesian.check_dihedral` to obtain the problematic indices. Args: construction_table (pd.DataFrame): use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: pd.DataFrame: Appropiately renamed construction table. """
if use_lookup is None: use_lookup = settings['defaults']['use_lookup'] problem_index = self.check_dihedral(construction_table) bond_dict = self._give_val_sorted_bond_dict(use_lookup=use_lookup) c_table = construction_table.copy() for i in problem_index: loc_i = c_table.index.get_loc(i) b, a, problem_d = c_table.loc[i, ['b', 'a', 'd']] try: c_table.loc[i, 'd'] = (bond_dict[a] - {b, a, problem_d} - set(c_table.index[loc_i:]))[0] except IndexError: visited = set(c_table.index[loc_i:]) | {b, a, problem_d} tmp_bond_dict = OrderedDict([(j, bond_dict[j] - visited) for j in bond_dict[problem_d]]) found = False while tmp_bond_dict and not found: new_tmp_bond_dict = OrderedDict() for new_d in tmp_bond_dict: if new_d in visited: continue angle = self.get_angle_degrees([b, a, new_d])[0] if 5 < angle < 175: found = True c_table.loc[i, 'd'] = new_d else: visited.add(new_d) for j in tmp_bond_dict[new_d]: new_tmp_bond_dict[j] = bond_dict[j] - visited tmp_bond_dict = new_tmp_bond_dict if not found: other_atoms = c_table.index[:loc_i].difference({b, a}) molecule = self.get_distance_to(origin=i, sort=True, other_atoms=other_atoms) k = 0 while not found and k < len(molecule): new_d = molecule.index[k] angle = self.get_angle_degrees([b, a, new_d])[0] if 5 < angle < 175: found = True c_table.loc[i, 'd'] = new_d k = k + 1 if not found: message = ('The atom with index {} has no possibility ' 'to get nonlinear reference atoms'.format) raise UndefinedCoordinateSystem(message(i)) return c_table
<SYSTEM_TASK:> Checks, if ``i`` uses valid absolute references. <END_TASK> <USER_TASK:> Description: def _has_valid_abs_ref(self, i, construction_table): """Checks, if ``i`` uses valid absolute references. Checks for each index from first to third row of the ``construction_table``, if the references are colinear. This case has to be specially treated, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references. (to fix translational and rotational degrees of freedom) Args: i (label): The label has to be in the first three rows. construction_table (pd.DataFrame): Returns: bool: """
c_table = construction_table abs_refs = constants.absolute_refs A = np.empty((3, 3)) row = c_table.index.get_loc(i) if row > 2: message = 'The index {i} is not from the first three, rows'.format raise ValueError(message(i=i)) for k in range(3): if k < row: A[k] = self.loc[c_table.iloc[row, k], ['x', 'y', 'z']] else: A[k] = abs_refs[c_table.iloc[row, k]] v1, v2 = A[2] - A[1], A[1] - A[0] K = np.cross(v1, v2) zero = np.full(3, 0.) return not (np.allclose(K, zero) or np.allclose(v1, zero) or np.allclose(v2, zero))
<SYSTEM_TASK:> Checks first three rows of ``construction_table`` for linear references <END_TASK> <USER_TASK:> Description: def check_absolute_refs(self, construction_table): """Checks first three rows of ``construction_table`` for linear references Checks for each index from first to third row of the ``construction_table``, if the references are colinear. This case has to be specially treated, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references. (to fix translational and rotational degrees of freedom) Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices. """
c_table = construction_table problem_index = [i for i in c_table.index[:3] if not self._has_valid_abs_ref(i, c_table)] return problem_index
<SYSTEM_TASK:> Reindexe construction_table if linear reference in first three rows <END_TASK> <USER_TASK:> Description: def correct_absolute_refs(self, construction_table): """Reindexe construction_table if linear reference in first three rows present. Uses :meth:`~Cartesian.check_absolute_refs` to obtain the problematic indices. Args: construction_table (pd.DataFrame): Returns: pd.DataFrame: Appropiately renamed construction table. """
c_table = construction_table.copy() abs_refs = constants.absolute_refs problem_index = self.check_absolute_refs(c_table) for i in problem_index: order_of_refs = iter(permutations(abs_refs.keys())) finished = False while not finished: if self._has_valid_abs_ref(i, c_table): finished = True else: row = c_table.index.get_loc(i) c_table.iloc[row, row:] = next(order_of_refs)[row:3] return c_table
<SYSTEM_TASK:> Create the Zmatrix from a construction table. <END_TASK> <USER_TASK:> Description: def _build_zmat(self, construction_table): """Create the Zmatrix from a construction table. Args: Construction table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`. """
c_table = construction_table default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral'] optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'}) zmat_frame = pd.DataFrame(columns=default_cols + optional_cols, dtype='float', index=c_table.index) zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols] zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom'] zmat_frame.loc[:, ['b', 'a', 'd']] = c_table zmat_values = self._calculate_zmat_values(c_table) zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values zmatrix = Zmat(zmat_frame, metadata=self.metadata, _metadata={'last_valid_cartesian': self.copy()}) return zmatrix
<SYSTEM_TASK:> Transform to internal coordinates. <END_TASK> <USER_TASK:> Description: def get_zmat(self, construction_table=None, use_lookup=None): """Transform to internal coordinates. Transforming to internal coordinates involves basically three steps: 1. Define an order of how to build and define for each atom the used reference atoms. 2. Check for problematic local linearity. In this algorithm an angle with ``170 < angle < 10`` is assumed to be linear. This is not the mathematical definition, but makes it safer against "floating point noise" 3. Calculate the bond lengths, angles and dihedrals using the references defined in step 1 and 2. In the first two steps a so called ``construction_table`` is created. This is basically a Zmatrix without the values for the bonds, angles and dihedrals hence containing only the information about the used references. ChemCoord uses a :class:`pandas.DataFrame` with the columns ``['b', 'a', 'd']``. Look into :meth:`~chemcoord.Cartesian.get_construction_table` for more information. It is important to know, that calculating the construction table is a very costly step since the algoritym tries to make some guesses based on connectivity to create a "chemical" zmatrix. If you create several zmatrices based on the same references you can obtain the construction table of a zmatrix with ``Zmat_instance.loc[:, ['b', 'a', 'd']]`` If you then pass the buildlist as argument to ``give_zmat``, the algorithm directly starts with step 3 (which is much faster). If a ``construction_table`` is passed into :meth:`~Cartesian.get_zmat` the check for pathological linearity is not performed! So if a ``construction_table`` is either manually created, or obtained from :meth:`~Cartesian.get_construction_table` under the option ``perform_checks = False``, it is recommended to use the following methods: * :meth:`~Cartesian.correct_dihedral` * :meth:`~Cartesian.correct_absolute_refs` If you want to check for problematic indices in order to solve the invalid references yourself, use the following methods: * :meth:`~Cartesian.check_dihedral` * :meth:`~Cartesian.check_absolute_refs` Args: construction_table (pandas.DataFrame): use_lookup (bool): Use a lookup variable for :meth:`~chemcoord.Cartesian.get_bonds`. The default is specified in ``settings['defaults']['use_lookup']`` Returns: Zmat: A new instance of :class:`~Zmat`. """
if use_lookup is None: use_lookup = settings['defaults']['use_lookup'] self.get_bonds(use_lookup=use_lookup) self._give_val_sorted_bond_dict(use_lookup=use_lookup) use_lookup = True # During function execution the connectivity situation won't change # So use_look=True will be used if construction_table is None: c_table = self.get_construction_table(use_lookup=use_lookup) c_table = self.correct_dihedral(c_table, use_lookup=use_lookup) c_table = self.correct_absolute_refs(c_table) else: c_table = construction_table return self._build_zmat(c_table)
<SYSTEM_TASK:> r"""Return the gradient for the transformation to a Zmatrix. <END_TASK> <USER_TASK:> Description: def get_grad_zmat(self, construction_table, as_function=True): r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. """
if (construction_table.index != self.index).any(): message = "construction_table and self must use the same index" raise ValueError(message) c_table = construction_table.loc[:, ['b', 'a', 'd']] c_table = c_table.replace(constants.int_label) c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)}) c_table = c_table.values.T X = self.loc[:, ['x', 'y', 'z']].values.T if X.dtype == np.dtype('i8'): X = X.astype('f8') err, row, grad_C = transformation.get_grad_C(X, c_table) if err == ERR_CODE_InvalidReference: rename = dict(enumerate(self.index)) i = rename[row] b, a, d = construction_table.loc[i, ['b', 'a', 'd']] raise InvalidReference(i=i, b=b, a=a, d=d) if as_function: return partial(xyz_functions.apply_grad_zmat_tensor, grad_C, construction_table) else: return grad_C
<SYSTEM_TASK:> Adds a column with the requested data. <END_TASK> <USER_TASK:> Description: def add_data(self, new_cols=None): """Adds a column with the requested data. If you want to see for example the mass, the colormap used in jmol and the block of the element, just use:: ['mass', 'jmol_color', 'block'] The underlying ``pd.DataFrame`` can be accessed with ``constants.elements``. To see all available keys use ``constants.elements.info()``. The data comes from the module `mendeleev <http://mendeleev.readthedocs.org/en/latest/>`_ written by Lukasz Mentel. Please note that I added three columns to the mendeleev data:: ['atomic_radius_cc', 'atomic_radius_gv', 'gv_color', 'valency'] The ``atomic_radius_cc`` is used by default by this module for determining bond lengths. The three others are taken from the MOLCAS grid viewer written by Valera Veryazov. Args: new_cols (str): You can pass also just one value. E.g. ``'mass'`` is equivalent to ``['mass']``. If ``new_cols`` is ``None`` all available data is returned. inplace (bool): Returns: Cartesian: """
atoms = self['atom'] data = constants.elements if pd.api.types.is_list_like(new_cols): new_cols = set(new_cols) elif new_cols is None: new_cols = set(data.columns) else: new_cols = [new_cols] new_frame = data.loc[atoms, set(new_cols) - set(self.columns)] new_frame.index = self.index return self.__class__(pd.concat([self._frame, new_frame], axis=1))
<SYSTEM_TASK:> Determines if ``other`` has the same sumformula <END_TASK> <USER_TASK:> Description: def has_same_sumformula(self, other): """Determines if ``other`` has the same sumformula Args: other (molecule): Returns: bool: """
same_atoms = True for atom in set(self['atom']): own_atom_number = len(self[self['atom'] == atom]) other_atom_number = len(other[other['atom'] == atom]) same_atoms = (own_atom_number == other_atom_number) if not same_atoms: break return same_atoms
<SYSTEM_TASK:> Return the number of electrons. <END_TASK> <USER_TASK:> Description: def get_electron_number(self, charge=0): """Return the number of electrons. Args: charge (int): Charge of the molecule. Returns: int: """
atomic_number = constants.elements['atomic_number'].to_dict() return sum([atomic_number[atom] for atom in self['atom']]) - charge
<SYSTEM_TASK:> Let API instance can respond jsonp request automatically. <END_TASK> <USER_TASK:> Description: def support_jsonp(api_instance, callback_name_source='callback'): """Let API instance can respond jsonp request automatically. `callback_name_source` can be a string or a callback. If it is a string, the system will find the argument that named by this string in `query string`. If found, determine this request to be a jsonp request, and use the argument's value as the js callback name. If `callback_name_source` is a callback, this callback should return js callback name when request is a jsonp request, and return False when request is not jsonp request. And system will handle request according to its return value. default support format:url?callback=js_callback_name """
output_json = api_instance.representations['application/json'] @api_instance.representation('application/json') def handle_jsonp(data, code, headers=None): resp = output_json(data, code, headers) if code == 200: callback = request.args.get(callback_name_source, False) if not callable(callback_name_source) \ else callback_name_source() if callback: resp.set_data(str(callback) + '(' + resp.get_data().decode("utf-8") + ')') return resp
<SYSTEM_TASK:> Insert column into molecule at specified location. <END_TASK> <USER_TASK:> Description: def insert(self, loc, column, value, allow_duplicates=False, inplace=False): """Insert column into molecule at specified location. Wrapper around the :meth:`pandas.DataFrame.insert` method. """
out = self if inplace else self.copy() out._frame.insert(loc, column, value, allow_duplicates=allow_duplicates) if not inplace: return out
<SYSTEM_TASK:> Make either a p2sh-p2wpkh or p2sh-p2wsh <END_TASK> <USER_TASK:> Description: def make_multisig_segwit_info( m, pks ): """ Make either a p2sh-p2wpkh or p2sh-p2wsh redeem script and p2sh address. Return {'address': p2sh address, 'redeem_script': **the witness script**, 'private_keys': privkeys, 'segwit': True} * privkeys and redeem_script will be hex-encoded """
pubs = [] privkeys = [] for pk in pks: priv = BitcoinPrivateKey(pk, compressed=True) priv_hex = priv.to_hex() pub_hex = priv.public_key().to_hex() privkeys.append(priv_hex) pubs.append(keylib.key_formatting.compress(pub_hex)) script = None if len(pubs) == 1: if m != 1: raise ValueError("invalid m: len(pubkeys) == 1") # 1 pubkey means p2wpkh key_hash = hashing.bin_hash160(pubs[0].decode('hex')).encode('hex') script = '160014' + key_hash addr = btc_make_p2sh_address(script[2:]) else: # 2+ pubkeys means p2wsh script = make_multisig_script(pubs, m) addr = make_multisig_segwit_address_from_witness_script(script) return { 'address': addr, 'redeem_script': script, 'private_keys': privkeys, 'segwit': True, 'm': m }
<SYSTEM_TASK:> Create a bundle of information <END_TASK> <USER_TASK:> Description: def make_multisig_wallet( m, n ): """ Create a bundle of information that can be used to generate an m-of-n multisig scriptsig. """
if m <= 1 and n <= 1: raise ValueError("Invalid multisig parameters") pks = [] for i in xrange(0, n): pk = BitcoinPrivateKey(compressed=True).to_wif() pks.append(pk) return make_multisig_info( m, pks )
<SYSTEM_TASK:> Create a bundle of information <END_TASK> <USER_TASK:> Description: def make_segwit_info(privkey=None): """ Create a bundle of information that can be used to generate a p2sh-p2wpkh transaction """
if privkey is None: privkey = BitcoinPrivateKey(compressed=True).to_wif() return make_multisig_segwit_info(1, [privkey])
<SYSTEM_TASK:> Create a bundle of information <END_TASK> <USER_TASK:> Description: def make_multisig_segwit_wallet( m, n ): """ Create a bundle of information that can be used to generate an m-of-n multisig witness script. """
pks = [] for i in xrange(0, n): pk = BitcoinPrivateKey(compressed=True).to_wif() pks.append(pk) return make_multisig_segwit_info(m, pks)
<SYSTEM_TASK:> Factory which wrap all resources in settings. <END_TASK> <USER_TASK:> Description: def resources_preparing_factory(app, wrapper): """ Factory which wrap all resources in settings. """
settings = app.app.registry.settings config = settings.get(CONFIG_RESOURCES, None) if not config: return resources = [(k, [wrapper(r, GroupResource(k, v)) for r in v]) for k, v in config] settings[CONFIG_RESOURCES] = resources
<SYSTEM_TASK:> Get the tx fee per byte from the underlying blockchain <END_TASK> <USER_TASK:> Description: def get_tx_fee_per_byte(bitcoind_opts=None, config_path=None, bitcoind_client=None): """ Get the tx fee per byte from the underlying blockchain Return the fee on success Return None on error """
if bitcoind_client is None: bitcoind_client = get_bitcoind_client(bitcoind_opts=bitcoind_opts, config_path=config_path) try: # try to confirm in 2-3 blocks try: fee_info = bitcoind_client.estimatesmartfee(2) if 'errors' in fee_info and len(fee_info['errors']) > 0: fee = -1 else: fee = fee_info['feerate'] except JSONRPCException as je: fee = bitcoind_client.estimatefee(2) if fee < 0: # if we're testing, then use our own fee if os.environ.get("BLOCKSTACK_TEST") == '1' or os.environ.get("BLOCKSTACK_TESTNET", None) == "1": fee = 5500.0 / 10**8 else: log.error("Failed to estimate tx fee") return None else: log.debug("Bitcoin estimatefee(2) is {}".format(fee)) fee = float(fee) # fee is BTC/kb. Return satoshis/byte ret = int(round(fee * 10**8 / 1024.0)) log.debug("Bitcoin estimatefee(2) is {} ({} satoshi/byte)".format(fee, ret)) return ret except Exception as e: if os.environ.get("BLOCKSTACK_DEBUG") == '1': log.exception(e) log.error("Failed to estimate tx fee per byte") return None
<SYSTEM_TASK:> Get the tx fee for a tx <END_TASK> <USER_TASK:> Description: def get_tx_fee(tx_hex, config_path=None, bitcoind_opts=None, bitcoind_client=None): """ Get the tx fee for a tx Return the fee on success Return None on error """
tx_fee_per_byte = get_tx_fee_per_byte(config_path=config_path, bitcoind_opts=bitcoind_opts, bitcoind_client=bitcoind_client) if tx_fee_per_byte is None: return None return calculate_tx_fee(tx_hex, tx_fee_per_byte)
<SYSTEM_TASK:> Resolve the problem about sometimes error message specified by programmer won't output to user. <END_TASK> <USER_TASK:> Description: def handle_error(self, e): """ Resolve the problem about sometimes error message specified by programmer won't output to user. Flask-RESTFul's error handler handling format different exceptions has different behavior. If we raise an normal Exception, it will raise it again. If we report error by `restful.abort()`, likes `restful.abort(400, message="my_msg", custom_data="value")`, it will make a response like this: Status 400 Content {"message": "my_msg", "custom_data": "value"} The error message we specified was outputted. And if we raise an HTTPException, likes `from werkzeug.exceptions import BadRequest; raise BadRequest('my_msg')`, if will make a response too, but the error message specified by ourselves was lost: Status 400 Content {"status": 400, "message": "Bad Request"} The reason is, flask-restful always use the `data` attribute of HTTPException to generate response content. But, standard HTTPException object didn't has this attribute. So, we use this method to add it manually. Some reference material: Structure of exceptions raised by restful.abort(): code: status code description: predefined error message for this status code data: {     message: error message } Structure of python2's standard Exception: message: error message Exceptions in python3 didn't has hte `message` attribute, but use `str(exception)` can get it's message. Structure of standard `werkzeug.exceptions.HTTPException` (same as BadRequest): code: status code name: the name correspondence to status code description: error message """
if isinstance(e, HTTPException) and not hasattr(e, 'data'): e.data = dict(message=e.description) return super(ErrorHandledApi, self).handle_error(e)
<SYSTEM_TASK:> Download a schema from a specified URI and save it locally. <END_TASK> <USER_TASK:> Description: def download_schema(uri, path, comment=None): """Download a schema from a specified URI and save it locally. :param uri: url where the schema should be downloaded :param path: local file path where the schema should be saved :param comment: optional comment; if specified, will be added to the downloaded schema :returns: true on success, false if there was an error and the schema failed to download """
# if requests isn't available, warn and bail out if requests is None: sys.stderr.write(req_requests_msg) return # short-hand name of the schema, based on uri schema = os.path.basename(uri) try: req = requests.get(uri, stream=True) req.raise_for_status() with open(path, 'wb') as schema_download: for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks schema_download.write(chunk) # if a comment is specified, add it to the locally saved schema if comment is not None: tree = etree.parse(path) tree.getroot().append(etree.Comment(comment)) with open(path, 'wb') as xml_catalog: xml_catalog.write(etree.tostring(tree, pretty_print=True, xml_declaration=True, encoding="UTF-8")) logger.debug('Downloaded schema %s', schema) return True except requests.exceptions.HTTPError as err: msg = 'Failed to download schema %s' % schema msg += '(error codes %s)' % err.response.status_code logger.warn(msg) return False
<SYSTEM_TASK:> Generating an XML catalog for use in resolving schemas <END_TASK> <USER_TASK:> Description: def generate_catalog(xsd_schemas=None, xmlcatalog_dir=None, xmlcatalog_file=None): """Generating an XML catalog for use in resolving schemas Creates the XML Catalog directory if it doesn't already exist. Uses :meth:`download_schema` to save local copies of schemas, adding a comment indicating the date downloaded by eulxml. Generates a new catalog.xml file, with entries for all schemas that downloaded successfully. If no schemas downloaded, the catalog is not generated. .. Note:: Currently this method overwites any existing schema and catalog files, without checking if they are present or need to be updated. """
# if requests isn't available, warn and bail out if requests is None: sys.stderr.write(req_requests_msg) return logger.debug("Generating a new XML catalog") if xsd_schemas is None: xsd_schemas = XSD_SCHEMAS if xmlcatalog_file is None: xmlcatalog_file = XMLCATALOG_FILE if xmlcatalog_dir is None: xmlcatalog_dir = XMLCATALOG_DIR # if the catalog dir doesn't exist, create it if not os.path.isdir(xmlcatalog_dir): os.mkdir(xmlcatalog_dir) # new xml catalog to be populated with saved schemas catalog = Catalog() # comment string to be added to locally-saved schemas comment = 'Downloaded by eulxml %s on %s' % \ (__version__, date.today().isoformat()) for schema_uri in xsd_schemas: filename = os.path.basename(schema_uri) schema_path = os.path.join(xmlcatalog_dir, filename) saved = download_schema(schema_uri, schema_path, comment) if saved: # if download succeeded, add to our catalog. # - name is the schema identifier (uri) # - uri is the local path to load # NOTE: using path relative to catalog file catalog.uri_list.append(Uri(name=schema_uri, uri=filename)) # if we have any uris in our catalog, write it out if catalog.uri_list: with open(xmlcatalog_file, 'wb') as xml_catalog: catalog.serializeDocument(xml_catalog, pretty=True) return catalog
<SYSTEM_TASK:> Back up a sqlite3 database, while ensuring <END_TASK> <USER_TASK:> Description: def sqlite3_backup(src_path, dest_path): """ Back up a sqlite3 database, while ensuring that no ongoing queries are being executed. Return True on success Return False on error. """
# find sqlite3 sqlite3_path = sqlite3_find_tool() if sqlite3_path is None: log.error("Failed to find sqlite3 tool") return False sqlite3_cmd = [sqlite3_path, '{}'.format(src_path), '.backup "{}"'.format(dest_path)] rc = None backoff = 1.0 out = None err = None try: while True: log.debug("{}".format(" ".join(sqlite3_cmd))) p = subprocess.Popen(sqlite3_cmd, shell=False, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() rc = p.wait() if rc != 0: if "database is locked" in out.lower() or "database is locked" in err.lower(): # try again log.error("Database {} is locked; trying again in {} seconds".format(src_path, backoff)) time.sleep(backoff) backoff += 2 * backoff + random.random() * random.randint(0, int(backoff)) continue elif 'is not a database' in out.lower() or 'is not a database' in err.lower(): # not a valid sqlite3 file log.error("File {} is not a SQLite database".format(src_path)) return False else: # some other failure. Try again log.error('Failed to back up with "{}". Error log follows.\n{}'.format(" ".join(sqlite3_cmd), err)) continue else: break except Exception, e: log.exception(e) return False if not os.WIFEXITED(rc): # bad exit # failed for some other reason log.error("Backup failed: out='{}', err='{}', rc={}".format(out, err, rc)) return False if os.WEXITSTATUS(rc) != 0: # bad exit log.error("Backup failed: out='{}', err='{}', exit={}".format(out, err, os.WEXITSTATUS(rc))) return False return True
<SYSTEM_TASK:> Extract the existing chain state transactions from the existing state engine at a particular block height, <END_TASK> <USER_TASK:> Description: def state_engine_replay_block(existing_state_engine, new_state_engine, block_height, expected_snapshots={}): """ Extract the existing chain state transactions from the existing state engine at a particular block height, parse them using the new state engine, and process them using the new state engine. Returns the consensus hash of the block on success. """
assert new_state_engine.lastblock + 1 == block_height, 'Block height mismatch: {} + 1 != {}'.format(new_state_engine.lastblock, block_height) db_con = StateEngine.db_open(existing_state_engine.impl, existing_state_engine.working_dir) chainstate_block = existing_state_engine.db_chainstate_get_block(db_con, block_height) db_con.close() log.debug("{} transactions accepted at block {} in chainstate {}; replaying in {}".format(len(chainstate_block), block_height, existing_state_engine.working_dir, new_state_engine.working_dir)) parsed_txs = dict([(txdata['txid'], transactions.tx_parse(txdata['tx_hex'], blockchain=existing_state_engine.impl.get_blockchain())) for txdata in chainstate_block]) txs = [ { 'txid': txdata['txid'], 'txindex': txdata['txindex'], 'nulldata': '{}{}{}'.format(existing_state_engine.impl.get_magic_bytes().encode('hex'), txdata['opcode'].encode('hex'), txdata['data_hex']), 'ins': parsed_txs[txdata['txid']]['ins'], 'outs': parsed_txs[txdata['txid']]['outs'], 'senders': txdata['senders'], 'fee': txdata['fee'], 'hex': txdata['tx_hex'], 'tx_merkle_path': txdata['tx_merkle_path'], } for txdata in chainstate_block] new_state_engine.db_set_indexing(True, new_state_engine.impl, new_state_engine.working_dir) ops = new_state_engine.parse_block(block_height, txs) consensus_hash = new_state_engine.process_block(block_height, ops, expected_snapshots=expected_snapshots) new_state_engine.db_set_indexing(False, new_state_engine.impl, new_state_engine.working_dir) return consensus_hash
<SYSTEM_TASK:> Verify that a database is consistent with a <END_TASK> <USER_TASK:> Description: def state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl, untrusted_working_dir, new_state_engine, start_block=None, expected_snapshots={}): """ Verify that a database is consistent with a known-good consensus hash. This algorithm works by creating a new database, parsing the untrusted database, and feeding the untrusted operations into the new database block-by-block. If we derive the same consensus hash, then we can trust the database. Return True if consistent with the given consensus hash at the given consensus block height Return False if not """
assert hasattr(consensus_impl, 'get_initial_snapshots') final_consensus_hash = state_engine_replay(consensus_impl, untrusted_working_dir, new_state_engine, consensus_block_height, \ start_block=start_block, initial_snapshots=consensus_impl.get_initial_snapshots(), expected_snapshots=expected_snapshots) # did we reach the consensus hash we expected? if final_consensus_hash is not None and final_consensus_hash == trusted_consensus_hash: return True else: log.error("Unverifiable database state stored in '{}': {} != {}".format(untrusted_working_dir, final_consensus_hash, trusted_consensus_hash)) return False
<SYSTEM_TASK:> Restore the database and clear the indexing lockfile. <END_TASK> <USER_TASK:> Description: def db_restore(self, block_number=None): """ Restore the database and clear the indexing lockfile. Restore to a given block if given; otherwise use the most recent valid backup. Return True on success Return False if there is no state to restore Raise exception on error """
restored = False if block_number is not None: # restore a specific backup try: self.backup_restore(block_number, self.impl, self.working_dir) restored = True except AssertionError: log.error("Failed to restore state from {}".format(block_number)) return False else: # find the latest block backup_blocks = self.get_backup_blocks(self.impl, self.working_dir) for block_number in reversed(sorted(backup_blocks)): try: self.backup_restore(block_number, self.impl, self.working_dir) restored = True log.debug("Restored state from {}".format(block_number)) break except AssertionError: log.debug("Failed to restore state from {}".format(block_number)) continue if not restored: # failed to restore log.error("Failed to restore state from {}".format(','.join(backup_blocks))) return False # woo! self.db_set_indexing(False, self.impl, self.working_dir) return self.db_setup()
<SYSTEM_TASK:> Does the chainstate db exist? <END_TASK> <USER_TASK:> Description: def db_exists(cls, impl, working_dir): """ Does the chainstate db exist? """
path = config.get_snapshots_filename(impl, working_dir) return os.path.exists(path)
<SYSTEM_TASK:> Create a sqlite3 db at the given path. <END_TASK> <USER_TASK:> Description: def db_create(cls, impl, working_dir): """ Create a sqlite3 db at the given path. Create all the tables and indexes we need. Returns a db connection on success Raises an exception on error """
global VIRTUALCHAIN_DB_SCRIPT log.debug("Setup chain state in {}".format(working_dir)) path = config.get_snapshots_filename(impl, working_dir) if os.path.exists( path ): raise Exception("Database {} already exists") lines = [l + ";" for l in VIRTUALCHAIN_DB_SCRIPT.split(";")] con = sqlite3.connect(path, isolation_level=None, timeout=2**30) for line in lines: con.execute(line) con.row_factory = StateEngine.db_row_factory return con
<SYSTEM_TASK:> connect to our chainstate db <END_TASK> <USER_TASK:> Description: def db_connect(cls, path): """ connect to our chainstate db """
con = sqlite3.connect(path, isolation_level=None, timeout=2**30) con.row_factory = StateEngine.db_row_factory return con
<SYSTEM_TASK:> Open a connection to our chainstate db <END_TASK> <USER_TASK:> Description: def db_open(cls, impl, working_dir): """ Open a connection to our chainstate db """
path = config.get_snapshots_filename(impl, working_dir) return cls.db_connect(path)
<SYSTEM_TASK:> Insert a row into the chain state. <END_TASK> <USER_TASK:> Description: def db_chainstate_append(cls, cur, **fields): """ Insert a row into the chain state. Meant to be executed as part of a transaction. Return True on success Raise an exception if the fields are invalid Abort on db error. """
missing = [] extra = [] for reqfield in CHAINSTATE_FIELDS: if reqfield not in fields: missing.append(reqfield) for fieldname in fields: if fieldname not in CHAINSTATE_FIELDS: extra.append(fieldname) if len(missing) > 0 or len(extra) > 0: raise ValueError("Invalid fields: missing: {}, extra: {}".format(','.join(missing), ','.join(extra))) query = 'INSERT INTO chainstate ({}) VALUES ({});'.format( ','.join( CHAINSTATE_FIELDS ), ','.join( ['?'] * len(CHAINSTATE_FIELDS))) args = tuple([fields[fieldname] for fieldname in CHAINSTATE_FIELDS]) cls.db_query_execute(cur, query, args) return True
<SYSTEM_TASK:> Append hash info for the last block processed, and the time at which it was done. <END_TASK> <USER_TASK:> Description: def db_snapshot_append(cls, cur, block_id, consensus_hash, ops_hash, timestamp): """ Append hash info for the last block processed, and the time at which it was done. Meant to be executed as part of a transaction. Return True on success Raise an exception on invalid block number Abort on db error """
query = 'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);' args = (block_id,consensus_hash,ops_hash,timestamp) cls.db_query_execute(cur, query, args) return True
<SYSTEM_TASK:> Get the list of virtualchain transactions accepted at a given block. <END_TASK> <USER_TASK:> Description: def db_chainstate_get_block(cls, cur, block_height): """ Get the list of virtualchain transactions accepted at a given block. Returns the list of rows, where each row is a dict. """
query = 'SELECT * FROM chainstate WHERE block_id = ? ORDER BY vtxindex;' args = (block_height,) rows = cls.db_query_execute(cur, query, args, verbose=False) ret = [] for r in rows: rowdata = { 'txid': str(r['txid']), 'block_id': r['block_id'], 'txindex': r['txindex'], 'vtxindex': r['vtxindex'], 'opcode': str(r['opcode']), 'data_hex': str(r['data_hex']), 'senders': simplejson.loads(r['senders']), 'tx_hex': str(r['tx_hex']), 'tx_merkle_path': str(r['tx_merkle_path']), 'fee': r['fee'] } ret.append(rowdata) return ret
<SYSTEM_TASK:> Set lockfile path as to whether or not the system is indexing. <END_TASK> <USER_TASK:> Description: def db_set_indexing(cls, is_indexing, impl, working_dir): """ Set lockfile path as to whether or not the system is indexing. NOT THREAD SAFE, USE ONLY FOR CRASH DETECTION. """
indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir) if is_indexing: # make sure this exists with open(indexing_lockfile_path, 'w') as f: pass else: # make sure it does not exist try: os.unlink(indexing_lockfile_path) except: pass
<SYSTEM_TASK:> Is the system indexing? <END_TASK> <USER_TASK:> Description: def db_is_indexing(cls, impl, working_dir): """ Is the system indexing? Return True if so, False if not. """
indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir) return os.path.exists(indexing_lockfile_path)
<SYSTEM_TASK:> What was the last block processed? <END_TASK> <USER_TASK:> Description: def get_lastblock(cls, impl, working_dir): """ What was the last block processed? Return the number on success Return None on failure to read """
if not cls.db_exists(impl, working_dir): return None con = cls.db_open(impl, working_dir) query = 'SELECT MAX(block_id) FROM snapshots;' rows = cls.db_query_execute(con, query, (), verbose=False) ret = None for r in rows: ret = r['MAX(block_id)'] con.close() return ret
<SYSTEM_TASK:> Get the set of state paths that point to the current chain and state info. <END_TASK> <USER_TASK:> Description: def get_state_paths(cls, impl, working_dir): """ Get the set of state paths that point to the current chain and state info. Returns a list of paths. """
return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)]
<SYSTEM_TASK:> Get the set of block IDs that were backed up <END_TASK> <USER_TASK:> Description: def get_backup_blocks(cls, impl, working_dir): """ Get the set of block IDs that were backed up """
ret = [] backup_dir = config.get_backups_directory(impl, working_dir) if not os.path.exists(backup_dir): return [] for name in os.listdir( backup_dir ): if ".bak." not in name: continue suffix = name.split(".bak.")[-1] try: block_id = int(suffix) except: continue # must exist... backup_paths = cls.get_backup_paths(block_id, impl, working_dir) for p in backup_paths: if not os.path.exists(p): # doesn't exist block_id = None continue if block_id is not None: # have backup at this block ret.append(block_id) return ret
<SYSTEM_TASK:> Get the set of backup paths, given the virtualchain implementation module and block number <END_TASK> <USER_TASK:> Description: def get_backup_paths(cls, block_id, impl, working_dir): """ Get the set of backup paths, given the virtualchain implementation module and block number """
backup_dir = config.get_backups_directory(impl, working_dir) backup_paths = [] for p in cls.get_state_paths(impl, working_dir): pbase = os.path.basename(p) backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id)) backup_paths.append( backup_path ) return backup_paths
<SYSTEM_TASK:> Restore from a backup, given the virtualchain implementation module and block number. <END_TASK> <USER_TASK:> Description: def backup_restore(cls, block_id, impl, working_dir): """ Restore from a backup, given the virtualchain implementation module and block number. NOT THREAD SAFE. DO NOT CALL WHILE INDEXING. Return True on success Raise an exception on error, i.e. if a backup file is missing """
backup_dir = config.get_backups_directory(impl, working_dir) backup_paths = cls.get_backup_paths(block_id, impl, working_dir) for p in backup_paths: assert os.path.exists(p), "No such backup file: {}".format(p) for p in cls.get_state_paths(impl, working_dir): pbase = os.path.basename(p) backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id))) log.debug("Restoring '{}' to '{}'".format(backup_path, p)) shutil.copy(backup_path, p) return True
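A minimal crash-recovery sketch, assuming `impl` and `working_dir` are already in scope and that at least one backup exists (the flow itself is illustrative, not part of the library):

# Hypothetical recovery flow: restore the newest available backup.
blocks = StateEngine.get_backup_blocks(impl, working_dir)
if blocks:
    StateEngine.backup_restore(max(blocks), impl, working_dir)   # raises if a backup file is missing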
<SYSTEM_TASK:> If we're doing backups on a regular basis, then <END_TASK> <USER_TASK:> Description: def make_backups(self, block_id): """ If we're doing backups on a regular basis, then carry them out here if it is time to do so. This method does nothing otherwise. Return None on success Abort on failure """
assert self.setup, "Not set up yet. Call .db_setup() first!" # make a backup? if self.backup_frequency is not None: if (block_id % self.backup_frequency) == 0: backup_dir = config.get_backups_directory(self.impl, self.working_dir) if not os.path.exists(backup_dir): try: os.makedirs(backup_dir) except Exception, e: log.exception(e) log.error("FATAL: failed to make backup directory '%s'" % backup_dir) traceback.print_stack() os.abort() for p in self.get_state_paths(self.impl, self.working_dir): if os.path.exists(p): try: pbase = os.path.basename(p) backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id - 1))) if not os.path.exists(backup_path): rc = sqlite3_backup(p, backup_path) if not rc: log.warning("Failed to back up as an SQLite db. Falling back to /bin/cp") shutil.copy(p, backup_path) else: log.error("Will not overwrite '%s'" % backup_path) except Exception, e: log.exception(e) log.error("FATAL: failed to back up '%s'" % p) traceback.print_stack() os.abort() return
<SYSTEM_TASK:> Write out all state to the working directory. <END_TASK> <USER_TASK:> Description: def save(self, block_id, consensus_hash, ops_hash, accepted_ops, virtualchain_ops_hints, backup=False): """ Write out all state to the working directory. Calls the implementation's 'db_save' method to store any state for this block. Calls the implementation's 'db_continue' method at the very end, to signal to the implementation that all virtualchain state has been saved. This method can return False, in which case, indexing stops Return True on success Return False if the implementation wants to exit. Aborts on fatal error """
assert self.setup, "Not set up yet. Call .db_setup() first!" assert len(accepted_ops) == len(virtualchain_ops_hints) if self.read_only: log.error("FATAL: StateEngine is read only") traceback.print_stack() os.abort() if block_id < self.lastblock: log.error("FATAL: Already processed up to block {} (got {})".format(self.lastblock, block_id)) traceback.print_stack() os.abort() # ask the implementation to save if hasattr(self.impl, 'db_save'): rc = False try: rc = self.impl.db_save(block_id, consensus_hash, ops_hash, accepted_ops, virtualchain_ops_hints, db_state=self.state) except Exception as e: log.exception(e) rc = False if not rc: log.error("FATAL: Implementation failed to save state at block {}".format(block_id)) traceback.print_stack() os.abort() # save new chainstate self.lastblock = block_id # start a transaction to store the new data db_con = self.db_open(self.impl, self.working_dir) cur = db_con.cursor() self.db_query_execute(cur, "BEGIN", (), verbose=False) # add chainstate for i, (accepted_op, virtualchain_op_hints) in enumerate(zip(accepted_ops, virtualchain_ops_hints)): # unpack virtualchain hints senders = virtualchain_op_hints['virtualchain_senders'] data_hex = virtualchain_op_hints['virtualchain_data_hex'] tx_hex = virtualchain_op_hints['virtualchain_txhex'] txid = virtualchain_op_hints['virtualchain_txid'] fee = virtualchain_op_hints['virtualchain_fee'] opcode = virtualchain_op_hints['virtualchain_opcode'] txindex = virtualchain_op_hints['virtualchain_txindex'] vtxindex = i merkle_path = virtualchain_op_hints['virtualchain_tx_merkle_path'] vtx_data = { 'txid': txid, 'senders': simplejson.dumps(senders), 'data_hex': data_hex, 'tx_hex': tx_hex, 'tx_merkle_path': merkle_path, 'fee': fee, 'opcode': opcode, 'txindex': txindex, 'vtxindex': vtxindex, 'block_id': block_id } self.db_chainstate_append(cur, **vtx_data) # update snapshot info self.db_snapshot_append(cur, block_id, consensus_hash, ops_hash, int(time.time())) self.db_query_execute(cur, "END", (), verbose=False) db_con.close() # make new backups and clear old ones self.make_backups(block_id) self.clear_old_backups(block_id) # ask the implementation if we should continue continue_indexing = True if hasattr(self.impl, "db_continue"): try: continue_indexing = self.impl.db_continue( block_id, consensus_hash ) except Exception, e: log.exception(e) traceback.print_stack() log.error("FATAL: implementation failed db_continue") os.abort() return continue_indexing
<SYSTEM_TASK:> Generate the consensus hash from the hash over the current ops, and <END_TASK> <USER_TASK:> Description: def make_snapshot_from_ops_hash( cls, record_root_hash, prev_consensus_hashes ): """ Generate the consensus hash from the hash over the current ops, and all previous required consensus hashes. """
# mix into previous consensus hashes... all_hashes = prev_consensus_hashes[:] + [record_root_hash] all_hashes.sort() all_hashes_merkle_tree = MerkleTree( all_hashes ) root_hash = all_hashes_merkle_tree.root() consensus_hash = StateEngine.calculate_consensus_hash( root_hash ) return consensus_hash
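A hedged illustration of how the classmethod might be called; the hash values below are placeholders, and the assumption is that `MerkleTree` accepts hex-encoded strings as it does elsewhere in this module:

prev_hashes = ['00' * 16, '11' * 16]   # placeholder prior consensus hashes (hex strings)
ops_hash = 'ab' * 16                   # placeholder hash over this block's operations
new_ch = StateEngine.make_snapshot_from_ops_hash(ops_hash, prev_hashes)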
<SYSTEM_TASK:> Set a virtualchain field value. <END_TASK> <USER_TASK:> Description: def set_virtualchain_field(cls, opdata, virtualchain_field, value): """ Set a virtualchain field value. Used by implementations that generate extra consensus data at the end of a block """
assert virtualchain_field in RESERVED_KEYS, 'Invalid field name {} (choose from {})'.format(virtualchain_field, ','.join(RESERVED_KEYS)) opdata[virtualchain_field] = value
<SYSTEM_TASK:> Given a block ID and a data-bearing transaction, <END_TASK> <USER_TASK:> Description: def parse_transaction(self, block_id, tx): """ Given a block ID and a data-bearing transaction, try to parse it into a virtual chain operation. Use the implementation's 'db_parse' method to do so. Data transactions that do not have the magic bytes or a valid opcode will be skipped automatically. The db_parse method does not need to know how to handle them. @tx is a dict with `txid`: the transaction ID `txindex`: the offset in the block where this tx occurs `nulldata`: the hex-encoded scratch data from the transaction `ins`: the list of transaction inputs `outs`: the list of transaction outputs `senders`: the list of transaction senders `fee`: the transaction fee `hex`: the hex-encoded raw transaction `tx_merkle_path`: the transaction's merkle path Return a dict representing the data on success. Return None on error """
data_hex = tx['nulldata'] inputs = tx['ins'] outputs = tx['outs'] senders = tx['senders'] fee = tx['fee'] txhex = tx['hex'] merkle_path = tx['tx_merkle_path'] if not is_hex(data_hex): # should always work; the tx downloader converts the binary string to hex # not a valid hex string raise ValueError("Invalid nulldata: not hex-encoded") if len(data_hex) % 2 != 0: # should always work; the tx downloader converts the binary string to hex # not valid hex string raise ValueError("Invalid nulldata: not hex-encoded") data_bin = None try: # should always work; the tx downloader converts the binary string to hex data_bin = data_hex.decode('hex') except Exception, e: log.error("Failed to parse transaction: %s (data_hex = %s)" % (tx, data_hex)) raise ValueError("Invalid nulldata: not hex-encoded") if not data_bin.startswith(self.magic_bytes): # not for us return None if len(data_bin) < len(self.magic_bytes) + 1: # invalid operation--no opcode return None # 3rd byte is always the operation code op_code = data_bin[len(self.magic_bytes)] if op_code not in self.opcodes: return None # looks like an op. Try to parse it. op_payload = data_bin[len(self.magic_bytes)+1:] op = self.impl.db_parse(block_id, tx['txid'], tx['txindex'], op_code, op_payload, senders, inputs, outputs, fee, db_state=self.state, raw_tx=txhex) if op is None: # not valid return None # store it op['virtualchain_opcode'] = op_code op['virtualchain_txid'] = tx['txid'] op['virtualchain_txindex'] = tx['txindex'] op['virtualchain_txhex'] = txhex op['virtualchain_tx_merkle_path'] = merkle_path op['virtualchain_senders'] = senders op['virtualchain_fee'] = fee op['virtualchain_data_hex'] = op_payload.encode('hex') return op
<SYSTEM_TASK:> Given the sequence of transactions in a block, turn them into a <END_TASK> <USER_TASK:> Description: def parse_block(self, block_id, txs): """ Given the sequence of transactions in a block, turn them into a sequence of virtual chain operations. Return the list of successfully-parsed virtualchain transactions """
ops = [] for i in range(0,len(txs)): tx = txs[i] op = self.parse_transaction(block_id, tx) if op is not None: ops.append( op ) return ops
<SYSTEM_TASK:> Remove reserved keywords from an op dict, <END_TASK> <USER_TASK:> Description: def remove_reserved_keys(self, op): """ Remove reserved keywords from an op dict, which can then safely be passed into the db. Returns a new op dict, and the reserved fields """
sanitized = {} reserved = {} for k in op.keys(): if str(k) not in RESERVED_KEYS: sanitized[str(k)] = copy.deepcopy(op[k]) else: reserved[str(k)] = copy.deepcopy(op[k]) return sanitized, reserved
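A small illustration of the split, assuming `engine` is a StateEngine instance and that `RESERVED_KEYS` contains the 'virtualchain_*' field names used elsewhere in this module:

op = {'name': 'foo.test', 'virtualchain_opcode': '+', 'virtualchain_txid': 'f00dd00d'}
sanitized, reserved = engine.remove_reserved_keys(op)
# sanitized == {'name': 'foo.test'}
# reserved  == {'virtualchain_opcode': '+', 'virtualchain_txid': 'f00dd00d'}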
<SYSTEM_TASK:> Log an accepted operation <END_TASK> <USER_TASK:> Description: def log_accept(self, block_id, vtxindex, opcode, op_data): """ Log an accepted operation """
log.debug("ACCEPT op {} at ({}, {}) ({})".format(opcode, block_id, vtxindex, json.dumps(op_data, sort_keys=True)))
<SYSTEM_TASK:> Given a transaction-ordered sequence of parsed operations, <END_TASK> <USER_TASK:> Description: def process_ops(self, block_id, ops): """ Given a transaction-ordered sequence of parsed operations, check their validity and give them to the state engine to effect state changes. It calls 'db_check' to validate each operation, and 'db_commit' to add it to the state engine. Gets back a list of state transitions (ops) to snapshot. Returns a defaultdict with the following fields: 'virtualchain_ordered': the list of operations committed by the implementation (where each operation is a dict of fields) 'virtualchain_all_ops': this is ops, plus a list containing the "final" operations returned by the implementation in response to the 'virtualchain_final' hint 'virtualchain_final': this is the list of final operations returned by the implementation in response to the 'virtualchain_final' hint. Aborts on error """
new_ops = defaultdict(list) for op in self.opcodes: new_ops[op] = [] # transaction-ordered listing of accepted operations new_ops['virtualchain_ordered'] = [] new_ops['virtualchain_all_ops'] = ops to_commit_sanitized = [] to_commit_reserved = [] # let the implementation do an initial scan over the blocks # NOTE: these will be different objects in memory from the objects passed into db_check initial_scan = [] for i in xrange(0, len(ops)): op_data = ops[i] op_sanitized, _ = self.remove_reserved_keys( op_data ) initial_scan.append( copy.deepcopy( op_sanitized ) ) # allow the implementation to do a pre-scan of the set of ops # (e.g. in Blockstack, this gets used to find name registration collisions) if hasattr(self.impl, "db_scan_block"): self.impl.db_scan_block( block_id, initial_scan, db_state=self.state ) else: log.debug("Compat: no db_scan_block") # check each operation for i in range(0, len(ops)): op_data = ops[i] op_sanitized, reserved = self.remove_reserved_keys( op_data ) opcode = reserved['virtualchain_opcode'] # check this op rc = self.impl.db_check(block_id, new_ops, opcode, op_sanitized, reserved['virtualchain_txid'], reserved['virtualchain_txindex'], to_commit_sanitized, db_state=self.state) if rc: # commit this op new_op_list = self.impl.db_commit(block_id, opcode, op_sanitized, reserved['virtualchain_txid'], reserved['virtualchain_txindex'], db_state=self.state) if type(new_op_list) != list: new_op_list = [new_op_list] for new_op in new_op_list: if new_op is not None: if type(new_op) == dict: # externally-visible state transition to_commit_sanitized_op = copy.deepcopy( new_op ) to_commit_sanitized.append( to_commit_sanitized_op ) new_op.update( reserved ) new_ops[opcode].append( new_op ) new_ops['virtualchain_ordered'].append( new_op ) else: # internal state transition continue else: self.log_reject( block_id, reserved['virtualchain_txindex'], opcode, copy.deepcopy(op_sanitized)) # final commit hint. # the implementation has a chance here to feed any extra data into the consensus hash with this call # (e.g. to affect internal state transitions that occur as seconary, holistic consequences to the sequence # of prior operations for this block). final_ops = self.impl.db_commit( block_id, 'virtualchain_final', {'virtualchain_ordered': new_ops['virtualchain_ordered']}, None, None, db_state=self.state ) if final_ops is not None: # make sure each one has all the virtualchain reserved fields for i in range(0, len(final_ops)): for fieldname in RESERVED_FIELDS: assert fieldname in final_ops[i], 'Extra consensus operation at offset {} is missing {}'.format(i, fieldname) new_ops['virtualchain_final'] = final_ops new_ops['virtualchain_ordered'] += final_ops new_ops['virtualchain_all_ops'] += final_ops return new_ops
<SYSTEM_TASK:> Get the consensus hash at a given block. <END_TASK> <USER_TASK:> Description: def get_consensus_at(self, block_id): """ Get the consensus hash at a given block. Return the consensus hash if we have one for this block. Return None if we don't """
query = 'SELECT consensus_hash FROM snapshots WHERE block_id = ?;' args = (block_id,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['consensus_hash'] con.close() return res
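Usage sketch, assuming `engine` is an initialized StateEngine instance; the inverse lookup used here is the companion method defined just below:

ch = engine.get_consensus_at(560000)   # consensus hash stored at block 560000, or None
if ch is not None:
    assert engine.get_block_from_consensus(ch) == 560000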
<SYSTEM_TASK:> Get the block number with the given consensus hash. <END_TASK> <USER_TASK:> Description: def get_block_from_consensus( self, consensus_hash ): """ Get the block number with the given consensus hash. Return None if there is no such block. """
query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;' args = (consensus_hash,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['block_id'] con.close() return res
<SYSTEM_TASK:> Get the list of valid consensus hashes for a given block. <END_TASK> <USER_TASK:> Description: def get_valid_consensus_hashes( self, block_id ): """ Get the list of valid consensus hashes for a given block. """
first_block_to_check = block_id - self.impl.get_valid_transaction_window() query = 'SELECT consensus_hash FROM snapshots WHERE block_id >= ? AND block_id <= ?;' args = (first_block_to_check,block_id) valid_consensus_hashes = [] con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) for r in rows: assert r['consensus_hash'] is not None assert isinstance(r['consensus_hash'], (str,unicode)) valid_consensus_hashes.append(str(r['consensus_hash'])) con.close() return valid_consensus_hashes
<SYSTEM_TASK:> Downloads the specified version of the Bayestar dust map. <END_TASK> <USER_TASK:> Description: def fetch(version='bayestar2017'): """ Downloads the specified version of the Bayestar dust map. Args: version (Optional[:obj:`str`]): The map version to download. Valid versions are :obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and :obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults to :obj:`'bayestar2017'`. Raises: :obj:`ValueError`: The requested version of the map does not exist. :obj:`DownloadError`: Either no matching file was found under the given DOI, or the MD5 sum of the file was not as expected. :obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there was a problem connecting to the Dataverse. """
doi = { 'bayestar2015': '10.7910/DVN/40C44C', 'bayestar2017': '10.7910/DVN/LCYHJG' } # Raise an error if the specified version of the map does not exist try: doi = doi[version] except KeyError as err: raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format( version, ', '.join(['"{}"'.format(k) for k in doi.keys()]) )) requirements = { 'bayestar2015': {'contentType': 'application/x-hdf'}, 'bayestar2017': {'filename': 'bayestar2017.h5'} }[version] local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version)) # Download the data fetch_utils.dataverse_download_doi( doi, local_fname, file_requirements=requirements)
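A usage sketch; the import path assumes the `dustmaps` package layout this function normally ships in, which may differ in your install:

from dustmaps.bayestar import fetch   # assumed module path
fetch()                               # downloads 'bayestar2017' by default
fetch(version='bayestar2015')         # or fetch the 2015 map instead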
<SYSTEM_TASK:> If a template variable is None, render '' instead of 'None'. <END_TASK> <USER_TASK:> Description: def set_jinja2_silent_none(config): # pragma: no cover """ If a template variable is None, render '' instead of 'None'. """
config.commit() jinja2_env = config.get_jinja2_environment() jinja2_env.finalize = _silent_none
<SYSTEM_TASK:> Quickly set up attribute validation in one go, based on `sqlalchemy.orm.validates`. <END_TASK> <USER_TASK:> Description: def complex_validates(validate_rule): """Quickly set up attribute validation in one go, based on `sqlalchemy.orm.validates`. Unlike `sqlalchemy.orm.validates`, you don't need to create many model methods; just pass in a formatted validate rule. (Because of SQLAlchemy's validation mechanism, you need to assign this function's return value to a model property.) For simplicity, complex_validates doesn't support the `include_removes` and `include_backrefs` parameters of `sqlalchemy.orm.validates`. And we don't recommend using this function multiple times in one model, because that brings several problems, like: 1. The execution order of multiple complex_validates is decided by their model property names, in reverse order. eg. predicates in `validator1 = complex_validates(...)` will be executed **AFTER** predicates in `validator2 = complex_validates(...)` 2. If you try to validate the same attribute in two (or more) complex_validates, only one of them will be executed. (Maybe this is a bug in SQLAlchemy?) `complex_validates` is currently based on `sqlalchemy.orm.validates`, so it is difficult to solve these problems. Maybe we can use `AttributeEvents` directly in the future, to provide a more reliable implementation. Rule Format ----------- { column_name: predicate # basic format (column_name2, column_name3): predicate # you can apply one predicate to multiple column_names column_name4: (predicate, predicate2) # you can apply multiple predicates to the given column_names column_name5: [(predicate, arg1, ... argN)] # and you can specify the arguments passed to a predicate # when it performs validation (column_name6, column_name7): [(predicate, arg1, ... argN), predicate2] # another example } Notice: If you want to pass arguments to a predicate, you must wrap the whole command in another list or tuple. Otherwise, we will treat the argument as another predicate. So, this is wrong: { column_name: (predicate, arg) } this is right: { column_name: [(predicate, arg)] } Predicate --------- There are some `predefined_predicates`; you can just reference them by name in the validate rule. {column_name: ['trans_upper']} Or you can pass your own predicate function to the rule, like this: def custom_predicate(value): return value_is_legal # return True or False for valid or invalid value {column_name: [custom_predicate]} If you want to change the value during validation, return a `dict(value=new_value)` instead of a boolean {column_name: lambda value: dict(value = value * 2)} # And you see, we can use a lambda as a predicate. A predicate can also receive extra arguments that are passed in the rule: def multiple(value, target_multiple): return dict(value= value * target_multiple) {column_name: (multiple, 10)} Complete Example ---------------- class People(db.Model): name = Column(String(100)) age = Column(Integer) IQ = Column(Integer) has_lover = Column(Boolean) validator = complex_validates({ 'name': [('min_length', 1), ('max_length', 100)], ('age', 'IQ'): [('min', 0)], 'has_lover': lambda value: not value # hate you! })"""
ref_dict = { # column_name: ( # (predicate, arg1, ... argN), # ... # ) } for column_names, predicate_refs in validate_rule.items(): for column_name in _to_tuple(column_names): ref_dict[column_name] = \ ref_dict.get(column_name, tuple()) + _normalize_predicate_refs(predicate_refs) return validates(*ref_dict.keys())( lambda self, name, value: _validate_handler(name, value, ref_dict[name]))
<SYSTEM_TASK:> handle predicate's return value <END_TASK> <USER_TASK:> Description: def _validate_handler(column_name, value, predicate_refs): """handle predicate's return value"""
# only does validate when attribute value is not None # else, just return it, let sqlalchemy decide if the value was legal according to `nullable` argument's value if value is not None: for predicate_ref in predicate_refs: predicate, predicate_name, predicate_args = _decode_predicate_ref(predicate_ref) validate_result = predicate(value, *predicate_args) if isinstance(validate_result, dict) and 'value' in validate_result: value = validate_result['value'] elif type(validate_result) != bool: raise Exception( 'predicate (name={}) can only return bool or dict(value=new_value) value'.format(predicate_name)) elif not validate_result: raise ModelInvalid(u'db model validate failed: column={}, value={}, predicate={}, arguments={}'.format( column_name, value, predicate_name, ','.join(map(str, predicate_args)) )) return value
<SYSTEM_TASK:> Returns the simple predictive probability, averaged over each sample. <END_TASK> <USER_TASK:> Description: def simple_predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q): """Returns the simple predictive probability, averaged over each sample."""
logprobs = [float(simple_predictive_probability(M_c, X_L, X_D, Y, Q)) for X_L, X_D in zip(X_L_list, X_D_list)] return logmeanexp(logprobs)
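The averaging here happens in log space via `logmeanexp`; the following is a minimal sketch of what such a helper typically computes (an assumption about the helper, not its actual source):

import numpy as np

def logmeanexp_sketch(logprobs):
    # log of the arithmetic mean of the probabilities, computed stably in log space
    logprobs = np.asarray(logprobs, dtype=float)
    m = logprobs.max()
    return m + np.log(np.mean(np.exp(logprobs - m)))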
<SYSTEM_TASK:> Returns the predictive probability, averaged over each sample. <END_TASK> <USER_TASK:> Description: def predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q): """ Returns the predictive probability, averaged over each sample. """
logprobs = [float(predictive_probability(M_c, X_L, X_D, Y, Q)) for X_L, X_D in zip(X_L_list, X_D_list)] return logmeanexp(logprobs)
<SYSTEM_TASK:> Returns the similarity of the given row to the target row, averaged over <END_TASK> <USER_TASK:> Description: def similarity( M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_column=None): """Returns the similarity of the given row to the target row, averaged over all the column indexes given by col_idxs. Similarity is defined as the proportion of times that two cells are in the same view and category. """
score = 0.0 # Set col_idxs: defaults to all columns. if target_column: if type(target_column) == str: col_idxs = [M_c['name_to_idx'][target_column]] elif type(target_column) == list: col_idxs = target_column else: col_idxs = [target_column] else: col_idxs = M_c['idx_to_name'].keys() col_idxs = [int(col_idx) for col_idx in col_idxs] ## Iterate over all latent states. for X_L, X_D in zip(X_L_list, X_D_list): for col_idx in col_idxs: view_idx = X_L['column_partition']['assignments'][col_idx] if X_D[view_idx][given_row_id] == X_D[view_idx][target_row_id]: score += 1.0 return score / (len(X_L_list)*len(col_idxs))
<SYSTEM_TASK:> Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix <END_TASK> <USER_TASK:> Description: def coord2healpix(coords, frame, nside, nest=True): """ Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix system is defined on the coordinate frame ``frame``. Args: coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates. frame (:obj:`str`): The frame in which the HEALPix system is defined. nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2. nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering is desired. ``False`` for ring ordering. Returns: An array of pixel indices (integers), with the same shape as the input SkyCoord coordinates (:obj:`coords.shape`). Raises: :obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported. """
if coords.frame.name != frame: c = coords.transform_to(frame) else: c = coords if hasattr(c, 'ra'): phi = c.ra.rad theta = 0.5*np.pi - c.dec.rad return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest) elif hasattr(c, 'l'): phi = c.l.rad theta = 0.5*np.pi - c.b.rad return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest) elif hasattr(c, 'x'): return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest) elif hasattr(c, 'w'): return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest) else: raise dustexceptions.CoordFrameError( 'No method to transform from coordinate frame "{}" to HEALPix.'.format( frame))
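Usage sketch with astropy coordinates; nside=512 is an arbitrary choice:

import astropy.units as u
from astropy.coordinates import SkyCoord

c = SkyCoord(l=[0., 90.] * u.deg, b=[30., -45.] * u.deg, frame='galactic')
pix = coord2healpix(c, 'galactic', 512, nest=True)   # array of HEALPix pixel indices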
<SYSTEM_TASK:> Query using Galactic coordinates. <END_TASK> <USER_TASK:> Description: def query_gal(self, l, b, d=None, **kwargs): """ Query using Galactic coordinates. Args: l (:obj:`float`, scalar or array-like): Galactic longitude, in degrees, or as an :obj:`astropy.unit.Quantity`. b (:obj:`float`, scalar or array-like): Galactic latitude, in degrees, or as an :obj:`astropy.unit.Quantity`. d (Optional[:obj:`float`, scalar or array-like]): Distance from the Solar System, in kpc, or as an :obj:`astropy.unit.Quantity`. Defaults to ``None``, meaning no distance is specified. **kwargs: Any additional keyword arguments accepted by derived classes. Returns: The results of the query, which must be implemented by derived classes. """
if not isinstance(l, units.Quantity): l = l * units.deg if not isinstance(b, units.Quantity): b = b * units.deg if d is None: coords = coordinates.SkyCoord(l, b, frame='galactic') else: if not isinstance(d, units.Quantity): d = d * units.kpc coords = coordinates.SkyCoord( l, b, distance=d, frame='galactic') return self.query(coords, **kwargs)
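Since `query` itself is implemented by derived classes, here is a usage sketch against a hypothetical subclass instance named `dustmap`:

import astropy.units as u

# Scalar query at l=120 deg, b=45 deg, 1 kpc from the Sun.
val = dustmap.query_gal(120. * u.deg, 45. * u.deg, d=1. * u.kpc)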
<SYSTEM_TASK:> Pass a `model class` or `model instance` to this function, <END_TASK> <USER_TASK:> Description: def make_request_parser(model_or_inst, excludes=None, only=None, for_populate=False): """Pass a `model class` or `model instance` to this function, and it will generate a `RequestParser` that extracts user request data from `request.json` according to the model class's definition. The parameters `excludes` and `only` can be a `str` or a list of `str`, and are used to specify which columns should be handled. If you pass `excludes` and `only` at the same time, only `excludes` will be used. The primary key of the model will not be added to the `RequestParser`'s argument list, unless you explicitly include it via the `only` parameter. If you pass in a model class, rather than a model instance, the function will do `required` checking for columns with nullable=False. (If you pass in a model instance, the `required` checking is skipped, because in this situation we should allow the user to leave a field unassigned.) """
is_inst = _is_inst(model_or_inst) if isinstance(excludes, six.string_types): excludes = [excludes] if excludes and only: only = None elif isinstance(only, six.string_types): only = [only] parser = RequestPopulator() if for_populate else reqparse.RequestParser() for col in model_or_inst.__table__.columns: if only: if col.name not in only: continue elif (excludes and col.name in excludes) or col.primary_key: continue col_type = col.type.python_type kwargs = { "type": _type_dict.get(col_type.__name__, col_type) if hasattr(col_type, '__name__') else col_type } # When creating a new model instance, if a field has no default value and is not nullable, # mark its corresponding argument as `required`. if not is_inst and col.default is None and col.server_default is None and not col.nullable: kwargs["required"] = True parser.add_argument(col.name, **kwargs) return parser
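A hedged usage sketch inside a Flask-RESTful resource; `User`, its `name` column, and the `db` session handle are illustrative assumptions, not part of this module:

from flask_restful import Resource

user_parser = make_request_parser(User)       # pass the class (not an instance) to enable `required` checks

class UserResource(Resource):
    def post(self):
        args = user_parser.parse_args()        # extracts and type-converts fields from request.json
        user = User(name=args['name'])         # hypothetical column; use whatever columns your model defines
        db.session.add(user)
        db.session.commit()
        return {'id': user.id}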
<SYSTEM_TASK:> Set all flags in ``flagset``, and clear all other flags. <END_TASK> <USER_TASK:> Description: def set_flagstate(flagset): """ Set all flags in ``flagset``, and clear all other flags. """
if not flagset <= _all_flags: raise ValueError("unrecognized flags in flagset") for f in flagset: set_flag(f) for f in _all_flags - flagset: clear_flag(f)
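A usage sketch with the bigfloat flag constants; the exact flag names (e.g. `Inexact`) are assumed from the surrounding module:

set_flagstate(set())         # clear every flag
set_flagstate({Inexact})     # set only the Inexact flag; all others are cleared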
<SYSTEM_TASK:> Convert the string ``s`` in base ``base`` to a BigFloat instance, rounding <END_TASK> <USER_TASK:> Description: def set_str2(s, base, context=None): """ Convert the string ``s`` in base ``base`` to a BigFloat instance, rounding according to the current context. Raise ValueError if ``s`` doesn't represent a valid string in the given base. """
return _apply_function_in_current_context( BigFloat, _set_from_whole_string, (s, base), context, )
<SYSTEM_TASK:> Return ``x``. <END_TASK> <USER_TASK:> Description: def pos(x, context=None): """ Return ``x``. As usual, the result is rounded to the current context. The ``pos`` function can be useful for rounding an intermediate result, computed with a temporary increase in precision, back to the current context. For example:: >>> from bigfloat import precision >>> pow(3, 20) + 1.234 - pow(3, 20) # inaccurate due to precision loss BigFloat.exact('1.2340002059936523', precision=53) >>> with precision(100): # compute result with extra precision ... x = pow(3, 20) + 1.234 - pow(3, 20) ... >>> x BigFloat.exact('1.2339999999999999857891452847980', precision=100) >>> pos(x) # round back to original precision BigFloat.exact('1.2340000000000000', precision=53) """
return _apply_function_in_current_context( BigFloat, mpfr.mpfr_set, (BigFloat._implicit_convert(x),), context, )
<SYSTEM_TASK:> Return the floor of ``x`` divided by ``y``. <END_TASK> <USER_TASK:> Description: def floordiv(x, y, context=None): """ Return the floor of ``x`` divided by ``y``. The result is a ``BigFloat`` instance, rounded to the context if necessary. Special cases match those of the ``div`` function. """
return _apply_function_in_current_context( BigFloat, mpfr_floordiv, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
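A small example of the semantics, assuming the helpers above are importable:

floordiv(7, 2)     # BigFloat('3.0'):  floor(3.5)
floordiv(-7, 2)    # BigFloat('-4.0'): floor(-3.5) rounds toward -inf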
<SYSTEM_TASK:> Return the remainder of x divided by y, with sign matching that of y. <END_TASK> <USER_TASK:> Description: def mod(x, y, context=None): """ Return the remainder of x divided by y, with sign matching that of y. """
return _apply_function_in_current_context( BigFloat, mpfr_mod, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
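The sign convention matches Python's own `%` operator; for example:

mod(7, 3)     # BigFloat('1.0')
mod(7, -3)    # BigFloat('-2.0'): the result takes the sign of y
mod(-7, 3)    # BigFloat('2.0')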
<SYSTEM_TASK:> Return the reciprocal square root of x. <END_TASK> <USER_TASK:> Description: def rec_sqrt(x, context=None): """ Return the reciprocal square root of x. Return +Inf if x is ±0, +0 if x is +Inf, and NaN if x is negative. """
return _apply_function_in_current_context( BigFloat, mpfr.mpfr_rec_sqrt, (BigFloat._implicit_convert(x),), context, )
<SYSTEM_TASK:> Return the kth root of x. <END_TASK> <USER_TASK:> Description: def root(x, k, context=None): """ Return the kth root of x. For k odd and x negative (including -Inf), return a negative number. For k even and x negative (including -Inf), return NaN. The kth root of -0 is defined to be -0, whatever the parity of k. This function is only implemented for nonnegative k. """
if k < 0: raise ValueError("root function not implemented for negative k") return _apply_function_in_current_context( BigFloat, mpfr.mpfr_root, (BigFloat._implicit_convert(x), k), context, )
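A quick illustration of the odd/even behaviour described above:

root(-8, 3)    # BigFloat('-2.0'): odd k keeps the sign
root(-8, 2)    # BigFloat('nan'):  even k of a negative number is NaN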
<SYSTEM_TASK:> Return ``x`` raised to the power ``y``. <END_TASK> <USER_TASK:> Description: def pow(x, y, context=None): """ Return ``x`` raised to the power ``y``. Special values are handled as described in the ISO C99 and IEEE 754-2008 standards for the pow function. * pow(±0, y) returns plus or minus infinity for y a negative odd integer. * pow(±0, y) returns plus infinity for y negative and not an odd integer. * pow(±0, y) returns plus or minus zero for y a positive odd integer. * pow(±0, y) returns plus zero for y positive and not an odd integer. * pow(-1, ±Inf) returns 1. * pow(+1, y) returns 1 for any y, even a NaN. * pow(x, ±0) returns 1 for any x, even a NaN. * pow(x, y) returns NaN for finite negative x and finite non-integer y. * pow(x, -Inf) returns plus infinity for 0 < abs(x) < 1, and plus zero for abs(x) > 1. * pow(x, +Inf) returns plus zero for 0 < abs(x) < 1, and plus infinity for abs(x) > 1. * pow(-Inf, y) returns minus zero for y a negative odd integer. * pow(-Inf, y) returns plus zero for y negative and not an odd integer. * pow(-Inf, y) returns minus infinity for y a positive odd integer. * pow(-Inf, y) returns plus infinity for y positive and not an odd integer. * pow(+Inf, y) returns plus zero for y negative, and plus infinity for y positive. """
return _apply_function_in_current_context( BigFloat, mpfr.mpfr_pow, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
<SYSTEM_TASK:> Perform a three-way comparison of op1 and op2. <END_TASK> <USER_TASK:> Description: def cmp(op1, op2): """ Perform a three-way comparison of op1 and op2. Return a positive value if op1 > op2, zero if op1 = op2, and a negative value if op1 < op2. Both op1 and op2 are considered to their full own precision, which may differ. If one of the operands is NaN, raise ValueError. Note: This function may be useful to distinguish the three possible cases. If you need to distinguish two cases only, it is recommended to use the predicate functions like 'greaterequal'; they behave like the IEEE 754 comparisons, in particular when one or both arguments are NaN. """
op1 = BigFloat._implicit_convert(op1) op2 = BigFloat._implicit_convert(op2) if is_nan(op1) or is_nan(op2): raise ValueError("Cannot perform comparison with NaN.") return mpfr.mpfr_cmp(op1, op2)
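An example of the three-way result and the NaN behaviour:

cmp(2, 3)    # negative value
cmp(3, 3)    # 0
try:
    cmp(float('nan'), 1)
except ValueError:
    pass     # NaN operands are rejected rather than silently compared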
<SYSTEM_TASK:> Compare the absolute values of op1 and op2. <END_TASK> <USER_TASK:> Description: def cmpabs(op1, op2): """ Compare the absolute values of op1 and op2. Return a positive value if op1 > op2, zero if op1 = op2, and a negative value if op1 < op2. Both op1 and op2 are considered to their full own precision, which may differ. If one of the operands is NaN, raise ValueError. Note: This function may be useful to distinguish the three possible cases. If you need to distinguish two cases only, it is recommended to use the predicate functions like 'greaterequal'; they behave like the IEEE 754 comparisons, in particular when one or both arguments are NaN. """
op1 = BigFloat._implicit_convert(op1) op2 = BigFloat._implicit_convert(op2) if is_nan(op1) or is_nan(op2): raise ValueError("Cannot perform comparison with NaN.") return mpfr.mpfr_cmpabs(op1, op2)
<SYSTEM_TASK:> Return the sign of x. <END_TASK> <USER_TASK:> Description: def sgn(x): """ Return the sign of x. Return a positive integer if x > 0, 0 if x == 0, and a negative integer if x < 0. Raise ValueError if x is a NaN. This function is equivalent to cmp(x, 0), but more efficient. """
x = BigFloat._implicit_convert(x) if is_nan(x): raise ValueError("Cannot take sign of a NaN.") return mpfr.mpfr_sgn(x)
<SYSTEM_TASK:> Return True if x > y and False otherwise. <END_TASK> <USER_TASK:> Description: def greater(x, y): """ Return True if x > y and False otherwise. This function returns False whenever x and/or y is a NaN. """
x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_greater_p(x, y)
<SYSTEM_TASK:> Return True if x >= y and False otherwise. <END_TASK> <USER_TASK:> Description: def greaterequal(x, y): """ Return True if x >= y and False otherwise. This function returns False whenever x and/or y is a NaN. """
x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_greaterequal_p(x, y)
<SYSTEM_TASK:> Return True if x < y and False otherwise. <END_TASK> <USER_TASK:> Description: def less(x, y): """ Return True if x < y and False otherwise. This function returns False whenever x and/or y is a NaN. """
x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_less_p(x, y)
<SYSTEM_TASK:> Return True if x <= y and False otherwise. <END_TASK> <USER_TASK:> Description: def lessequal(x, y): """ Return True if x <= y and False otherwise. This function returns False whenever x and/or y is a NaN. """
x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_lessequal_p(x, y)
<SYSTEM_TASK:> Return True if x == y and False otherwise. <END_TASK> <USER_TASK:> Description: def equal(x, y): """ Return True if x == y and False otherwise. This function returns False whenever x and/or y is a NaN. """
x = BigFloat._implicit_convert(x) y = BigFloat._implicit_convert(y) return mpfr.mpfr_equal_p(x, y)