def query(self, coords): pix_idx = coord2healpix(coords, self._frame, self._nside, nest=self._nest) return self._pix_val[pix_idx]
Args: coords (`astropy.coordinates.SkyCoord`): The coordinates to query. Returns: A float array of the value of the map at the given coordinates. The shape of the output is the same as the shape of the coordinates stored by `coords`.
def _serialize(xp_ast): '''Generate token strings which, when joined together, form a valid XPath serialization of the AST.''' if hasattr(xp_ast, '_serialize'): for tok in xp_ast._serialize(): yield tok elif isinstance(xp_ast, string_types): # strings in serialized xpath needed to be quoted # (e.g. for use in paths, comparisons, etc) # using repr to quote them; for unicode, the leading # u (u'') needs to be removed. yield repr(xp_ast).lstrip('u') else: yield str(xp_ast)
Generate token strings which, when joined together, form a valid XPath serialization of the AST.
def build(python=PYTHON): clean() local( "LIBRARY_PATH={library_path} CPATH={include_path} {python} " "setup.py build_ext --inplace".format( library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python, ))
Build the bigfloat library for in-place testing.
def install(python=PYTHON): local( "LIBRARY_PATH={library_path} CPATH={include_path} {python} " "setup.py build".format( library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python, )) local("sudo {python} setup.py install".format(python=python))
Install into site-packages
def uninstall(python=PYTHON): site_packages = local( "{python} -c 'from distutils.sysconfig import " "get_python_lib; print(get_python_lib())'".format(python=python), capture=True, ) with lcd(site_packages): local("sudo rm mpfr.so") local("sudo rm -fr bigfloat") local("sudo rm bigfloat*.egg-info")
Uninstall from site-packages
def sync_virtualchain(blockchain_opts, last_block, state_engine, expected_snapshots={}, tx_filter=None ): rc = False start = datetime.datetime.now() while True: try: # advance state rc = indexer.StateEngine.build(blockchain_opts, last_block + 1, state_engine, expected_snapshots=expected_snapshots, tx_filter=tx_filter ) break except Exception, e: log.exception(e) log.error("Failed to synchronize chain; exiting to safety") os.abort() time_taken = "%s seconds" % (datetime.datetime.now() - start).seconds log.info(time_taken) return rc
Synchronize the virtual blockchain state up until a given block. Obtain the operation sequence from the blockchain, up to and including last_block. That is, go and fetch each block we haven't seen since the last call to this method, extract the operations from them, and record in the given working_dir where we left off while watching the blockchain. Store the state engine state, consensus snapshots, and last block to the working directory. Return True on success Return False if we're supposed to stop indexing Abort the program on error. The implementation should catch timeouts and connection errors
def virtualchain_set_opfields( op, **fields ): # warn about unsupported fields for f in fields.keys(): if f not in indexer.RESERVED_KEYS: log.warning("Unsupported virtualchain field '%s'" % f) # propagate reserved fields for f in fields.keys(): if f in indexer.RESERVED_KEYS: op[f] = fields[f] return op
Pass along virtualchain-reserved fields to a virtualchain operation. This layer of indirection is meant to help with future compatibility, so virtualchain implementations do not try to set operation fields directly.
def fetch(): doi = '10.7910/DVN/VBSI4A' for component in ['dust', 'err']: requirements = {'filename': 'PG_{}_4096_ngp.fits'.format(component)} local_fname = os.path.join( data_dir(), 'pg2010', 'PG_{}_4096_ngp.fits'.format(component)) print('Downloading P&G (2010) {} data file to {}'.format( component, local_fname)) fetch_utils.dataverse_download_doi( doi, local_fname, file_requirements=requirements)
Downloads the Peek & Graves (2010) dust map, placing it in the data directory for :obj:`dustmaps`.
def query(self, coords, order=1): return super(PG2010Query, self).query(coords, order=order)
Returns the P&G (2010) correction to the SFD'98 E(B-V) at the specified location(s) on the sky. If component is 'err', then return the uncertainty in the correction. Args: coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query. order (Optional[:obj:`int`]): Interpolation order to use. Defaults to ``1``, for linear interpolation. Returns: A float array containing the P&G (2010) correction (or its uncertainty) to SFD'98 at every input coordinate. The shape of the output will be the same as the shape of the coordinates stored by :obj:`coords`.
def _check_construction_table(construction_table): c_table = construction_table for row, i in enumerate(c_table.index): give_message = ("Not a valid construction table. " "The index {i} uses an invalid reference").format if row == 0: pass elif row == 1: if c_table.loc[i, 'b'] not in c_table.index[:row]: raise UndefinedCoordinateSystem(give_message(i=i)) elif row == 2: reference = c_table.loc[i, ['b', 'a']] if not reference.isin(c_table.index[:row]).all(): raise UndefinedCoordinateSystem(give_message(i=i)) else: reference = c_table.loc[i, ['b', 'a', 'd']] if not reference.isin(c_table.index[:row]).all(): raise UndefinedCoordinateSystem(give_message(i=i))
Checks if a construction table uses valid references. Raises an exception (UndefinedCoordinateSystem) otherwise.
def check_dihedral(self, construction_table): c_table = construction_table angles = self.get_angle_degrees(c_table.iloc[3:, :].values) problem_index = np.nonzero((175 < angles) | (angles < 5))[0] rename = dict(enumerate(c_table.index[3:])) problem_index = [rename[i] for i in problem_index] return problem_index
Checks if the dihedral-defining atoms are colinear. Checks for each index starting from the fourth row of the ``construction_table`` if the reference atoms are colinear. Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices.
def _has_valid_abs_ref(self, i, construction_table): c_table = construction_table abs_refs = constants.absolute_refs A = np.empty((3, 3)) row = c_table.index.get_loc(i) if row > 2: message = 'The index {i} is not from the first three rows'.format raise ValueError(message(i=i)) for k in range(3): if k < row: A[k] = self.loc[c_table.iloc[row, k], ['x', 'y', 'z']] else: A[k] = abs_refs[c_table.iloc[row, k]] v1, v2 = A[2] - A[1], A[1] - A[0] K = np.cross(v1, v2) zero = np.full(3, 0.) return not (np.allclose(K, zero) or np.allclose(v1, zero) or np.allclose(v2, zero))
Checks if ``i`` uses valid absolute references. Checks for each index from the first to the third row of the ``construction_table`` if the references are colinear. This case has to be treated specially, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references (to fix translational and rotational degrees of freedom). Args: i (label): The label has to be in the first three rows. construction_table (pd.DataFrame): Returns: bool:
def check_absolute_refs(self, construction_table): c_table = construction_table problem_index = [i for i in c_table.index[:3] if not self._has_valid_abs_ref(i, c_table)] return problem_index
Checks the first three rows of ``construction_table`` for linear references. Checks for each index from the first to the third row of the ``construction_table`` if the references are colinear. This case has to be treated specially, because the references are not only atoms (to fix internal degrees of freedom) but also points in cartesian space called absolute references (to fix translational and rotational degrees of freedom). Args: construction_table (pd.DataFrame): Returns: list: A list of problematic indices.
def correct_absolute_refs(self, construction_table): c_table = construction_table.copy() abs_refs = constants.absolute_refs problem_index = self.check_absolute_refs(c_table) for i in problem_index: order_of_refs = iter(permutations(abs_refs.keys())) finished = False while not finished: if self._has_valid_abs_ref(i, c_table): finished = True else: row = c_table.index.get_loc(i) c_table.iloc[row, row:] = next(order_of_refs)[row:3] return c_table
Reindexes the ``construction_table`` if a linear reference is present in the first three rows. Uses :meth:`~Cartesian.check_absolute_refs` to obtain the problematic indices. Args: construction_table (pd.DataFrame): Returns: pd.DataFrame: Appropriately renamed construction table.
def _build_zmat(self, construction_table): c_table = construction_table default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral'] optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'}) zmat_frame = pd.DataFrame(columns=default_cols + optional_cols, dtype='float', index=c_table.index) zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols] zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom'] zmat_frame.loc[:, ['b', 'a', 'd']] = c_table zmat_values = self._calculate_zmat_values(c_table) zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values zmatrix = Zmat(zmat_frame, metadata=self.metadata, _metadata={'last_valid_cartesian': self.copy()}) return zmatrix
Create the Zmatrix from a construction table. Args: construction_table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`.
def add_data(self, new_cols=None): atoms = self['atom'] data = constants.elements if pd.api.types.is_list_like(new_cols): new_cols = set(new_cols) elif new_cols is None: new_cols = set(data.columns) else: new_cols = [new_cols] new_frame = data.loc[atoms, set(new_cols) - set(self.columns)] new_frame.index = self.index return self.__class__(pd.concat([self._frame, new_frame], axis=1))
Adds a column with the requested data. If you want to see for example the mass, the colormap used in jmol and the block of the element, just use:: ['mass', 'jmol_color', 'block'] The underlying ``pd.DataFrame`` can be accessed with ``constants.elements``. To see all available keys use ``constants.elements.info()``. The data comes from the module `mendeleev <http://mendeleev.readthedocs.org/en/latest/>`_ written by Lukasz Mentel. Please note that I added four columns to the mendeleev data:: ['atomic_radius_cc', 'atomic_radius_gv', 'gv_color', 'valency'] The ``atomic_radius_cc`` is used by default by this module for determining bond lengths. The other three are taken from the MOLCAS grid viewer written by Valera Veryazov. Args: new_cols (str): You can pass also just one value. E.g. ``'mass'`` is equivalent to ``['mass']``. If ``new_cols`` is ``None`` all available data is returned. inplace (bool): Returns: Cartesian:
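A minimal usage sketch follows (hypothetical: it assumes chemcoord is importable as ``cc`` and that a file ``water.xyz`` exists; neither is part of the original code).

import chemcoord as cc

# Hypothetical input file; any xyz file works here.
water = cc.Cartesian.read_xyz('water.xyz')

# Add the mass, the jmol colormap and the periodic-table block columns.
enriched = water.add_data(['mass', 'jmol_color', 'block'])

# A single string works as well; 'mass' is equivalent to ['mass'].
with_mass = water.add_data('mass')
print(with_mass['mass'].sum())   # total mass in g/mol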
def get_total_mass(self): try: mass = self.loc[:, 'mass'].sum() except KeyError: mass_molecule = self.add_data('mass') mass = mass_molecule.loc[:, 'mass'].sum() return mass
Returns the total mass in g/mol. Args: None Returns: float:
def has_same_sumformula(self, other): same_atoms = True for atom in set(self['atom']): own_atom_number = len(self[self['atom'] == atom]) other_atom_number = len(other[other['atom'] == atom]) same_atoms = (own_atom_number == other_atom_number) if not same_atoms: break return same_atoms
Determines if ``other`` has the same sumformula Args: other (molecule): Returns: bool:
def get_electron_number(self, charge=0): atomic_number = constants.elements['atomic_number'].to_dict() return sum([atomic_number[atom] for atom in self['atom']]) - charge
Return the number of electrons. Args: charge (int): Charge of the molecule. Returns: int:
def enhance_json_encode(api_instance, extra_settings=None): api_instance.json_encoder = JSONEncodeManager() dumps_settings = {} if extra_settings is None else extra_settings dumps_settings['default'] = api_instance.json_encoder dumps_settings.setdefault('ensure_ascii', False) @api_instance.representation('application/json') def output_json(data, code, headers=None): if current_app.debug: dumps_settings.setdefault('indent', 4) dumps_settings.setdefault('sort_keys', True) dumped = json.dumps(data, **dumps_settings) if 'indent' in dumps_settings: dumped += '\n' resp = make_response(dumped, code) resp.headers.extend(headers or {}) return resp
Use a `JSONEncodeManager` to replace the default `output_json` function of Flask-RESTful. For the advantages of using `JSONEncodeManager`, see https://github.com/anjianshi/json_encode_manager
def support_jsonp(api_instance, callback_name_source='callback'): output_json = api_instance.representations['application/json'] @api_instance.representation('application/json') def handle_jsonp(data, code, headers=None): resp = output_json(data, code, headers) if code == 200: callback = request.args.get(callback_name_source, False) if not callable(callback_name_source) \ else callback_name_source() if callback: resp.set_data(str(callback) + '(' + resp.get_data().decode("utf-8") + ')') return resp
Let the API instance respond to JSONP requests automatically. `callback_name_source` can be a string or a callable. If it is a string, the system looks for a query-string argument with that name; if one is found, the request is treated as a JSONP request and the argument's value is used as the JS callback name. If `callback_name_source` is a callable, it should return the JS callback name when the request is a JSONP request, and return False when it is not; the request is then handled according to that return value. Default supported format: url?callback=js_callback_name
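A minimal wiring sketch, assuming Flask-RESTful is importable as ``flask_restful`` and that ``support_jsonp`` is in scope; the ``Hello`` resource is a placeholder, not part of the original code.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Hello(Resource):
    def get(self):
        return {'msg': 'hello'}

api.add_resource(Hello, '/hello')

# After this call, GET /hello?callback=cb responds with: cb({"msg": "hello"})
support_jsonp(api, callback_name_source='callback')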
def sort_values(self, by, axis=0, ascending=True, kind='quicksort', na_position='last'): return self._frame.sort_values(by, axis=axis, ascending=ascending, inplace=False, kind=kind, na_position=na_position)
Sort by the values along either axis Wrapper around the :meth:`pandas.DataFrame.sort_values` method.
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True, by=None): return self._frame.sort_index(axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, by=by)
Sort object by labels (along an axis) Wrapper around the :meth:`pandas.DataFrame.sort_index` method.
def insert(self, loc, column, value, allow_duplicates=False, inplace=False): out = self if inplace else self.copy() out._frame.insert(loc, column, value, allow_duplicates=allow_duplicates) if not inplace: return out
Insert column into molecule at specified location. Wrapper around the :meth:`pandas.DataFrame.insert` method.
def make_multisig_segwit_address_from_witness_script(script): script_hash = hashing.bin_sha256(script.decode('hex')).encode('hex') scriptsig_script = '0020' + script_hash addr = btc_make_p2sh_address(scriptsig_script) return addr
Convert a multisig witness script (p2sh-p2wsh) to its p2sh address.
def make_multisig_info( m, pks, compressed=None ): pubs = [] privkeys = [] for pk in pks: priv = None if compressed in [True, False]: priv = BitcoinPrivateKey(pk, compressed=compressed) else: priv = BitcoinPrivateKey(pk) priv_hex = priv.to_hex() pub_hex = priv.public_key().to_hex() privkeys.append(priv_hex) pubs.append(pub_hex) script = make_multisig_script(pubs, m) addr = btc_make_p2sh_address(script) return { 'address': addr, 'redeem_script': script, 'private_keys': privkeys, 'segwit': False, }
Make a multisig address and redeem script. @m of the given @pks must sign. Return {'address': p2sh address, 'redeem_script': redeem script, 'private_keys': private keys, 'segwit': False} * privkeys will be hex-encoded * redeem_script will be hex-encoded
def make_multisig_segwit_info( m, pks ): pubs = [] privkeys = [] for pk in pks: priv = BitcoinPrivateKey(pk, compressed=True) priv_hex = priv.to_hex() pub_hex = priv.public_key().to_hex() privkeys.append(priv_hex) pubs.append(keylib.key_formatting.compress(pub_hex)) script = None if len(pubs) == 1: if m != 1: raise ValueError("invalid m: len(pubkeys) == 1") # 1 pubkey means p2wpkh key_hash = hashing.bin_hash160(pubs[0].decode('hex')).encode('hex') script = '160014' + key_hash addr = btc_make_p2sh_address(script[2:]) else: # 2+ pubkeys means p2wsh script = make_multisig_script(pubs, m) addr = make_multisig_segwit_address_from_witness_script(script) return { 'address': addr, 'redeem_script': script, 'private_keys': privkeys, 'segwit': True, 'm': m }
Make either a p2sh-p2wpkh or p2sh-p2wsh redeem script and p2sh address. Return {'address': p2sh address, 'redeem_script': **the witness script**, 'private_keys': privkeys, 'segwit': True} * privkeys and redeem_script will be hex-encoded
def make_multisig_wallet( m, n ): if m <= 1 and n <= 1: raise ValueError("Invalid multisig parameters") pks = [] for i in xrange(0, n): pk = BitcoinPrivateKey(compressed=True).to_wif() pks.append(pk) return make_multisig_info( m, pks )
Create a bundle of information that can be used to generate an m-of-n multisig scriptsig.
def make_segwit_info(privkey=None): if privkey is None: privkey = BitcoinPrivateKey(compressed=True).to_wif() return make_multisig_segwit_info(1, [privkey])
Create a bundle of information that can be used to generate a p2sh-p2wpkh transaction
def make_multisig_segwit_wallet( m, n ): pks = [] for i in xrange(0, n): pk = BitcoinPrivateKey(compressed=True).to_wif() pks.append(pk) return make_multisig_segwit_info(m, pks)
Create a bundle of information that can be used to generate an m-of-n multisig witness script.
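A hypothetical usage sketch for the two wallet helpers above (private keys are freshly generated; illustration only, not for real funds).

# 2-of-3 p2sh multisig wallet
wallet = make_multisig_wallet(2, 3)
print(wallet['address'])
print(wallet['redeem_script'])

# 2-of-3 p2sh-p2wsh (segwit) wallet; here 'redeem_script' holds the witness script
segwit_wallet = make_multisig_segwit_wallet(2, 3)
print(segwit_wallet['address'])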
def parse_multisig_redeemscript( redeem_script_hex ): script_parts = [] redeem_script_hex = str(redeem_script_hex) try: script_parts = btc_script_deserialize(redeem_script_hex) except: if os.environ.get("BLOCKSTACK_TEST") == "1": traceback.print_exc() log.error("Invalid redeem script %s" % redeem_script_hex) return None, None try: assert len(script_parts) > 2 assert script_parts[-1] == OPCODE_VALUES['OP_CHECKMULTISIG'] script_parts.pop(-1) # get n n = script_parts.pop(-1) pubkeys = [] # get m m = script_parts.pop(0) for i in xrange(0, n): pubk = script_parts.pop(0) # must be a public key BitcoinPublicKey(pubk) pubkeys.append(pubk) assert len(script_parts) == 0, "script_parts = %s" % script_parts return (m, pubkeys) except Exception, e: if os.environ.get("BLOCKSTACK_TEST") == "1": traceback.print_exc() log.error("Invalid redeem script %s (parses to %s)" % (redeem_script_hex, script_parts)) return (None, None)
Given a redeem script (as hex), extract multisig information. Return (m, list of public keys) on success. Return (None, None) on error.
def parse_multisig_scriptsig( scriptsig_hex ): try: script_parts = btc_script_deserialize(scriptsig_hex) except: if os.environ.get("BLOCKSTACK_TEST") == "1": traceback.print_exc() return None # sanity check return script_parts
Given a scriptsig (as hex), extract the signatures. Return list of signatures on success Return None on error
def resources_preparing_factory(app, wrapper): settings = app.app.registry.settings config = settings.get(CONFIG_RESOURCES, None) if not config: return resources = [(k, [wrapper(r, GroupResource(k, v)) for r in v]) for k, v in config] settings[CONFIG_RESOURCES] = resources
Factory which wraps all resources in settings.
def calculate_tx_fee( tx_hex, fee_per_byte ): txobj = btc_tx_deserialize(tx_hex) tx_num_bytes = len(tx_hex) / 2 num_virtual_bytes = None if btc_tx_is_segwit(tx_hex): # segwit--discount witness data witness_len = 0 for inp in txobj['ins']: witness_len += len(inp['witness_script']) / 2 # see https://bitcoincore.org/en/segwit_wallet_dev/#transaction-fee-estimation tx_num_bytes_original = tx_num_bytes - witness_len num_virtual_bytes = 3 * tx_num_bytes_original + tx_num_bytes else: # non-segwit num_virtual_bytes = tx_num_bytes * 4 return (fee_per_byte * num_virtual_bytes) / 4
High-level API call (meant to be blockchain-agnostic) What is the fee for the transaction?
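A worked example of the virtual-byte arithmetic used above; all numbers are invented for illustration.

fee_per_byte = 10            # satoshis per virtual byte (made up)
tx_num_bytes = 300           # total serialized size, including witness data
witness_len = 100            # bytes of witness data across all inputs

tx_num_bytes_original = tx_num_bytes - witness_len             # 200
num_virtual_bytes = 3 * tx_num_bytes_original + tx_num_bytes   # 900 weight units
fee = (fee_per_byte * num_virtual_bytes) / 4                   # 2250 satoshis
print(fee)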
def get_tx_fee_per_byte(bitcoind_opts=None, config_path=None, bitcoind_client=None): if bitcoind_client is None: bitcoind_client = get_bitcoind_client(bitcoind_opts=bitcoind_opts, config_path=config_path) try: # try to confirm in 2-3 blocks try: fee_info = bitcoind_client.estimatesmartfee(2) if 'errors' in fee_info and len(fee_info['errors']) > 0: fee = -1 else: fee = fee_info['feerate'] except JSONRPCException as je: fee = bitcoind_client.estimatefee(2) if fee < 0: # if we're testing, then use our own fee if os.environ.get("BLOCKSTACK_TEST") == '1' or os.environ.get("BLOCKSTACK_TESTNET", None) == "1": fee = 5500.0 / 10**8 else: log.error("Failed to estimate tx fee") return None else: log.debug("Bitcoin estimatefee(2) is {}".format(fee)) fee = float(fee) # fee is BTC/kb. Return satoshis/byte ret = int(round(fee * 10**8 / 1024.0)) log.debug("Bitcoin estimatefee(2) is {} ({} satoshi/byte)".format(fee, ret)) return ret except Exception as e: if os.environ.get("BLOCKSTACK_DEBUG") == '1': log.exception(e) log.error("Failed to estimate tx fee per byte") return None
Get the tx fee per byte from the underlying blockchain Return the fee on success Return None on error
def get_tx_fee(tx_hex, config_path=None, bitcoind_opts=None, bitcoind_client=None): tx_fee_per_byte = get_tx_fee_per_byte(config_path=config_path, bitcoind_opts=bitcoind_opts, bitcoind_client=bitcoind_client) if tx_fee_per_byte is None: return None return calculate_tx_fee(tx_hex, tx_fee_per_byte)
Get the tx fee for a tx Return the fee on success Return None on error
def handle_error(self, e): if isinstance(e, HTTPException) and not hasattr(e, 'data'): e.data = dict(message=e.description) return super(ErrorHandledApi, self).handle_error(e)
Resolve the problem that an error message specified by the programmer sometimes isn't delivered to the user. Flask-RESTful's error handler behaves differently for different kinds of exceptions. If we raise a normal Exception, it re-raises it. If we report an error with `restful.abort()`, e.g. `restful.abort(400, message="my_msg", custom_data="value")`, it makes a response like this: Status 400 Content {"message": "my_msg", "custom_data": "value"} The error message we specified is output. But if we raise an HTTPException, e.g. `from werkzeug.exceptions import BadRequest; raise BadRequest('my_msg')`, it also makes a response, but the error message we specified is lost: Status 400 Content {"status": 400, "message": "Bad Request"} The reason is that Flask-RESTful always uses the `data` attribute of the HTTPException to generate the response content, but a standard HTTPException object doesn't have this attribute. So we use this method to add it manually. Some reference material: Structure of exceptions raised by restful.abort(): code: status code description: predefined error message for this status code data: {message: error message} Structure of Python 2's standard Exception: message: error message Exceptions in Python 3 don't have the `message` attribute, but `str(exception)` returns the message. Structure of a standard `werkzeug.exceptions.HTTPException` (same as BadRequest): code: status code name: the name corresponding to the status code description: error message
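A small sketch of the behaviour this fixes (hypothetical resource; assumes Flask-RESTful is importable as ``flask_restful`` and that ``ErrorHandledApi`` is in scope).

from flask import Flask
from flask_restful import Resource
from werkzeug.exceptions import BadRequest

app = Flask(__name__)
api = ErrorHandledApi(app)

class Item(Resource):
    def get(self):
        # With a plain Api the body would be the generic
        # {"status": 400, "message": "Bad Request"};
        # with ErrorHandledApi, 'my_msg' is preserved in the response.
        raise BadRequest('my_msg')

api.add_resource(Item, '/item')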
def main(global_settings, **settings): my_session_factory = SignedCookieSessionFactory('itsaseekreet') # Add session engine config = Configurator( settings=settings, session_factory=my_session_factory ) # Add static and templates config.add_static_view(name='static', path='static') config.include('pyramid_jinja2') config.add_jinja2_search_path('templates') config.include('ps_crud') # Setting up pyramid_sacrud config.include('pyramid_sacrud', route_prefix='admin') settings = config.get_settings() settings['pyramid_sacrud.models'] = ( ('Docker', [Image(), ]), ) # Make app config.scan('views') return config.make_wsgi_app()
Entrypoint for WSGI app.
def download_schema(uri, path, comment=None): # if requests isn't available, warn and bail out if requests is None: sys.stderr.write(req_requests_msg) return # short-hand name of the schema, based on uri schema = os.path.basename(uri) try: req = requests.get(uri, stream=True) req.raise_for_status() with open(path, 'wb') as schema_download: for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks schema_download.write(chunk) # if a comment is specified, add it to the locally saved schema if comment is not None: tree = etree.parse(path) tree.getroot().append(etree.Comment(comment)) with open(path, 'wb') as xml_catalog: xml_catalog.write(etree.tostring(tree, pretty_print=True, xml_declaration=True, encoding="UTF-8")) logger.debug('Downloaded schema %s', schema) return True except requests.exceptions.HTTPError as err: msg = 'Failed to download schema %s' % schema msg += '(error codes %s)' % err.response.status_code logger.warn(msg) return False
Download a schema from a specified URI and save it locally. :param uri: url where the schema should be downloaded :param path: local file path where the schema should be saved :param comment: optional comment; if specified, will be added to the downloaded schema :returns: true on success, false if there was an error and the schema failed to download
def get_index_range(blockchain_name, blockchain_client, impl, working_dir): start_block = config.get_first_block_id(impl) try: current_block = get_blockchain_height(blockchain_name, blockchain_client) except Exception, e: log.exception(e) return None, None saved_block = StateEngine.get_lastblock(impl, working_dir) if saved_block is None: saved_block = 0 elif saved_block == current_block: start_block = saved_block elif saved_block < current_block: start_block = saved_block + 1 return start_block, current_block
Get the range of block numbers that we need to fetch from the blockchain. Requires virtualchain to have been configured with setup_virtualchain() if impl=None Return None, None if we fail to connect to the blockchain
def sqlite3_find_tool(): # find sqlite3 path = os.environ.get("PATH", None) if path is None: path = "/usr/local/bin:/usr/bin:/bin" sqlite3_path = None dirs = path.split(":") for pathdir in dirs: if len(pathdir) == 0: continue sqlite3_path = os.path.join(pathdir, 'sqlite3') if not os.path.exists(sqlite3_path): continue if not os.path.isfile(sqlite3_path): continue if not os.access(sqlite3_path, os.X_OK): continue break if sqlite3_path is None: log.error("Could not find sqlite3 binary") return None return sqlite3_path
Find the sqlite3 binary Return the path to the binary on success Return None on error
def state_engine_verify(trusted_consensus_hash, consensus_block_height, consensus_impl, untrusted_working_dir, new_state_engine, start_block=None, expected_snapshots={}): assert hasattr(consensus_impl, 'get_initial_snapshots') final_consensus_hash = state_engine_replay(consensus_impl, untrusted_working_dir, new_state_engine, consensus_block_height, \ start_block=start_block, initial_snapshots=consensus_impl.get_initial_snapshots(), expected_snapshots=expected_snapshots) # did we reach the consensus hash we expected? if final_consensus_hash is not None and final_consensus_hash == trusted_consensus_hash: return True else: log.error("Unverifiable database state stored in '{}': {} != {}".format(untrusted_working_dir, final_consensus_hash, trusted_consensus_hash)) return False
Verify that a database is consistent with a known-good consensus hash. This algorithm works by creating a new database, parsing the untrusted database, and feeding the untrusted operations into the new database block-by-block. If we derive the same consensus hash, then we can trust the database. Return True if consistent with the given consensus hash at the given consensus block height Return False if not
def db_setup(self): if self.db_exists(impl=self.impl, working_dir=self.working_dir): # resuming from previous indexing # read/write and unclean shutdown? if not self.read_only and self.db_is_indexing(self.impl, self.working_dir): log.error("Unclean shutdown detected on read/write open") return False else: # setting up for the first time assert not self.read_only, 'Cannot instantiate database if read_only is True' db_con = self.db_create(self.impl, self.working_dir) initial_snapshots = self.impl.get_initial_snapshots() for block_id in sorted(initial_snapshots.keys()): self.db_snapshot_append(db_con, int(block_id), str(initial_snapshots[block_id]), None, int(time.time())) self.chainstate_path = config.get_snapshots_filename(self.impl, self.working_dir) self.lastblock = self.get_lastblock(self.impl, self.working_dir) self.setup = True return True
Set up the state engine database. * If it doesn't exist, then create it. * If it does exist, then check that it is in a clean state. If not, then recover from a known-good backup. Return True on success Return False if there was an unclean shutdown. The caller should call db_restore() in this case to continue Raise exception on error Abort on db error
def db_restore(self, block_number=None): restored = False if block_number is not None: # restore a specific backup try: self.backup_restore(block_number, self.impl, self.working_dir) restored = True except AssertionError: log.error("Failed to restore state from {}".format(block_number)) return False else: # find the latest block backup_blocks = self.get_backup_blocks(self.impl, self.working_dir) for block_number in reversed(sorted(backup_blocks)): try: self.backup_restore(block_number, self.impl, self.working_dir) restored = True log.debug("Restored state from {}".format(block_number)) break except AssertionError: log.debug("Failed to restore state from {}".format(block_number)) continue if not restored: # failed to restore log.error("Failed to restore state from {}".format(','.join(backup_blocks))) return False # woo! self.db_set_indexing(False, self.impl, self.working_dir) return self.db_setup()
Restore the database and clear the indexing lockfile. Restore to a given block if given; otherwise use the most recent valid backup. Return True on success Return False if there is no state to restore Raise exception on error
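A sketch of the setup/restore handshake described above (``engine`` stands for a hypothetical StateEngine instance; error handling is simplified).

if not engine.db_setup():
    # unclean shutdown detected; fall back to the most recent good backup
    if not engine.db_restore():
        raise Exception("Failed to restore state from backups")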
def db_exists(cls, impl, working_dir): path = config.get_snapshots_filename(impl, working_dir) return os.path.exists(path)
Does the chainstate db exist?
def db_create(cls, impl, working_dir): global VIRTUALCHAIN_DB_SCRIPT log.debug("Setup chain state in {}".format(working_dir)) path = config.get_snapshots_filename(impl, working_dir) if os.path.exists( path ): raise Exception("Database {} already exists".format(path)) lines = [l + ";" for l in VIRTUALCHAIN_DB_SCRIPT.split(";")] con = sqlite3.connect(path, isolation_level=None, timeout=2**30) for line in lines: con.execute(line) con.row_factory = StateEngine.db_row_factory return con
Create a sqlite3 db at the given path. Create all the tables and indexes we need. Returns a db connection on success Raises an exception on error
def db_connect(cls, path): con = sqlite3.connect(path, isolation_level=None, timeout=2**30) con.row_factory = StateEngine.db_row_factory return con
connect to our chainstate db
def db_open(cls, impl, working_dir): path = config.get_snapshots_filename(impl, working_dir) return cls.db_connect(path)
Open a connection to our chainstate db
def db_query_execute(cls, cur, query, values, verbose=True): timeout = 1.0 if verbose: log.debug(cls.db_format_query(query, values)) while True: try: ret = cur.execute(query, values) return ret except sqlite3.OperationalError as oe: if oe.message == "database is locked": timeout = timeout * 2 + timeout * random.random() log.error("Query timed out due to lock; retrying in %s: %s" % (timeout, cls.db_format_query( query, values ))) time.sleep(timeout) else: log.exception(oe) log.error("FATAL: failed to execute query (%s, %s)" % (query, values)) log.error("\n".join(traceback.format_stack())) os.abort() except Exception, e: log.exception(e) log.error("FATAL: failed to execute query (%s, %s)" % (query, values)) log.error("\n".join(traceback.format_stack())) os.abort()
Execute a query. Handle db timeouts. Abort on failure.
def db_chainstate_append(cls, cur, **fields): missing = [] extra = [] for reqfield in CHAINSTATE_FIELDS: if reqfield not in fields: missing.append(reqfield) for fieldname in fields: if fieldname not in CHAINSTATE_FIELDS: extra.append(fieldname) if len(missing) > 0 or len(extra) > 0: raise ValueError("Invalid fields: missing: {}, extra: {}".format(','.join(missing), ','.join(extra))) query = 'INSERT INTO chainstate ({}) VALUES ({});'.format( ','.join( CHAINSTATE_FIELDS ), ','.join( ['?'] * len(CHAINSTATE_FIELDS))) args = tuple([fields[fieldname] for fieldname in CHAINSTATE_FIELDS]) cls.db_query_execute(cur, query, args) return True
Insert a row into the chain state. Meant to be executed as part of a transaction. Return True on success Raise an exception if the fields are invalid Abort on db error.
def db_snapshot_append(cls, cur, block_id, consensus_hash, ops_hash, timestamp): query = 'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);' args = (block_id,consensus_hash,ops_hash,timestamp) cls.db_query_execute(cur, query, args) return True
Append hash info for the last block processed, and the time at which it was done. Meant to be executed as part of a transaction. Return True on success Raise an exception on invalid block number Abort on db error
def db_chainstate_get_block(cls, cur, block_height): query = 'SELECT * FROM chainstate WHERE block_id = ? ORDER BY vtxindex;' args = (block_height,) rows = cls.db_query_execute(cur, query, args, verbose=False) ret = [] for r in rows: rowdata = { 'txid': str(r['txid']), 'block_id': r['block_id'], 'txindex': r['txindex'], 'vtxindex': r['vtxindex'], 'opcode': str(r['opcode']), 'data_hex': str(r['data_hex']), 'senders': simplejson.loads(r['senders']), 'tx_hex': str(r['tx_hex']), 'tx_merkle_path': str(r['tx_merkle_path']), 'fee': r['fee'] } ret.append(rowdata) return ret
Get the list of virtualchain transactions accepted at a given block. Returns the list of rows, where each row is a dict.
def db_set_indexing(cls, is_indexing, impl, working_dir): indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir) if is_indexing: # make sure this exists with open(indexing_lockfile_path, 'w') as f: pass else: # make sure it does not exist try: os.unlink(indexing_lockfile_path) except: pass
Set lockfile path as to whether or not the system is indexing. NOT THREAD SAFE, USE ONLY FOR CRASH DETECTION.
def db_is_indexing(cls, impl, working_dir): indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir) return os.path.exists(indexing_lockfile_path)
Is the system indexing? Return True if so, False if not.
def get_lastblock(cls, impl, working_dir): if not cls.db_exists(impl, working_dir): return None con = cls.db_open(impl, working_dir) query = 'SELECT MAX(block_id) FROM snapshots;' rows = cls.db_query_execute(con, query, (), verbose=False) ret = None for r in rows: ret = r['MAX(block_id)'] con.close() return ret
What was the last block processed? Return the number on success Return None on failure to read
def get_ops_hashes(cls, impl, working_dir, start_block_height=None, end_block_height=None): if (start_block_height is None and end_block_height is not None) or (start_block_height is not None and end_block_height is None): raise ValueError("Need either both or neither start/end block height") con = cls.db_open(impl, working_dir) range_query = ' WHERE ops_hash IS NOT NULL' range_args = () if start_block_height and end_block_height: range_query += ' AND block_id >= ? AND block_id < ?' range_args += (start_block_height,end_block_height) query = 'SELECT block_id,ops_hash FROM snapshots' + range_query + ';' args = range_args rows = cls.db_query_execute(con, query, range_args, verbose=False) ret = {} block_min = None block_max = None for r in rows: ret[r['block_id']] = r['ops_hash'] if block_min is None or block_min > r['block_id']: block_min = r['block_id'] if block_max is None or block_max < r['block_id']: block_max = r['block_id'] con.close() # sanity check if block_min is not None and block_max is not None: for i in range(block_min,block_max+1): oh = ret.get(i, None) assert oh is not None, 'Missing ops hash for {}'.format(i) assert isinstance(ret[i], (str,unicode)), 'ops hash for {} is type {}'.format(i, type(ret[i])) return ret
Read all ops hashes into memory. They're write-once, read-many, so there is no need to worry about cache coherency.
def get_state_paths(cls, impl, working_dir): return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)]
Get the set of state paths that point to the current chain and state info. Returns a list of paths.
def get_backup_blocks(cls, impl, working_dir): ret = [] backup_dir = config.get_backups_directory(impl, working_dir) if not os.path.exists(backup_dir): return [] for name in os.listdir( backup_dir ): if ".bak." not in name: continue suffix = name.split(".bak.")[-1] try: block_id = int(suffix) except: continue # must exist... backup_paths = cls.get_backup_paths(block_id, impl, working_dir) for p in backup_paths: if not os.path.exists(p): # doesn't exist block_id = None continue if block_id is not None: # have backup at this block ret.append(block_id) return ret
Get the set of block IDs that were backed up
def get_backup_paths(cls, block_id, impl, working_dir): backup_dir = config.get_backups_directory(impl, working_dir) backup_paths = [] for p in cls.get_state_paths(impl, working_dir): pbase = os.path.basename(p) backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id)) backup_paths.append( backup_path ) return backup_paths
Get the set of backup paths, given the virtualchain implementation module and block number
def backup_restore(cls, block_id, impl, working_dir): backup_dir = config.get_backups_directory(impl, working_dir) backup_paths = cls.get_backup_paths(block_id, impl, working_dir) for p in backup_paths: assert os.path.exists(p), "No such backup file: {}".format(p) for p in cls.get_state_paths(impl, working_dir): pbase = os.path.basename(p) backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id))) log.debug("Restoring '{}' to '{}'".format(backup_path, p)) shutil.copy(backup_path, p) return True
Restore from a backup, given the virtualchain implementation module and block number. NOT THREAD SAFE. DO NOT CALL WHILE INDEXING. Return True on success. Raise an exception on error, i.e. if a backup file is missing.
def make_backups(self, block_id): assert self.setup, "Not set up yet. Call .db_setup() first!" # make a backup? if self.backup_frequency is not None: if (block_id % self.backup_frequency) == 0: backup_dir = config.get_backups_directory(self.impl, self.working_dir) if not os.path.exists(backup_dir): try: os.makedirs(backup_dir) except Exception, e: log.exception(e) log.error("FATAL: failed to make backup directory '%s'" % backup_dir) traceback.print_stack() os.abort() for p in self.get_state_paths(self.impl, self.working_dir): if os.path.exists(p): try: pbase = os.path.basename(p) backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id - 1))) if not os.path.exists(backup_path): rc = sqlite3_backup(p, backup_path) if not rc: log.warning("Failed to back up as an SQLite db. Falling back to /bin/cp") shutil.copy(p, backup_path) else: log.error("Will not overwrite '%s'" % backup_path) except Exception, e: log.exception(e) log.error("FATAL: failed to back up '%s'" % p) traceback.print_stack() os.abort() return
If we're doing backups on a regular basis, then carry them out here if it is time to do so. This method does nothing otherwise. Return None on success Abort on failure
def clear_old_backups(self, block_id): assert self.setup, "Not set up yet. Call .db_setup() first!" if self.backup_max_age is None: # never delete backups return # find old backups backup_dir = config.get_backups_directory(self.impl, self.working_dir) if not os.path.exists(backup_dir): return backups = os.listdir( backup_dir ) for backup_name in backups: if backup_name in [".", ".."]: continue backup_path = os.path.join(backup_dir, backup_name) backup_block = None try: backup_block = int(backup_path.split(".")[-1]) except: # not a backup file log.info("Skipping non-backup '%s'" % backup_path) if not backup_path.endswith( ".bak.%s" % backup_block ): # not a backup file log.info("Skipping non-backup '%s'" % backup_path) continue if backup_block + self.backup_max_age < block_id: # dead log.info("Removing old backup '%s'" % backup_path) try: os.unlink(backup_path) except: pass
If we limit the number of backups we make, then clean out old ones older than block_id - backup_max_age (given in the constructor) This method does nothing otherwise. Return None on success Raise exception on error
def make_ops_snapshot( cls, serialized_ops ): record_hashes = [] for serialized_op in serialized_ops: record_hash = bin_double_sha256( serialized_op ).encode('hex') record_hashes.append(record_hash) if len(record_hashes) == 0: record_hashes.append(bin_double_sha256("").encode('hex')) # put records into their own Merkle tree, and mix the root with the consensus hashes. record_hashes.sort() record_merkle_tree = MerkleTree( record_hashes ) record_root_hash = record_merkle_tree.root() return record_root_hash
Generate a deterministic hash over the sequence of (serialized) operations.
def make_snapshot_from_ops_hash( cls, record_root_hash, prev_consensus_hashes ): # mix into previous consensus hashes... all_hashes = prev_consensus_hashes[:] + [record_root_hash] all_hashes.sort() all_hashes_merkle_tree = MerkleTree( all_hashes ) root_hash = all_hashes_merkle_tree.root() consensus_hash = StateEngine.calculate_consensus_hash( root_hash ) return consensus_hash
Generate the consensus hash from the hash over the current ops, and all previous required consensus hashes.
def make_snapshot( cls, serialized_ops, prev_consensus_hashes ): record_root_hash = StateEngine.make_ops_snapshot( serialized_ops ) log.debug("Snapshot('{}', {})".format(record_root_hash, prev_consensus_hashes)) return (cls.make_snapshot_from_ops_hash( record_root_hash, prev_consensus_hashes ), record_root_hash)
Generate a consensus hash, using the tx-ordered list of serialized name operations and a list of previous consensus hashes that contains the (k-1)th, (k-3)th, (k-7)th, ..., consensus hashes, all the way back to the beginning of time (prev_consensus_hashes[i] is the (k - (2**(i+1) - 1))th consensus hash). Returns (consensus_hash, ops_hash)
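A simplified, self-contained sketch of the same two-stage construction, using hashlib stand-ins for the project's MerkleTree and consensus-hash helpers; it illustrates the idea only and is not the actual implementation.

import hashlib

def sha256d_hex(data):
    # double SHA-256, hex-encoded (stand-in for bin_double_sha256)
    return hashlib.sha256(hashlib.sha256(data.encode('utf-8')).digest()).hexdigest()

def toy_merkle_root(hashes):
    # toy Merkle root: sort, pair up, hash, repeat (illustrative only)
    layer = sorted(hashes)
    while len(layer) > 1:
        if len(layer) % 2 == 1:
            layer.append(layer[-1])
        layer = [sha256d_hex(layer[i] + layer[i + 1]) for i in range(0, len(layer), 2)]
    return layer[0]

serialized_ops = ['+:3:foo,3:bar', '~:3:baz,3:qux']      # invented canonical ops
prev_consensus_hashes = ['aa' * 16, 'bb' * 16]           # (k-1)th, (k-3)th, ... hashes

ops_hash = toy_merkle_root([sha256d_hex(op) for op in serialized_ops])
consensus_hash = toy_merkle_root(prev_consensus_hashes + [ops_hash])
print(ops_hash)
print(consensus_hash)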
def serialize_op( cls, opcode, opdata, opfields, verbose=True ): fields = opfields.get( opcode, None ) if fields is None: log.error("BUG: unrecognized opcode '%s'" % opcode ) return None all_values = [] debug_all_values = [] missing = [] for field in fields: if not opdata.has_key(field): missing.append( field ) field_value = opdata.get(field, None) if field_value is None: field_value = "" # netstring format debug_all_values.append( str(field) + "=" + str(len(str(field_value))) + ":" + str(field_value) ) all_values.append( str(len(str(field_value))) + ":" + str(field_value) ) if len(missing) > 0: log.error("Missing fields; dump follows:\n{}".format(simplejson.dumps( opdata, indent=4, sort_keys=True ))) raise Exception("BUG: missing fields '{}'".format(",".join(missing))) if verbose: log.debug("SERIALIZE: {}:{}".format(opcode, ",".join(debug_all_values) )) field_values = ",".join( all_values ) return opcode + ":" + field_values
Given an opcode (byte), associated data (dict), and the operation fields to serialize (opfields), convert it into its canonical serialized form (i.e. in order to generate a consensus hash). opdata is allowed to have extra fields; they will be ignored. Return the canonical form on success. Return None on error.
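An illustrative example of the canonical form; the opcode and field names are invented, not real virtualchain operations.

opcode = '+'
opfields = {'+': ['name', 'value']}
opdata = {'name': 'foo', 'value': 'bar', 'extra': 'ignored'}

# each field becomes "len:value"; fields are comma-joined; extra fields are dropped
parts = ['{}:{}'.format(len(str(opdata[f])), opdata[f]) for f in opfields[opcode]]
print(opcode + ':' + ','.join(parts))   # -> +:3:foo,3:bar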
def set_virtualchain_field(cls, opdata, virtualchain_field, value): assert virtualchain_field in RESERVED_KEYS, 'Invalid field name {} (choose from {})'.format(virtualchain_field, ','.join(RESERVED_KEYS)) opdata[virtualchain_field] = value
Set a virtualchain field value. Used by implementations that generate extra consensus data at the end of a block
def parse_block(self, block_id, txs): ops = [] for i in range(0,len(txs)): tx = txs[i] op = self.parse_transaction(block_id, tx) if op is not None: ops.append( op ) return ops
Given the sequence of transactions in a block, turn them into a sequence of virtual chain operations. Return the list of successfully-parsed virtualchain transactions
def remove_reserved_keys(self, op): sanitized = {} reserved = {} for k in op.keys(): if str(k) not in RESERVED_KEYS: sanitized[str(k)] = copy.deepcopy(op[k]) else: reserved[str(k)] = copy.deepcopy(op[k]) return sanitized, reserved
Remove reserved keywords from an op dict, which can then safely be passed into the db. Returns a new op dict, and the reserved fields
def log_accept(self, block_id, vtxindex, opcode, op_data): log.debug("ACCEPT op {} at ({}, {}) ({})".format(opcode, block_id, vtxindex, json.dumps(op_data, sort_keys=True)))
Log an accepted operation
def get_block_statistics(cls, block_id): if not os.environ.get("BLOCKSTACK_TEST"): raise Exception("This method is only available in the test framework") global STATISTICS return STATISTICS.get(block_id)
Get block statistics. Only works in test mode.
def get_consensus_at(self, block_id): query = 'SELECT consensus_hash FROM snapshots WHERE block_id = ?;' args = (block_id,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['consensus_hash'] con.close() return res
Get the consensus hash at a given block. Return the consensus hash if we have one for this block. Return None if we don't
def get_block_from_consensus( self, consensus_hash ): query = 'SELECT block_id FROM snapshots WHERE consensus_hash = ?;' args = (consensus_hash,) con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) res = None for r in rows: res = r['block_id'] con.close() return res
Get the block number with the given consensus hash. Return None if there is no such block.
def get_valid_consensus_hashes( self, block_id ): first_block_to_check = block_id - self.impl.get_valid_transaction_window() query = 'SELECT consensus_hash FROM snapshots WHERE block_id >= ? AND block_id <= ?;' args = (first_block_to_check,block_id) valid_consensus_hashes = [] con = self.db_open(self.impl, self.working_dir) rows = self.db_query_execute(con, query, args, verbose=False) for r in rows: assert r['consensus_hash'] is not None assert isinstance(r['consensus_hash'], (str,unicode)) valid_consensus_hashes.append(str(r['consensus_hash'])) con.close() return valid_consensus_hashes
Get the list of valid consensus hashes for a given block.
def lb2pix(nside, l, b, nest=True): theta = np.radians(90. - b) phi = np.radians(l) if not hasattr(l, '__len__'): if (b < -90.) or (b > 90.): return -1 pix_idx = hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest) return pix_idx idx = (b >= -90.) & (b <= 90.) pix_idx = np.empty(l.shape, dtype='i8') pix_idx[idx] = hp.pixelfunc.ang2pix(nside, theta[idx], phi[idx], nest=nest) pix_idx[~idx] = -1 return pix_idx
Converts Galactic (l, b) to HEALPix pixel index. Args: nside (:obj:`int`): The HEALPix :obj:`nside` parameter. l (:obj:`float`, or array of :obj:`float`): Galactic longitude, in degrees. b (:obj:`float`, or array of :obj:`float`): Galactic latitude, in degrees. nest (Optional[:obj:`bool`]): If :obj:`True` (the default), nested pixel ordering will be used. If :obj:`False`, ring ordering will be used. Returns: The HEALPix pixel index or indices. Has the same shape as the input :obj:`l` and :obj:`b`.
def fetch(version='bayestar2017'): doi = { 'bayestar2015': '10.7910/DVN/40C44C', 'bayestar2017': '10.7910/DVN/LCYHJG' } # Raise an error if the specified version of the map does not exist try: doi = doi[version] except KeyError as err: raise ValueError('Version "{}" does not exist. Valid versions are: {}'.format( version, ', '.join(['"{}"'.format(k) for k in doi.keys()]) )) requirements = { 'bayestar2015': {'contentType': 'application/x-hdf'}, 'bayestar2017': {'filename': 'bayestar2017.h5'} }[version] local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version)) # Download the data fetch_utils.dataverse_download_doi( doi, local_fname, file_requirements=requirements)
Downloads the specified version of the Bayestar dust map. Args: version (Optional[:obj:`str`]): The map version to download. Valid versions are :obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and :obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults to :obj:`'bayestar2017'`. Raises: :obj:`ValueError`: The requested version of the map does not exist. :obj:`DownloadError`: Either no matching file was found under the given DOI, or the MD5 sum of the file was not as expected. :obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there was a problem connecting to the Dataverse.
def _raise_on_mode(self, mode): valid_modes = [ 'random_sample', 'random_sample_per_pix', 'samples', 'median', 'mean', 'best', 'percentile'] if mode not in valid_modes: raise ValueError( '"{}" is not a valid `mode`. Valid modes are:\n' ' {}'.format(mode, valid_modes) )
Checks that the provided query mode is one of the accepted values. If not, raises a :obj:`ValueError`.
def _silent_none(value): ''' >>> _silent_none(12) 12 >>> _silent_none(True) True >>> _silent_none(None) '' >>> _silent_none('') '' >>> _silent_none(False) False >>> _silent_none('None') '' >>> _silent_none("foooooooo") 'foooooooo' ''' if value is None: return '' if type(value) == int: return value if type(value) == bool: return value if not value: return '' try: if str(value) == 'None': return '' except UnicodeEncodeError: pass return value
>>> _silent_none(12) 12 >>> _silent_none(True) True >>> _silent_none(None) '' >>> _silent_none('') '' >>> _silent_none(False) False >>> _silent_none('None') '' >>> _silent_none("foooooooo") 'foooooooo'
def set_jinja2_silent_none(config): # pragma: no cover config.commit() jinja2_env = config.get_jinja2_environment() jinja2_env.finalize = _silent_none
If a variable is None, print '' instead of 'None'.
def _quotient_exponent(x, y): assert mpfr.mpfr_regular_p(x) assert mpfr.mpfr_regular_p(y) # Make copy of x with the exponent of y. x2 = mpfr.Mpfr_t() mpfr.mpfr_init2(x2, mpfr.mpfr_get_prec(x)) mpfr.mpfr_set(x2, x, mpfr.MPFR_RNDN) mpfr.mpfr_set_exp(x2, mpfr.mpfr_get_exp(y)) # Compare x2 and y, disregarding the sign. extra = mpfr.mpfr_cmpabs(x2, y) >= 0 return extra + mpfr.mpfr_get_exp(x) - mpfr.mpfr_get_exp(y)
Given two positive finite MPFR instances x and y, find the exponent of x / y; that is, the unique integer e such that 2**(e-1) <= x / y < 2**e.
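A pure-Python check of the definition, independent of MPFR; the values are arbitrary.

import math

x, y = 10.0, 3.0                      # x / y is about 3.33

# math.frexp returns (m, e) with x / y == m * 2**e and 0.5 <= m < 1,
# which gives exactly the exponent e defined above: 2**(e-1) <= x / y < 2**e.
_, e = math.frexp(x / y)
print(e)                              # -> 2
print(2**(e - 1) <= x / y < 2**e)     # -> True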
def script_hex_to_address( script_hex, blockchain='bitcoin', **blockchain_opts): if blockchain == 'bitcoin': return btc_script_hex_to_address(script_hex, **blockchain_opts) else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
High-level API call (meant to be blockchain agnostic) Examine a script (hex-encoded) and extract an address. Return the address on success Return None on error
def make_payment_script(address, blockchain='bitcoin', **blockchain_opts): if blockchain == 'bitcoin': return btc_make_payment_script(address, **blockchain_opts) else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
High-level API call (meant to be blockchain agnostic) Make a pay-to-address script.
def make_data_script( data, blockchain='bitcoin', **blockchain_opts): if blockchain == 'bitcoin': return btc_make_data_script(data, **blockchain_opts) else: raise ValueError("Unknown blockchain '{}'".format(blockchain))
High-level API call (meant to be blockchain agnostic) Make a data-bearing transaction output. Data must be a hex string Returns a hex string.
def _validate_handler(column_name, value, predicate_refs): # only does validate when attribute value is not None # else, just return it, let sqlalchemy decide if the value was legal according to `nullable` argument's value if value is not None: for predicate_ref in predicate_refs: predicate, predicate_name, predicate_args = _decode_predicate_ref(predicate_ref) validate_result = predicate(value, *predicate_args) if isinstance(validate_result, dict) and 'value' in validate_result: value = validate_result['value'] elif type(validate_result) != bool: raise Exception( 'predicate (name={}) can only return bool or dict(value=new_value) value'.format(predicate_name)) elif not validate_result: raise ModelInvalid(u'db model validate failed: column={}, value={}, predicate={}, arguments={}'.format( column_name, value, predicate_name, ','.join(map(str, predicate_args)) )) return value
Handle a predicate's return value.
def row_structural_typicality(X_L_list, X_D_list, row_id): count = 0 assert len(X_L_list) == len(X_D_list) for X_L, X_D in zip(X_L_list, X_D_list): for r in range(len(X_D[0])): for c in range( len(X_L['column_partition']['assignments'])): if X_D[X_L['column_partition']['assignments'][c]][r] == \ X_D[X_L['column_partition']['assignments'][c]][row_id]: count += 1 return float(count) / \ (len(X_D_list) * len(X_D[0]) * len(X_L_list[0]['column_partition']['assignments']))
Returns how typical the row is (opposite of how anomalous).
def column_structural_typicality(X_L_list, col_id): count = 0 for X_L in X_L_list: for c in range(len(X_L['column_partition']['assignments'])): if X_L['column_partition']['assignments'][col_id] ==\ X_L['column_partition']['assignments'][c]: count += 1 return float(count) / \ (len(X_L_list) * len(X_L_list[0]['column_partition']['assignments']))
Returns how typical the column is (opposite of how anomalous).
def simple_predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q): logprobs = [float(simple_predictive_probability(M_c, X_L, X_D, Y, Q)) for X_L, X_D in zip(X_L_list, X_D_list)] return logmeanexp(logprobs)
Returns the simple predictive probability, averaged over each sample.
def predictive_probability_multistate(M_c, X_L_list, X_D_list, Y, Q): logprobs = [float(predictive_probability(M_c, X_L, X_D, Y, Q)) for X_L, X_D in zip(X_L_list, X_D_list)] return logmeanexp(logprobs)
Returns the predictive probability, averaged over each sample.
def similarity( M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_column=None): score = 0.0 # Set col_idxs: defaults to all columns. if target_column: if type(target_column) == str: col_idxs = [M_c['name_to_idx'][target_column]] elif type(target_column) == list: col_idxs = target_column else: col_idxs = [target_column] else: col_idxs = M_c['idx_to_name'].keys() col_idxs = [int(col_idx) for col_idx in col_idxs] ## Iterate over all latent states. for X_L, X_D in zip(X_L_list, X_D_list): for col_idx in col_idxs: view_idx = X_L['column_partition']['assignments'][col_idx] if X_D[view_idx][given_row_id] == X_D[view_idx][target_row_id]: score += 1.0 return score / (len(X_L_list)*len(col_idxs))
Returns the similarity of the given row to the target row, averaged over all the column indexes given by col_idxs. Similarity is defined as the proportion of times that two cells are in the same view and category.
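A toy numeric example of the averaging; the numbers are invented.

# 2 latent states and 3 target columns give 2 * 3 = 6 (state, column) pairs;
# if the given row and the target row share a category in 4 of them:
n_states, n_cols, matches = 2, 3, 4
print(float(matches) / (n_states * n_cols))   # -> 0.666...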
def query(self, coords): # gal = coords.transform_to('galactic') gal = coords l = gal.l.deg b = gal.b.deg # Detect scalar input scalar_input = not hasattr(l, '__len__') if scalar_input: l = np.array([l]) b = np.array([b]) # Fill return array with NaNs ebv = np.empty(l.shape, dtype='f8') ebv[:] = np.nan # Fill northern cap idx = (b >= 65.) & (b <= 90.) ebv[idx] = self._lb2ebv_northcap(l[idx], b[idx]) # Fill southern cap idx = (b <= -65.) & (b >= -90.) ebv[idx] = self._lb2ebv_southcap(l[idx], b[idx]) # Fill northern midplane idx = (b < 65.) & (b >= 10.) ebv[idx] = self._lb2ebv_midnorth(l[idx], b[idx]) # Fill southern midplane idx = (b > -65.) & (b <= -10.) ebv[idx] = self._lb2ebv_midsouth(l[idx], b[idx]) if scalar_input: ebv = ebv[0] return ebv
Returns E(B-V) at the specified location(s) on the sky. Args: coords (`astropy.coordinates.SkyCoord`): The coordinates to query. Returns: A float array of reddening, in units of E(B-V), at the given coordinates. The shape of the output is the same as the shape of the coordinates stored by `coords`.
def db_parse(block_id, opcode, op_payload, senders, inputs, outputs, fee, db_state=None):
    print "\nreference implementation of db_parse\n"
    return None
Given the block ID, and information from what looks like an OP_RETURN transaction that is part of the virtual chain, parse the transaction's OP_RETURN nulldata into a dict. Return the dict if this is a valid op. Return None if not. NOTE: the virtual chain indexer reserves all keys that start with 'virtualchain_'
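A hedged sketch of what a concrete `db_parse` might look like for a hypothetical virtual chain whose nulldata payload is simply a UTF-8 name; the `'NAME_REGISTER'` opcode and the returned field names are illustrative only, not part of virtualchain.

def db_parse(block_id, opcode, op_payload, senders, inputs, outputs, fee, db_state=None):
    if opcode != 'NAME_REGISTER':
        return None
    try:
        name = op_payload.decode('utf-8')
    except UnicodeDecodeError:
        return None
    # Keys prefixed with 'virtualchain_' are reserved by the indexer.
    return {'opcode': opcode, 'name': name}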
def db_check(block_id, opcode, op, txid, vtxindex, checked, db_state=None):
    print "\nreference implementation of db_check\n"
    return False
Given the block ID and a parsed operation, check to see if this is a *valid* operation for the purposes of this virtual chain's database. Return True if so; False if not.
def admin_docker_list_view(context, request):
    return {
        'paginator': Page(
            context.all,
            url_maker=lambda p: request.path_url + "?page=%s" % p,
            page=int(request.params.get('page', 1)),
            items_per_page=6
        )
    }
Show list of docker images.
def admin_docker_massaction_view(context, request):
    items_list = request.POST.getall('selected_item')
    for item in items_list:
        try:
            context.cli.remove_image(item)
            request.session.flash(["deleted {}".format(item), "success"])
        except docker.errors.APIError as e:
            request.session.flash([e.explanation, "error"])
    url = "/" + request.sacrud_prefix + "/" + resource_path(context.__parent__)
    return HTTPFound(location=url)
Mass action view: remove the selected Docker images, flashing a success or error message for each, then redirect back to the image list.
def coord2healpix(coords, frame, nside, nest=True):
    if coords.frame.name != frame:
        c = coords.transform_to(frame)
    else:
        c = coords

    if hasattr(c, 'ra'):
        phi = c.ra.rad
        theta = 0.5*np.pi - c.dec.rad
        return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
    elif hasattr(c, 'l'):
        phi = c.l.rad
        theta = 0.5*np.pi - c.b.rad
        return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
    elif hasattr(c, 'x'):
        return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest)
    elif hasattr(c, 'w'):
        return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest)
    else:
        raise dustexceptions.CoordFrameError(
            'No method to transform from coordinate frame "{}" to HEALPix.'.format(
                frame))
Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix system is defined on the coordinate frame ``frame``. Args: coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates. frame (:obj:`str`): The frame in which the HEALPix system is defined. nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2. nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering is desired. ``False`` for ring ordering. Returns: An array of pixel indices (integers), with the same shape as the input SkyCoord coordinates (:obj:`coords.shape`). Raises: :obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported.
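A usage sketch, assuming `healpy` is installed and this function has been imported.

from astropy.coordinates import SkyCoord
import astropy.units as u

c = SkyCoord([0., 90.] * u.deg, [30., -45.] * u.deg, frame='galactic')
pix = coord2healpix(c, 'galactic', nside=64, nest=True)   # same shape as c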
def ensure_coord_type(f):
    @wraps(f)
    def _wrapper_func(self, coords, **kwargs):
        if not isinstance(coords, coordinates.SkyCoord):
            raise TypeError('`coords` must be an astropy.coordinates.SkyCoord object.')
        return f(self, coords, **kwargs)
    return _wrapper_func
A decorator for class methods of the form .. code-block:: python Class.method(self, coords, **kwargs) where ``coords`` is an :obj:`astropy.coordinates.SkyCoord` object. The decorator raises a :obj:`TypeError` if the ``coords`` that gets passed to ``Class.method`` is not an :obj:`astropy.coordinates.SkyCoord` instance. Args: f (class method): A function with the signature ``(self, coords, **kwargs)``, where ``coords`` is a :obj:`SkyCoord` object containing an array. Returns: A function that raises a :obj:`TypeError` if ``coords`` is not an :obj:`astropy.coordinates.SkyCoord` object, but which otherwise behaves the same as the decorated function.
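Typical use is as a guard on a subclass's ``query`` method; the ``ExampleMap`` class below is hypothetical.

class ExampleMap(object):
    @ensure_coord_type
    def query(self, coords, **kwargs):
        # `coords` is guaranteed to be a SkyCoord here.
        return coords.shape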
def query_gal(self, l, b, d=None, **kwargs):
    if not isinstance(l, units.Quantity):
        l = l * units.deg
    if not isinstance(b, units.Quantity):
        b = b * units.deg

    if d is None:
        coords = coordinates.SkyCoord(l, b, frame='galactic')
    else:
        if not isinstance(d, units.Quantity):
            d = d * units.kpc
        coords = coordinates.SkyCoord(
            l, b,
            distance=d,
            frame='galactic')

    return self.query(coords, **kwargs)
Query using Galactic coordinates. Args: l (:obj:`float`, scalar or array-like): Galactic longitude, in degrees, or as an :obj:`astropy.unit.Quantity`. b (:obj:`float`, scalar or array-like): Galactic latitude, in degrees, or as an :obj:`astropy.unit.Quantity`. d (Optional[:obj:`float`, scalar or array-like]): Distance from the Solar System, in kpc, or as an :obj:`astropy.unit.Quantity`. Defaults to ``None``, meaning no distance is specified. **kwargs: Any additional keyword arguments accepted by derived classes. Returns: The results of the query, which must be implemented by derived classes.
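A usage sketch, assuming `m` is an instance of a derived map class; plain floats are interpreted as degrees (and kpc for the distance).

import astropy.units as u

ebv = m.query_gal([0., 90.], [10., -30.])                  # degrees, no distance
ebv = m.query_gal(45. * u.deg, 5. * u.deg, d=2. * u.kpc)   # explicit units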
def query_equ(self, ra, dec, d=None, frame='icrs', **kwargs):
    valid_frames = ['icrs', 'fk4', 'fk5', 'fk4noeterms']
    if frame not in valid_frames:
        raise ValueError(
            '`frame` not understood. Must be one of {}.'.format(valid_frames))

    if not isinstance(ra, units.Quantity):
        ra = ra * units.deg
    if not isinstance(dec, units.Quantity):
        dec = dec * units.deg

    if d is None:
        coords = coordinates.SkyCoord(ra, dec, frame=frame)
    else:
        if not isinstance(d, units.Quantity):
            d = d * units.kpc
        coords = coordinates.SkyCoord(
            ra, dec,
            distance=d,
            frame=frame)

    return self.query(coords, **kwargs)
Query using Equatorial coordinates. By default, the ICRS frame is used, although other frames implemented by :obj:`astropy.coordinates` may also be specified. Args: ra (:obj:`float`, scalar or array-like): Right ascension, in degrees, or as an :obj:`astropy.unit.Quantity`. dec (:obj:`float`, scalar or array-like): Declination, in degrees, or as an :obj:`astropy.unit.Quantity`. d (Optional[:obj:`float`, scalar or array-like]): Distance from the Solar System, in kpc, or as an :obj:`astropy.unit.Quantity`. Defaults to ``None``, meaning no distance is specified. frame (Optional[:obj:`str`]): The coordinate frame. Can be ``'icrs'`` (the default), ``'fk5'``, ``'fk4'`` or ``'fk4noeterms'``. **kwargs: Any additional keyword arguments accepted by derived classes. Returns: The results of the query, which must be implemented by derived classes.
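The `frame` argument selects among the supported equatorial frames; a usage sketch, again assuming `m` is a derived map instance.

ebv = m.query_equ(266.4, -28.9)                 # ICRS by default, degrees
ebv = m.query_equ(266.4, -28.9, frame='fk5')    # any other supported frame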
def populate_model(model_or_inst, excludes=None, only=None):
    inst = model_or_inst if _is_inst(model_or_inst) else model_or_inst()
    parser = make_request_parser(model_or_inst, excludes, only, for_populate=True)
    req_args = parser.parse_args()
    for key, value in req_args.items():
        setattr(inst, key, value)
    return inst
Call `make_request_parser()` to build a `RequestParser`, use it to extract the user's request data, and populate that data into a model instance. If a model class is passed instead of a model instance, a new instance is created from the extracted data.
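A hedged sketch of how this helper might be used inside a Flask-RESTful resource; `User` and `db` are assumed to come from the application, not from this module.

from flask_restful import Resource

class UserResource(Resource):
    def post(self):
        # Build a new User from the request data, excluding the primary key.
        user = populate_model(User, excludes=['id'])
        db.session.add(user)
        db.session.commit()
        return {'id': user.id}, 201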
def _mpfr_get_str2(base, ndigits, op, rounding_mode):
    digits, exp = mpfr.mpfr_get_str(base, ndigits, op, rounding_mode)
    negative = digits.startswith('-')
    if negative:
        digits = digits[1:]
    return negative, digits, exp
Variant of mpfr_get_str, for internal use: simply splits off the '-' sign from the digit string, and returns a triple (sign, digits, exp) Also converts the byte-string produced by mpfr_get_str to Unicode.
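A standalone illustration of the sign-splitting step, using the digit string and exponent that `mpfr_get_str` would plausibly return for -0.15625 in base 2.

digits, exp = '-101000', -2      # value = -0.101000 (binary) * 2**-2
negative = digits.startswith('-')
if negative:
    digits = digits[1:]
print(negative, digits, exp)     # True 101000 -2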