def calculate_merkle_pairs(bin_hashes, hash_function=bin_double_sha256): hashes = list(bin_hashes) # if there are an odd number of hashes, double up the last one if len(hashes) % 2 == 1: hashes.append(hashes[-1]) new_hashes = [] for i in range(0, len(hashes), 2): new_hashes.append(hash_function(hashes[i] + hashes[i+1])) return new_hashes
Calculate the parents of a row of a merkle tree. Takes in a list of binary hashes and returns the list of parent binary hashes, such that parents[i] == hash(bin_hashes[2*i] + bin_hashes[2*i+1]).
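A minimal usage sketch, assuming calculate_merkle_pairs and bin_double_sha256 are importable from the surrounding module; with an odd number of leaves the last one is hashed with itself:

leaves = [b'\x01' * 32, b'\x02' * 32, b'\x03' * 32]   # odd count: last leaf gets doubled
parents = calculate_merkle_pairs(leaves)
assert len(parents) == 2
assert parents[1] == bin_double_sha256(leaves[2] + leaves[2])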
def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256): merkle_root = hex_to_bin_reversed(merkle_root_hex) leaf_hash = hex_to_bin_reversed(leaf_hash_hex) path = MerkleTree.path_deserialize(serialized_path) path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path] if len(path) == 0: raise ValueError("Empty path") cur_hash = leaf_hash for i in range(0, len(path)): if path[i]['order'] == 'l': # left sibling cur_hash = hash_function(path[i]['hash'] + cur_hash) elif path[i]['order'] == 'r': # right sibling cur_hash = hash_function(cur_hash + path[i]['hash']) elif path[i]['order'] == 'm': # merkle root assert len(path) == 1 return cur_hash == path[i]['hash'] return cur_hash == merkle_root
Verify a merkle path. The given path is the path from a leaf node to the root. merkle_root_hex is a little-endian, hex-encoded hash. serialized_path is the serialized merkle path; its entries are little-endian, hex-encoded hashes. Return True if the path is consistent with the merkle root. Return False if not.
def path_serialize(cls, path): # make it into a netstring path_parts = ['{}-{}'.format(p['order'], p['hash']) for p in path] path_ns_parts = ['{}:{},'.format(len(pp), pp) for pp in path_parts] path_str = ''.join(path_ns_parts) return '{}:{},'.format(len(path_str), path_str)
Given a list of [{'hash': ..., 'order': ...}], serialize it to a string.
def path_deserialize(cls, serialized_path): def _chomp_netstring_payload(s): try: ns_len_str, ns_body = s.split(':', 1) ns_len = int(ns_len_str) assert ns_body[ns_len] == ',' ns_payload = ns_body[:ns_len] return ns_payload, ns_body[ns_len+1:] except: raise ValueError("Invalid netstring '{}'".format(s)) path_str, extra = _chomp_netstring_payload(serialized_path) if len(extra) > 0: raise ValueError("Dangling data in '{}'".format(serialized_path)) path = [] while True: path_part, path_str = _chomp_netstring_payload(path_str) try: order, hash_hex = path_part.split('-', 1) assert order in ['l', 'r', 'm'] path.append({'order': order, 'hash': hash_hex}) except: raise ValueError("Invalid path entry {}".format(path_part)) if len(path_str) == 0: break return path
Given a netstring of path parts, go and parse it back into [{'hash': ..., 'order': ...}]
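A hypothetical round trip, assuming the MerkleTree class methods shown above are importable:

path = [{'order': 'r', 'hash': 'ab' * 32}, {'order': 'l', 'hash': 'cd' * 32}]
serialized = MerkleTree.path_serialize(path)       # netstring of per-entry netstrings
assert MerkleTree.path_deserialize(serialized) == path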
def _coords2vec(self, coords): # c = coords.transform_to(self._frame) # vec = np.empty((c.shape[0], 2), dtype='f8') # vec[:,0] = coordinates.Longitude(coords.l, wrap_angle=360.*units.deg).deg[:] # vec[:,1] = coords.b.deg[:] # return np.radians(vec) c = coords.transform_to(self._frame).represent_as('cartesian') vec_norm = np.sqrt(c.x**2 + c.y**2 + c.z**2) vec = np.empty((c.shape[0], 3), dtype=c.x.dtype) vec[:,0] = (c.x / vec_norm).value[:] vec[:,1] = (c.y / vec_norm).value[:] vec[:,2] = (c.z / vec_norm).value[:] return vec
Converts from sky coordinates to unit vectors. Before conversion to unit vectors, the coordinates are transformed to the coordinate system used internally by the :obj:`UnstructuredDustMap`, which can be set during initialization of the class. Args: coords (:obj:`astropy.coordinates.SkyCoord`): Input coordinates to convert to unit vectors. Returns: Cartesian unit vectors corresponding to the input coordinates, after transforming to the coordinate system used internally by the :obj:`UnstructuredDustMap`.
def _coords2idx(self, coords): x = self._coords2vec(coords) idx = self._kd.query(x, p=self._metric_p, distance_upper_bound=self._max_pix_scale) return idx[1]
Converts from sky coordinates to pixel indices. Args: coords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates. Returns: Pixel indices of the coordinates, with the same shape as the input coordinates. Pixels which are outside the map are given an index equal to the number of pixels in the map.
def _gal2idx(self, gal): # Make sure that l is in domain [-180 deg, 180 deg) l = coordinates.Longitude(gal.l, wrap_angle=180.*units.deg) j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4') k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4') idx = (j < 0) | (j >= self._shape[0]) | (k < 0) | (k >= self._shape[1]) if np.any(idx): j[idx] = -1 k[idx] = -1 return j, k, ~idx
Converts from Galactic coordinates to pixel indices. Args: gal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates. Must store an array of coordinates (i.e., not be scalar). Returns: ``j, k, mask`` - Pixel indices of the coordinates, as well as a mask of in-bounds coordinates. Outputs have the same shape as the input coordinates.
def add_block_hash( self, block_hash ): if len(self.block_hashes) > 2000: raise Exception("A getheaders request cannot have over 2000 block hashes") hash_num = int("0x" + block_hash, 16) bh = BlockHash() bh.block_hash = hash_num self.block_hashes.append( bh ) self.hash_stop = hash_num
Append a block hash for which to get headers. A getheaders request may contain at most 2000 block hashes.
def run( self ): self.handshake() try: self.loop() except socket.error, se: if self.finished: return True else: raise
Interact with the blockchain peer, until we get a socket error or we exit the loop explicitly. Return True on success. Raise on error.
def send_getheaders( self, prev_block_hash ): getheaders = GetHeaders() getheaders.add_block_hash( prev_block_hash ) log.debug("send getheaders") self.send_message( getheaders )
Request block headers from a particular block hash. Will receive up to 2000 block headers, starting with the block *after* the given block hash (prev_block_hash).
def handle_version(self, message_header, message): log.debug("handle version") verack = VerAck() log.debug("send VerAck") self.send_message(verack) self.verack = True # begin! self.send_getheaders( self.first_block_hash )
This method will handle the Version message and will send a VerAck message when it receives the Version message. :param message_header: The Version message header :param message: The Version message
def handle_ping(self, message_header, message): log.debug("handle ping") pong = Pong() pong.nonce = message.nonce log.debug("send pong") self.send_message(pong)
This method will handle the Ping message and then will answer every Ping message with a Pong message using the nonce received. :param message_header: The header of the Ping message :param message: The Ping message
def init(cls, path): if not os.path.exists( path ): block_header_serializer = BlockHeaderSerializer() genesis_block_header = BlockHeader() if USE_MAINNET: # we know the mainnet block header # but we don't know the testnet/regtest block header genesis_block_header.version = 1 genesis_block_header.prev_block = 0 genesis_block_header.merkle_root = int(GENESIS_BLOCK_MERKLE_ROOT, 16 ) genesis_block_header.timestamp = 1231006505 genesis_block_header.bits = int( "1d00ffff", 16 ) genesis_block_header.nonce = 2083236893 genesis_block_header.txns_count = 0 with open(path, "wb") as f: bin_data = block_header_serializer.serialize( genesis_block_header ) f.write( bin_data )
Set up an SPV client. If the locally-stored headers do not exist, then create a stub headers file with the genesis block information.
def height(cls, path): if os.path.exists( path ): sb = os.stat( path ) h = (sb.st_size / BLOCK_HEADER_SIZE) - 1 return h else: return None
Get the locally-stored block height
def read_header_at( cls, f): header_parser = BlockHeaderSerializer() hdr = header_parser.deserialize( f ) h = {} h['version'] = hdr.version h['prev_block_hash'] = "%064x" % hdr.prev_block h['merkle_root'] = "%064x" % hdr.merkle_root h['timestamp'] = hdr.timestamp h['bits'] = hdr.bits h['nonce'] = hdr.nonce h['hash'] = hdr.calculate_hash() return h
Given an open file-like object, read a block header from it and return it as a dict containing: * version (int) * prev_block_hash (hex str) * merkle_root (hex str) * timestamp (int) * bits (int) * nonce (int) * hash (hex str)
def load_header_chain( cls, chain_path ): header_parser = BlockHeaderSerializer() chain = [] height = 0 with open(chain_path, "rb") as f: h = SPVClient.read_header_at( f ) h['block_height'] = height height += 1 chain.append(h) return chain
Load the header chain from disk. Each chain element will be a dictionary with the fields returned by read_header_at (version, prev_block_hash, merkle_root, timestamp, bits, nonce, hash), plus its block_height.
def read_header(cls, headers_path, block_height, allow_none=False): if os.path.exists(headers_path): header_parser = BlockHeaderSerializer() sb = os.stat( headers_path ) if sb.st_size < BLOCK_HEADER_SIZE * block_height: # beyond EOF if allow_none: return None else: raise Exception('EOF on block headers') with open( headers_path, "rb" ) as f: f.seek( block_height * BLOCK_HEADER_SIZE, os.SEEK_SET ) hdr = SPVClient.read_header_at( f ) return hdr else: if allow_none: return None else: raise Exception('No such file or directory: {}'.format(headers_path))
Get a block header at a particular height from disk. Return the header if found. Return None if not found and allow_none is True; raise otherwise.
def get_target(cls, path, index, chain=None): if chain is None: chain = [] # Do not use mutables as default values! max_target = 0x00000000FFFF0000000000000000000000000000000000000000000000000000 if index == 0: return 0x1d00ffff, max_target first = SPVClient.read_header( path, (index-1)*BLOCK_DIFFICULTY_CHUNK_SIZE) last = SPVClient.read_header( path, index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1, allow_none=True) if last is None: for h in chain: if h.get('block_height') == index*BLOCK_DIFFICULTY_CHUNK_SIZE - 1: last = h nActualTimespan = last.get('timestamp') - first.get('timestamp') nTargetTimespan = BLOCK_DIFFICULTY_INTERVAL nActualTimespan = max(nActualTimespan, nTargetTimespan/4) nActualTimespan = min(nActualTimespan, nTargetTimespan*4) bits = last.get('bits') # convert to bignum MM = 256*256*256 a = bits%MM if a < 0x8000: a *= 256 target = (a) * pow(2, 8 * (bits/MM - 3)) # new target new_target = min( max_target, (target * nActualTimespan)/nTargetTimespan ) # convert it to bits c = ("%064X"%new_target)[2:] i = 31 while c[0:2]=="00": c = c[2:] i -= 1 c = int('0x'+c[0:6],16) if c >= 0x800000: c /= 256 i += 1 new_bits = c + MM * i return new_bits, new_target
Calculate the target difficulty at a particular difficulty interval (index). Return (bits, target) on success
def block_header_verify( cls, headers_path, block_id, block_hash, block_header ): prev_header = cls.read_header( headers_path, block_id - 1 ) prev_hash = prev_header['hash'] return bits.block_header_verify( block_header, prev_hash, block_hash )
Given the block's numeric ID, its hash, and the bitcoind-returned block_data, use the SPV header chain to verify the block's integrity. block_header must be a dict with the following structure: * version: protocol version (int) * prevhash: previous block hash (hex str) * merkleroot: block Merkle root (hex str) * timestamp: UNIX time stamp (int) * bits: difficulty bits (hex str) * nonce: PoW nonce (int) * hash: block hash (hex str) (i.e. the format that the reference bitcoind returns via JSON RPC) Return True on success Return False on error
def block_verify( cls, verified_block_header, block_txids ): block_data = { 'merkleroot': verified_block_header['merkleroot'], 'tx': block_txids } return bits.block_verify( block_data )
Given the block's verified header structure (see block_header_verify) and its list of transaction IDs (as hex strings), verify that the transaction IDs are legit. Return True on success Return False on error.
def tx_hash( cls, tx ): tx_hex = bits.btc_bitcoind_tx_serialize( tx ) tx_hash = hashing.bin_double_sha256(tx_hex.decode('hex'))[::-1].encode('hex') return tx_hash
Calculate the hash of a transaction structure given by bitcoind
def tx_verify( cls, verified_block_txids, tx ): tx_hash = cls.tx_hash( tx ) return tx_hash in verified_block_txids
Given the block's verified block txids, verify that a transaction is legit. @tx must be a dict with the following fields: * locktime: int * version: int * vin: list of dicts with: * vout: int, * hash: hex str * sequence: int (optional) * scriptSig: dict with: * hex: hex str * vout: list of dicts with: * value: float * scriptPubKey: dict with: * hex: hex str
def tx_index( cls, verified_block_txids, verified_tx ): tx_hash = cls.tx_hash( verified_tx ) return verified_block_txids.index( tx_hash )
Given a block's verified block txids and a verified transaction, find the transaction's index in the list of txids.
def block_header_index( cls, path, block_header ): with open( path, "rb" ) as f: chain_raw = f.read() for blk in xrange(0, len(chain_raw) / (BLOCK_HEADER_SIZE)): if chain_raw[blk * BLOCK_HEADER_SIZE : blk * BLOCK_HEADER_SIZE + BLOCK_HEADER_SIZE] == block_header: return blk return -1
Given a block's serialized header, go and find out what its block ID is (if it is present at all). Return the >= 0 index on success Return -1 if not found. NOTE: this is slow
def verify_header_chain(cls, path, chain=None): if chain is None: chain = SPVClient.load_header_chain( path ) prev_header = chain[0] for i in xrange(1, len(chain)): header = chain[i] height = header.get('block_height') prev_hash = prev_header.get('hash') if prev_hash != header.get('prev_block_hash'): log.error("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash'))) return False bits, target = SPVClient.get_target( path, height/BLOCK_DIFFICULTY_CHUNK_SIZE, chain) if bits != header.get('bits'): log.error("bits mismatch: %s vs %s" % (bits, header.get('bits'))) return False _hash = header.get('hash') if int('0x'+_hash, 16) > target: log.error("insufficient proof of work: %s vs target %s" % (int('0x'+_hash, 16), target)) return False prev_header = header return True
Verify that a given chain of block headers has sufficient proof of work.
def parse_mail_date(datestr): '''Helper method used by :meth:`Message.from_email_message` to convert dates from rfc822 format to iso 8601. :param datestr: string containing a date in rfc822 format :returns: string with date in iso 8601 format ''' time_tuple = email.utils.parsedate_tz(datestr) if time_tuple is None: return datestr dt = datetime.datetime.fromtimestamp(email.utils.mktime_tz(time_tuple)) return dt.isoformat()
Helper method used by :meth:`Message.from_email_message` to convert dates from rfc822 format to iso 8601. :param datestr: string containing a date in rfc822 format :returns: string with date in iso 8601 format
def IEEEContext(bitwidth): try: precision = {16: 11, 32: 24, 64: 53, 128: 113}[bitwidth] except KeyError: if not (bitwidth >= 128 and bitwidth % 32 == 0): raise ValueError("nonstandard bitwidth: bitwidth should be " "16, 32, 64, 128, or k*32 for some k >= 4") # The formula for the precision involves rounding 4*log2(width) to the # nearest integer. We have: # # round(4*log2(width)) == round(log2(width**8)/2) # == floor((log2(width**8) + 1)/2) # == (width**8).bit_length() // 2 # # (Note that 8*log2(width) can never be an odd integer, so we # don't care which way half-way cases round in the 'round' # operation.) precision = bitwidth - _bit_length(bitwidth ** 8) // 2 + 13 emax = 1 << bitwidth - precision - 1 return Context( precision=precision, emin=4 - emax - precision, emax=emax, subnormalize=True, )
Return IEEE 754-2008 context for a given bit width. The IEEE 754 standard specifies binary interchange formats with bitwidths 16, 32, 64, 128, and all multiples of 32 greater than 128. This function returns the context corresponding to the interchange format for the given bitwidth. See section 3.6 of IEEE 754-2008 or the bigfloat source for more details.
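A quick self-contained check of the precision formula used above for widths beyond the lookup table; the expected values are the IEEE 754-2008 precisions for binary128 and binary256.

def _ieee_precision(bitwidth):
    # precision = bitwidth - round(4*log2(bitwidth)) + 13,
    # with the rounding computed exactly via (bitwidth**8).bit_length() // 2
    return bitwidth - (bitwidth ** 8).bit_length() // 2 + 13

assert _ieee_precision(128) == 113
assert _ieee_precision(256) == 237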
def view(molecule, viewer=settings['defaults']['viewer'], use_curr_dir=False): try: molecule.view(viewer=viewer, use_curr_dir=use_curr_dir) except AttributeError: if pd.api.types.is_list_like(molecule): cartesian_list = molecule else: raise ValueError('Argument is neither list nor Cartesian.') if use_curr_dir: TEMP_DIR = os.path.curdir else: TEMP_DIR = tempfile.gettempdir() def give_filename(i): filename = 'ChemCoord_list_' + str(i) + '.molden' return os.path.join(TEMP_DIR, filename) i = 1 while os.path.exists(give_filename(i)): i = i + 1 to_molden(cartesian_list, buf=give_filename(i)) def open_file(i): """Open file and close after being finished.""" try: subprocess.check_call([viewer, give_filename(i)]) except (subprocess.CalledProcessError, FileNotFoundError): raise finally: if use_curr_dir: pass else: os.remove(give_filename(i)) Thread(target=open_file, args=(i,)).start()
View your molecule or list of molecules. .. note:: This function writes a temporary file and opens it with an external viewer. If you modify your molecule afterwards you have to call view again in order to see the changes. Args: molecule: Can be a cartesian, or a list of cartesians. viewer (str): The external viewer to use. The default is specified in settings.viewer use_curr_dir (bool): If True, the temporary file is written to the current directory. Otherwise it gets written to the OS-dependent temporary directory. Returns: None:
def to_molden(cartesian_list, buf=None, sort_index=True, overwrite=True, float_format='{:.6f}'.format): if sort_index: cartesian_list = [molecule.sort_index() for molecule in cartesian_list] give_header = ("[MOLDEN FORMAT]\n" + "[N_GEO]\n" + str(len(cartesian_list)) + "\n" + '[GEOCONV]\n' + 'energy\n{energy}' + 'max-force\n{max_force}' + 'rms-force\n{rms_force}' + '[GEOMETRIES] (XYZ)\n').format values = len(cartesian_list) * '1\n' energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list] energy = '\n'.join(energy) + '\n' header = give_header(energy=energy, max_force=values, rms_force=values) coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format) for x in cartesian_list] output = header + '\n'.join(coordinates) if buf is not None: if overwrite: with open(buf, mode='w') as f: f.write(output) else: with open(buf, mode='x') as f: f.write(output) else: return output
Write a list of Cartesians into a molden file. .. note:: Since it permanently writes a file, this function is strictly speaking **not side-effect free**. The list to be written is of course not changed. Args: cartesian_list (list): buf (str): StringIO-like, optional buffer to write to sort_index (bool): If sort_index is true, the Cartesian is sorted by the index before writing. overwrite (bool): May overwrite existing files. float_format (one-parameter function): Formatter function to apply to column’s elements if they are floats. The result of this function must be a unicode string. Returns: formatted : string (or unicode, depending on data and options)
def write_molden(*args, **kwargs): message = 'Will be removed in the future. Please use to_molden().' with warnings.catch_warnings(): warnings.simplefilter("always") warnings.warn(message, DeprecationWarning) return to_molden(*args, **kwargs)
Deprecated, use :func:`~chemcoord.xyz_functions.to_molden`
def read_molden(inputfile, start_index=0, get_bonds=True): from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian with open(inputfile, 'r') as f: found = False while not found: line = f.readline() if '[N_GEO]' in line: found = True number_of_molecules = int(f.readline().strip()) energies = [] found = False while not found: line = f.readline() if 'energy' in line: found = True for _ in range(number_of_molecules): energies.append(float(f.readline().strip())) found = False while not found: line = f.readline() if '[GEOMETRIES] (XYZ)' in line: found = True current_line = f.tell() number_of_atoms = int(f.readline().strip()) f.seek(current_line) cartesians = [] for energy in energies: cartesian = Cartesian.read_xyz( f, start_index=start_index, get_bonds=get_bonds, nrows=number_of_atoms, engine='python') cartesian.metadata['energy'] = energy cartesians.append(cartesian) return cartesians
Read a molden file. Args: inputfile (str): start_index (int): Returns: list: A list containing :class:`~chemcoord.Cartesian` is returned.
def isclose(a, b, align=False, rtol=1.e-5, atol=1.e-8): coords = ['x', 'y', 'z'] if not (set(a.index) == set(b.index) and np.alltrue(a.loc[:, 'atom'] == b.loc[a.index, 'atom'])): message = 'Can only compare molecules with the same atoms and labels' raise ValueError(message) if align: a = a.get_inertia()['transformed_Cartesian'] b = b.get_inertia()['transformed_Cartesian'] A, B = a.loc[:, coords], b.loc[a.index, coords] out = a._frame.copy() out['atom'] = True out.loc[:, coords] = np.isclose(A, B, rtol=rtol, atol=atol) return out
Compare two molecules for numerical equality. Args: a (Cartesian): b (Cartesian): align (bool): a and b are prealigned along their principal axes of inertia and moved to their barycenters before comparing. rtol (float): Relative tolerance for the numerical equality comparison; see :func:`numpy.isclose` for further explanation. atol (float): Absolute tolerance for the numerical equality comparison; see :func:`numpy.isclose` for further explanation. Returns: :class:`numpy.ndarray`: Boolean array.
def allclose(a, b, align=False, rtol=1.e-5, atol=1.e-8): return np.alltrue(isclose(a, b, align=align, rtol=rtol, atol=atol))
Compare two molecules for numerical equality. Args: a (Cartesian): b (Cartesian): align (bool): a and b are prealigned along their principal axes of inertia and moved to their barycenters before comparing. rtol (float): Relative tolerance for the numerical equality comparison; see :func:`numpy.allclose` for further explanation. atol (float): Absolute tolerance for the numerical equality comparison; see :func:`numpy.allclose` for further explanation. Returns: bool:
def concat(cartesians, ignore_index=False, keys=None): frames = [molecule._frame for molecule in cartesians] new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True) if type(ignore_index) is bool: new = pd.concat(frames, ignore_index=ignore_index, keys=keys, verify_integrity=True) else: new = pd.concat(frames, ignore_index=True, keys=keys, verify_integrity=True) if type(ignore_index) is int: new.index = range(ignore_index, ignore_index + len(new)) else: new.index = ignore_index return cartesians[0].__class__(new)
Join list of cartesians into one molecule. Wrapper around the :func:`pandas.concat` function. Default values are the same as in the pandas function except for ``verify_integrity`` which is set to true in case of this library. Args: ignore_index (sequence, bool, int): If it is a boolean, it behaves like in the description of :meth:`pandas.DataFrame.append`. If it is a sequence, it becomes the new index. If it is an integer, ``range(ignore_index, ignore_index + len(new))`` becomes the new index. keys (sequence): If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level Returns: Cartesian:
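A minimal usage sketch, assuming two Cartesian instances m1 and m2 (hypothetical names):

dimer = concat([m1, m2], ignore_index=True)    # atoms re-indexed 0..n-1
shifted = concat([m1, m2], ignore_index=100)   # index starts at 100 instead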
def dot(A, B): try: result = A.__matmul__(B) if result is NotImplemented: result = B.__rmatmul__(A) except AttributeError: result = B.__rmatmul__(A) return result
Matrix multiplication between A and B This function is equivalent to ``A @ B``, which is unfortunately not possible under python 2.x. Args: A (sequence): B (sequence): Returns: sequence:
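A small sketch, assuming dot() as defined above is importable and a recent numpy; it behaves like ``A @ B`` without requiring the ``@`` operator:

import numpy as np

A = np.eye(2)
B = np.array([[1., 2.], [3., 4.]])
assert np.allclose(dot(A, B), B)    # identity times B returns B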
def get_rotation_matrix(axis, angle): axis = normalize(np.array(axis)) if not axis.shape == (3, ): raise ValueError('axis.shape has to be 3') angle = float(angle) return _jit_get_rotation_matrix(axis, angle)
Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The Input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array):
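A brief usage sketch, assuming get_rotation_matrix is importable from the module above; a counterclockwise 90 degree rotation about z maps the x axis onto the y axis:

import numpy as np

R = get_rotation_matrix([0, 0, 1], np.pi / 2)
assert np.allclose(np.dot(R, [1., 0., 0.]), [0., 1., 0.])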
def _jit_get_rotation_matrix(axis, angle): axis = _jit_normalize(axis) a = m.cos(angle / 2) b, c, d = axis * m.sin(angle / 2) rot_matrix = np.empty((3, 3)) rot_matrix[0, 0] = a**2 + b**2 - c**2 - d**2 rot_matrix[0, 1] = 2. * (b * c - a * d) rot_matrix[0, 2] = 2. * (b * d + a * c) rot_matrix[1, 0] = 2. * (b * c + a * d) rot_matrix[1, 1] = a**2 + c**2 - b**2 - d**2 rot_matrix[1, 2] = 2. * (c * d - a * b) rot_matrix[2, 0] = 2. * (b * d - a * c) rot_matrix[2, 1] = 2. * (c * d + a * b) rot_matrix[2, 2] = a**2 + d**2 - b**2 - c**2 return rot_matrix
Returns the rotation matrix. This function returns a matrix for the counterclockwise rotation around the given axis. The Input angle is in radians. Args: axis (vector): angle (float): Returns: Rotation matrix (np.array):
def orthonormalize_righthanded(basis): v1, v2 = basis[:, 0], basis[:, 1] e1 = normalize(v1) e3 = normalize(np.cross(e1, v2)) e2 = normalize(np.cross(e3, e1)) return np.array([e1, e2, e3]).T
Orthonormalizes righthandedly a given 3D basis. This function returns a right-handed orthonormalized basis. Since only the first two vectors in the basis are used, it does not matter if you give two or three vectors. Right handed means, that: .. math:: \\vec{e_1} \\times \\vec{e_2} &= \\vec{e_3} \\\\ \\vec{e_2} \\times \\vec{e_3} &= \\vec{e_1} \\\\ \\vec{e_3} \\times \\vec{e_1} &= \\vec{e_2} \\\\ Args: basis (np.array): An array of shape = (3,2) or (3,3) Returns: new_basis (np.array): A right handed orthonormalized basis.
def get_kabsch_rotation(Q, P): # Naming of variables follows the wikipedia article: # http://en.wikipedia.org/wiki/Kabsch_algorithm A = np.dot(np.transpose(P), Q) # One can't initialize an array over its transposed V, S, W = np.linalg.svd(A) # pylint:disable=unused-variable W = W.T d = np.linalg.det(np.dot(W, V.T)) return np.linalg.multi_dot((W, np.diag([1., 1., d]), V.T))
Calculate the optimal rotation from ``P`` onto ``Q``. Using the Kabsch algorithm the optimal rotation matrix for the rotation of ``P`` onto ``Q`` is calculated. The algorithm is described very well in `wikipedia <http://en.wikipedia.org/wiki/Kabsch_algorithm>`_. Args: Q (np.array): P (np.array): Returns: :class:`~numpy.array`: Rotation matrix
def _empty_except_predicates(xast, node, context): '''Check if a node is empty (no child nodes or attributes) except for any predicates defined in the specified xpath. :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param node: lxml element to check :param context: any context required for the xpath (e.g., namespace definitions) :returns: boolean indicating if the element is empty or not ''' # copy the node, remove predicates, and check for any remaining # child nodes or attributes node_c = deepcopy(node) _remove_predicates(xast, node_c, context) return bool(len(node_c) == 0 and len(node_c.attrib) == 0)
Check if a node is empty (no child nodes or attributes) except for any predicates defined in the specified xpath. :param xast: parsed xpath (xpath abstract syntax tree) from :mod:`eulxml.xpath` :param node: lxml element to check :param context: any context required for the xpath (e.g., namespace definitions) :returns: boolean indicating if the element is empty or not
def pop(self, i=None): if i is None: i = len(self) - 1 val = self[i] del(self[i]) return val
Remove the item at the given position in the list, and return it. If no index is specified, removes and returns the last item in the list.
def insert(self, i, x): if i == len(self): # end of list or empty list: append self.append(x) elif len(self.matches) > i: # create a new xml node at the requested position insert_index = self.matches[i].getparent().index(self.matches[i]) _create_xml_node(self.xast, self.node, self.context, insert_index) # then use default set logic self[i] = x else: raise IndexError("Can't insert '%s' at index %d - list length is only %d" \ % (x, i, len(self)))
Insert an item (x) at a given position (i).
def get_field(self, schema): type = schema.get_type(self.schema_type) logger.debug('Found schema type %s; base type %s, restricted values %s' % \ (self.schema_type, type.base_type(), type.restricted_values)) kwargs = {} if type.restricted_values: # field has a restriction with enumerated values - pass as choices to field # - empty value at beginning of list for unset value; for required fields, # will force user to select a value, rather than first item being default choices = [] choices.extend(type.restricted_values) # restricted values could include a blank # if it's there, remove it so we don't get two if '' in choices: choices.remove('') choices.insert(0, '') # add blank choice at the beginning of the list kwargs['choices'] = choices # TODO: possibly also useful to look for pattern restrictions basetype = type.base_type() if basetype == 'string': newfield = StringField(self.xpath, required=self.required, **kwargs) # copy original creation counter to newly created field # to preserve declaration order newfield.creation_counter = self.creation_counter return newfield else: raise Exception("basetype %s is not yet supported by SchemaField" % basetype)
Get the requested type definition from the schema and return the appropriate :class:`~eulxml.xmlmap.fields.Field`. :param schema: instance of :class:`eulxml.xmlmap.core.XsdSchema` :rtype: :class:`eulxml.xmlmap.fields.Field`
def roundrobin(*iterables): "roundrobin('ABC', 'D', 'EF') --> A D E B F C" # Recipe credited to George Sakkis pending = len(iterables) nexts = itertools.cycle(iter(it).next for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = itertools.cycle(itertools.islice(nexts, pending))
roundrobin('ABC', 'D', 'EF') --> A D E B F C
def get_scc_from_tuples(constraints): classes = unionfind.classes(constraints) return dict((x, tuple(c)) for x, c in classes.iteritems())
Given set of equivalences, return map of transitive equivalence classes. >> constraints = [(1,2), (2,3)] >> get_scc_from_tuples(constraints) { 1: (1, 2, 3), 2: (1, 2, 3), 3: (1, 2, 3), }
def _parse_field_list(fieldnames, include_parents=False): field_parts = (name.split('.') for name in fieldnames) return _collect_fields(field_parts, include_parents)
Parse a list of field names, possibly including dot-separated subform fields, into an internal ParsedFieldList object representing the base fields and subform listed. :param fieldnames: a list of field names as strings. dot-separated names are interpreted as subform fields. :param include_parents: optional boolean, defaults to False. if True, subform fields implicitly include their parent fields in the parsed list.
def _collect_fields(field_parts_list, include_parents): fields = [] subpart_lists = defaultdict(list) for parts in field_parts_list: field, subparts = parts[0], parts[1:] if subparts: if include_parents and field not in fields: fields.append(field) subpart_lists[field].append(subparts) else: fields.append(field) subfields = dict((field, _collect_fields(subparts, include_parents)) for field, subparts in six.iteritems(subpart_lists)) return ParsedFieldList(fields, subfields)
utility function to enable recursion in _parse_field_list()
def xmlobject_to_dict(instance, fields=None, exclude=None, prefix=''): data = {} # convert prefix to combining form for convenience if prefix: prefix = '%s-' % prefix else: prefix = '' for name, field in six.iteritems(instance._fields): # not editable? if fields and not name in fields: continue if exclude and name in exclude: continue if isinstance(field, xmlmap.fields.NodeField): nodefield = getattr(instance, name) if nodefield is not None: subprefix = '%s%s' % (prefix, name) node_data = xmlobject_to_dict(nodefield, prefix=subprefix) data.update(node_data) # FIXME: fields/exclude if isinstance(field, xmlmap.fields.NodeListField): for i, child in enumerate(getattr(instance, name)): subprefix = '%s%s-%d' % (prefix, name, i) node_data = xmlobject_to_dict(child, prefix=subprefix) data.update(node_data) # FIXME: fields/exclude else: data[prefix + name] = getattr(instance, name) return data
Generate a dictionary based on the data in an XmlObject instance to pass as a Form's ``initial`` keyword argument. :param instance: instance of :class:`~eulxml.xmlmap.XmlObject` :param fields: optional list of fields - if specified, only the named fields will be included in the data returned :param exclude: optional list of fields to exclude from the data
def xmlobjectform_factory(model, form=XmlObjectForm, fields=None, exclude=None, widgets=None, max_num=None, label=None, can_delete=True, extra=None, can_order=False): attrs = {'model': model} if fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude if widgets is not None: attrs['widgets'] = widgets if max_num is not None: attrs['max_num'] = max_num if extra is not None: attrs['extra'] = extra if can_delete is not None: attrs['can_delete'] = can_delete if can_order is not None: attrs['can_order'] = can_order # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. parent = (object,) if hasattr(form, 'Meta'): parent = (form.Meta, object) Meta = type(str('Meta'), parent, attrs) # Give this new form class a reasonable name. class_name = model.__name__ + str('XmlObjectForm') # Class attributes for the new form class. form_class_attrs = { 'Meta': Meta, # django has a callback formfield here; do we need that? # label for a subform/formset 'form_label': label, } return XmlObjectFormType(class_name, (form,), form_class_attrs)
Dynamically generate a new :class:`XmlObjectForm` class using the specified :class:`eulxml.xmlmap.XmlObject` class. Based on django's modelform_factory.
def update_instance(self): # NOTE: django model form has a save method - not applicable here, # since an XmlObject by itself is not expected to have a save method # (only likely to be saved in context of a fedora or exist object) if hasattr(self, 'cleaned_data'): # possible to have an empty object/no data opts = self._meta # NOTE: _fields doesn't seem to order, which is # problematic for some xml (e.g., where order matters for validity) # use field order as declared in the form for update order # when possible. # (NOTE: this could be problematic also, since display order may # not always be the same as schema order) fields_in_order = [] if hasattr(self.Meta, 'fields'): fields_in_order.extend(self.Meta.fields) fields_in_order.extend([name for name in six.iterkeys(self.instance._fields) if name in self.Meta.fields]) else: fields_in_order = self.instance._fields.keys() for name in fields_in_order: # for name in self.instance._fields.iterkeys(): # for name in self.declared_fields.iterkeys(): if opts.fields and name not in opts.parsed_fields.fields: continue if opts.exclude and name in opts.parsed_exclude.fields: continue if name in self.cleaned_data: # special case: we don't want empty attributes and elements # for fields which returned no data from the form # converting '' to None and letting XmlObject handle if self.cleaned_data[name] == '': self.cleaned_data[name] = None setattr(self.instance, name, self.cleaned_data[name]) # update sub-model portions via any subforms for name, subform in six.iteritems(self.subforms): self._update_subinstance(name, subform) for formset in six.itervalues(self.formsets): formset.update_instance() return self.instance
Save bound form data into the XmlObject model instance and return the updated instance.
def _update_subinstance(self, name, subform): old_subinstance = getattr(self.instance, name) new_subinstance = subform.update_instance() # if our instance previously had no node for the subform AND the # updated one has data, then attach the new node. if old_subinstance is None and not new_subinstance.is_empty(): setattr(self.instance, name, new_subinstance) # on the other hand, if the instance previously had a node for the # subform AND the updated one is empty, then remove the node. if old_subinstance is not None and new_subinstance.is_empty(): delattr(self.instance, name)
Save bound data for a single subform into the XmlObject model instance.
def is_valid(self): valid = super(XmlObjectForm, self).is_valid() and \ all(s.is_valid() for s in six.itervalues(self.subforms)) and \ all(s.is_valid() for s in six.itervalues(self.formsets)) # schema validation can only be done after regular validation passes, # because xmlobject must be updated with cleaned_data if valid and self.instance is not None: # update instance required to check schema-validity instance = self.update_instance() if instance.is_valid(): return True else: # if not schema-valid, add validation errors to error dictionary # NOTE: not overriding _get_errors because that is used by the built-in validation # append to any existing non-field errors if NON_FIELD_ERRORS not in self._errors: self._errors[NON_FIELD_ERRORS] = self.error_class() self._errors[NON_FIELD_ERRORS].append("There was an unexpected schema validation error. " + "This should not happen! Please report the following errors:") for err in instance.validation_errors(): self._errors[NON_FIELD_ERRORS].append('VALIDATION ERROR: %s' % err.message) return False return valid
Returns True if this form and all subforms (if any) are valid. If all standard form-validation tests pass, uses :class:`~eulxml.xmlmap.XmlObject` validation methods to check for schema-validity (if a schema is associated) and report errors. Additional notes: * schema validation requires that the :class:`~eulxml.xmlmap.XmlObject` be initialized with the cleaned form data, so if normal validation checks pass, the associated :class:`~eulxml.xmlmap.XmlObject` instance will be updated with data via :meth:`update_instance` * schema validation errors SHOULD NOT happen in a production system :rtype: boolean
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row): parts = [] parts.append(super(XmlObjectForm, self)._html_output(normal_row, error_row, row_ender, help_text_html, errors_on_separate_row)) def _subform_output(subform): return subform._html_output(normal_row, error_row, row_ender, help_text_html, errors_on_separate_row) for name, subform in six.iteritems(self.subforms): # use form label if one was set if hasattr(subform, 'form_label'): name = subform.form_label parts.append(self._html_subform_output(subform, name, _subform_output)) for name, formset in six.iteritems(self.formsets): parts.append(u(formset.management_form)) # use form label if one was set # - use declared subform label if any if hasattr(formset.forms[0], 'form_label') and \ formset.forms[0].form_label is not None: name = formset.forms[0].form_label # fallback to generated label from field name elif hasattr(formset, 'form_label'): name = formset.form_label # collect the html output for all the forms in the formset subform_parts = list() for subform in formset.forms: subform_parts.append(self._html_subform_output(subform, gen_html=_subform_output, suppress_section=True)) # then wrap all forms in the section container, so formset label appears once parts.append(self._html_subform_output(name=name, content=u'\n'.join(subform_parts))) return mark_safe(u'\n'.join(parts))
Extend BaseForm's helper function for outputting HTML. Used by as_table(), as_ul(), as_p(). Combines the HTML version of the main form's fields with the HTML content for any subforms.
def connect_bitcoind_impl( bitcoind_opts ): if 'bitcoind_port' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_port'] is None: log.error("No port given") raise ValueError("No RPC port given (bitcoind_port)") if 'bitcoind_timeout' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_timeout'] is None: # default bitcoind_opts['bitcoind_timeout'] = 300 try: int(bitcoind_opts['bitcoind_port']) except: log.error("Not an int: '%s'" % bitcoind_opts.get('bitcoind_port')) raise try: float(bitcoind_opts.get('bitcoind_timeout', 300)) except: log.error("Not a float: '%s'" % bitcoind_opts.get('bitcoind_timeout', 300)) raise return create_bitcoind_connection( bitcoind_opts['bitcoind_user'], bitcoind_opts['bitcoind_passwd'], \ bitcoind_opts['bitcoind_server'], int(bitcoind_opts['bitcoind_port']), \ bitcoind_opts.get('bitcoind_use_https', False), float(bitcoind_opts.get('bitcoind_timeout', 300)) )
Create a connection to bitcoind, using a dict of config options.
def get_bitcoind_client(config_path=None, bitcoind_opts=None): if bitcoind_opts is None and config_path is None: raise ValueError("Need bitcoind opts or config path") bitcoind_opts = get_bitcoind_config(config_file=config_path) log.debug("Connect to bitcoind at %s:%s (%s)" % (bitcoind_opts['bitcoind_server'], bitcoind_opts['bitcoind_port'], config_path)) client = connect_bitcoind_impl( bitcoind_opts ) return client
Connect to bitcoind
def ecdsa_private_key(privkey_str=None, compressed=None): if compressed is None: compressed = False if privkey_str is not None: if len(privkey_str) == 66 and privkey_str[-2:] == '01': compressed = True return _ECPrivateKey(privkey_str, compressed=compressed)
Make a private key, but enforce the following rule: * unless the key's hex encoding specifically ends in '01', treat it as uncompressed.
def ecdsa_public_key(pubkey_str, compressed=None): if compressed == True: pubkey_str = keylib.key_formatting.compress(pubkey_str) elif compressed == False: pubkey_str = keylib.key_formatting.decompress(pubkey_str) return _ECPublicKey(pubkey_str)
Make a public key object, but enforce the following rule: * if compressed is True or False, make the key compressed/uncompressed. * otherwise, return whatever the hex encoding is
def set_privkey_compressed(privkey, compressed=True): if len(privkey) != 64 and len(privkey) != 66: raise ValueError("expected 32-byte private key as a hex string") # compressed? if compressed and len(privkey) == 64: privkey += '01' if not compressed and len(privkey) == 66: if privkey[-2:] != '01': raise ValueError("private key does not end in '01'") privkey = privkey[:-2] return privkey
Make sure the given private key hex string is in the requested form: with a trailing '01' if compressed, without it otherwise.
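A small usage sketch, assuming set_privkey_compressed from the module above (the key value is a placeholder):

k = 'aa' * 32                                        # 64-char hex private key
assert set_privkey_compressed(k, compressed=True) == k + '01'
assert set_privkey_compressed(k + '01', compressed=False) == k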
def get_pubkey_hex( privatekey_hex ): if not isinstance(privatekey_hex, (str, unicode)): raise ValueError("private key is not a hex string but {}".format(str(type(privatekey_hex)))) # remove 'compressed' hint if len(privatekey_hex) > 64: if privatekey_hex[-2:] != '01': raise ValueError("private key does not end in 01") privatekey_hex = privatekey_hex[:64] # get hex public key privatekey_int = int(privatekey_hex, 16) privk = ec.derive_private_key(privatekey_int, ec.SECP256K1(), default_backend()) pubk = privk.public_key() x = pubk.public_numbers().x y = pubk.public_numbers().y pubkey_hex = "04{:064x}{:064x}".format(x, y) return pubkey_hex
Get the uncompressed hex-encoded public key corresponding to a private key
def get_uncompressed_private_and_public_keys( privkey_str ): if not isinstance(privkey_str, (str, unicode)): raise ValueError("private key given is not a string") pk = ecdsa_private_key(str(privkey_str)) pk_hex = pk.to_hex() # force uncompressed if len(pk_hex) > 64: if pk_hex[-2:] != '01': raise ValueError("private key does not end in '01'") pk_hex = pk_hex[:64] pubk_hex = ecdsa_private_key(pk_hex).public_key().to_hex() return pk_hex, pubk_hex
Get the private and public keys from a private key string. Make sure that both are *uncompressed*.
def decode_privkey_hex(privkey_hex): if not isinstance(privkey_hex, (str, unicode)): raise ValueError("private key is not a string") # force uncompressed priv = str(privkey_hex) if len(priv) > 64: if priv[-2:] != '01': raise ValueError("private key does not end in '01'") priv = priv[:64] pk_i = int(priv, 16) return pk_i
Decode a private key for ecdsa signature
def decode_pubkey_hex(pubkey_hex): if not isinstance(pubkey_hex, (str, unicode)): raise ValueError("public key is not a string") pubk = keylib.key_formatting.decompress(str(pubkey_hex)) assert len(pubk) == 130 pubk_raw = pubk[2:] pubk_i = (int(pubk_raw[:64], 16), int(pubk_raw[64:], 16)) return pubk_i
Decode a public key for ecdsa verification
def encode_signature(sig_r, sig_s): # enforce low-s if sig_s * 2 >= SECP256k1_order: log.debug("High-S to low-S") sig_s = SECP256k1_order - sig_s sig_bin = '{:064x}{:064x}'.format(sig_r, sig_s).decode('hex') assert len(sig_bin) == 64 sig_b64 = base64.b64encode(sig_bin) return sig_b64
Encode an ECDSA signature, with low-s
def decode_signature(sigb64): sig_bin = base64.b64decode(sigb64) if len(sig_bin) != 64: raise ValueError("Invalid base64 signature") sig_hex = sig_bin.encode('hex') sig_r = int(sig_hex[:64], 16) sig_s = int(sig_hex[64:], 16) return sig_r, sig_s
Decode a signature into r, s
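A hypothetical round trip, assuming encode_signature and decode_signature above are importable and s is already below the curve order:

r, s = 12345, 67890
sig_b64 = encode_signature(r, s)
assert decode_signature(sig_b64) == (r, s)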
def sign_raw_data(raw_data, privatekey_hex): if not isinstance(raw_data, (str, unicode)): raise ValueError("Data is not a string") raw_data = str(raw_data) si = ECSigner(privatekey_hex) si.update(raw_data) return si.finalize()
Sign a string of data. Returns signature as a base64 string
def verify_raw_data(raw_data, pubkey_hex, sigb64): if not isinstance(raw_data, (str, unicode)): raise ValueError("data is not a string") raw_data = str(raw_data) vi = ECVerifier(pubkey_hex, sigb64) vi.update(raw_data) return vi.verify()
Verify the signature over a string, given the public key and base64-encoded signature. Return True on success. Return False on error.
def sign_digest(hash_hex, privkey_hex, hashfunc=hashlib.sha256): if not isinstance(hash_hex, (str, unicode)): raise ValueError("hash hex is not a string") hash_hex = str(hash_hex) pk_i = decode_privkey_hex(privkey_hex) privk = ec.derive_private_key(pk_i, ec.SECP256K1(), default_backend()) sig = privk.sign(hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256()))) sig_r, sig_s = decode_dss_signature(sig) sigb64 = encode_signature(sig_r, sig_s) return sigb64
Given a digest and a private key, sign it. Return the base64-encoded signature
def verify_digest(hash_hex, pubkey_hex, sigb64, hashfunc=hashlib.sha256): if not isinstance(hash_hex, (str, unicode)): raise ValueError("hash hex is not a string") hash_hex = str(hash_hex) pubk_uncompressed_hex = keylib.key_formatting.decompress(pubkey_hex) sig_r, sig_s = decode_signature(sigb64) pubk = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256K1(), pubk_uncompressed_hex.decode('hex')).public_key(default_backend()) signature = encode_dss_signature(sig_r, sig_s) try: pubk.verify(signature, hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256()))) return True except InvalidSignature: return False
Given a digest, public key (as hex), and a base64 signature, verify that the public key signed the digest. Return True if so Return False if not
def finalize(self): signature = self.signer.finalize() sig_r, sig_s = decode_dss_signature(signature) sig_b64 = encode_signature(sig_r, sig_s) return sig_b64
Get the base64-encoded signature itself. Can only be called once.
def update(self, data): try: self.verifier.update(data) except TypeError: log.error("Invalid data: {} ({})".format(type(data), data)) raise
Update the hash used to generate the signature
def semiconvergents(x): (q, n), d = divmod(x.numerator, x.denominator), x.denominator yield Fraction(q) p0, q0, p1, q1 = 1, 0, q, 1 while n: (q, n), d = divmod(d, n), n for _ in range(q): p0, q0 = p0+p1, q0+q1 yield Fraction(p0, q0) p0, q0, p1, q1 = p1, q1, p0, q0
Semiconvergents of continued fraction expansion of a Fraction x.
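A short usage sketch, assuming semiconvergents as defined above; 415/93 has the continued fraction expansion [4; 2, 6, 7], so the generator starts at 4 and ends at the value itself:

from fractions import Fraction

sc = list(semiconvergents(Fraction(415, 93)))
assert sc[0] == Fraction(4)
assert sc[-1] == Fraction(415, 93)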
def logn2(n, p): with precision(p): extra = 10 while True: with precision(p+extra): # use extra precision for intermediate step log2upper = log2(n, RoundTowardPositive) log2lower = log2(n, RoundTowardNegative) lower = div(1, log2upper, RoundTowardNegative) upper = div(1, log2lower, RoundTowardPositive) # if lower and upper are adjacent (or equal) we're done if next_up(lower) == upper: return (Fraction(*lower.as_integer_ratio()), Fraction(*upper.as_integer_ratio())) # otherwise, increase the precision and try again extra += 10
Best p-bit lower and upper bounds for log(2)/log(n), as Fractions.
def sort_values(self, by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): if inplace: self._frame.sort_values( by, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position) else: new = self.__class__(self._frame.sort_values( by, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position)) new.metadata = self.metadata.copy() new._metadata = copy.deepcopy(self._metadata) return new
Sort by the values along either axis Wrapper around the :meth:`pandas.DataFrame.sort_values` method.
def replace(self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method='pad', axis=None): if inplace: self._frame.replace(to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method, axis=axis) else: new = self.__class__(self._frame.replace( to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method, axis=axis)) new.metadata = self.metadata.copy() new._metadata = copy.deepcopy(self._metadata) return new
Replace values given in 'to_replace' with 'value'. Wrapper around the :meth:`pandas.DataFrame.replace` method.
def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False): if drop is True: try: assert type(keys) is not str dropped_cols = set(keys) except (TypeError, AssertionError): dropped_cols = set([keys]) if not self._required_cols <= (set(self.columns) - set(dropped_cols)): raise PhysicalMeaning('You drop a column that is needed to ' 'be a physical meaningful description ' 'of a molecule.') if inplace: self._frame.set_index(keys, drop=drop, append=append, inplace=inplace, verify_integrity=verify_integrity) else: new = self._frame.set_index(keys, drop=drop, append=append, inplace=inplace, verify_integrity=verify_integrity) return self.__class__(new, _metadata=self._metadata, metadata=self.metadata)
Set the DataFrame index (row labels) using one or more existing columns. Wrapper around the :meth:`pandas.DataFrame.set_index` method.
def append(self, other, ignore_index=False): if not isinstance(other, self.__class__): raise ValueError('May only append instances of same type.') if type(ignore_index) is bool: new_frame = self._frame.append(other._frame, ignore_index=ignore_index, verify_integrity=True) else: new_frame = self._frame.append(other._frame, ignore_index=True, verify_integrity=True) if type(ignore_index) is int: new_frame.index = range(ignore_index, ignore_index + len(new_frame)) else: new_frame.index = ignore_index return self.__class__(new_frame)
Append rows of `other` to the end of this frame, returning a new object. Wrapper around the :meth:`pandas.DataFrame.append` method. Args: other (Cartesian): ignore_index (sequence, bool, int): If it is a boolean, it behaves like in the description of :meth:`pandas.DataFrame.append`. If it is a sequence, it becomes the new index. If it is an integer, ``range(ignore_index, ignore_index + len(new))`` becomes the new index. Returns: Cartesian:
def apply(self, *args, **kwargs): return self.__class__(self._frame.apply(*args, **kwargs), metadata=self.metadata, _metadata=self._metadata)
Applies function along input axis of DataFrame. Wrapper around the :meth:`pandas.DataFrame.apply` method.
def applymap(self, *args, **kwargs): return self.__class__(self._frame.applymap(*args, **kwargs), metadata=self.metadata, _metadata=self._metadata)
Applies function elementwise Wrapper around the :meth:`pandas.DataFrame.applymap` method.
def top_articles( self, project, access='all-access', year=None, month=None, day=None, limit=1000): yesterday = date.today() - timedelta(days=1) year = str(year or yesterday.year) month = str(month or yesterday.month).rjust(2, '0') day = str(day or yesterday.day).rjust(2, '0') url = '/'.join([endpoints['top'], project, access, year, month, day]) try: result = requests.get(url, headers=self.headers).json() if 'items' in result and len(result['items']) == 1: r = result['items'][0]['articles'] r.sort(key=lambda x: x['rank']) return r[0:(limit)] except: print('ERROR while fetching or parsing ' + url) traceback.print_exc() raise raise Exception( 'The pageview API returned nothing useful at: {}'.format(url) )
Get the most viewed articles for a project on a given date See `<https://wikimedia.org/api/rest_v1/metrics/pageviews/?doc\\ #!/Pageviews_data/get_metrics_pageviews_top_project\\ _access_year_month_day>`_ :Parameters: project : str a wikimedia project such as en.wikipedia or commons.wikimedia access : str access method (desktop, mobile-web, mobile-app, or by default, all-access) year : int default : yesterday's year month : int default : yesterday's month day : int default : yesterday's day limit : int limit the number of articles returned to only the top <limit> default : 1000 :Returns: a sorted list of articles that looks like: [ { rank: <int>, article: <str>, views: <int> } ... ]
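A hypothetical usage sketch, assuming a client object that exposes the top_articles method defined above:

top = client.top_articles('en.wikipedia', limit=5)
for entry in top:
    print(entry['rank'], entry['article'], entry['views'])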
def quick_marshal(*args, **kwargs): @marshal_with_model(*args, **kwargs) def fn(value): return value return fn
In some cases, a view function may return different models in different situations. Using `marshal_with_model` to handle this was tedious; this function simplifies the process. Usage: quick_marshal(args_to_marshal_with_model)(db_instance_or_query)
def _wrap_field(field): class WrappedField(field): def output(self, key, obj): value = _fields.get_value(key if self.attribute is None else self.attribute, obj) # For all fields, when its value was null (None), return null directly, # instead of return its default value (eg. int type's default value was 0) # Because sometimes the client **needs** to know, was a field of the model empty, to decide its behavior. return None if value is None else self.format(value) return WrappedField
Improve Flask-RESTFul's original field type
def fetch(): doi = '10.7910/DVN/AFJNWJ' fname = os.path.join( data_dir(), 'lenz2017', 'ebv_lhd.hpx.fits') fetch_utils.dataverse_download_doi( doi, fname, file_requirements={'filename': 'ebv_lhd.hpx.fits'})
Downloads the Lenz, Hensley & Doré (2017) dust map, placing it in the default :obj:`dustmaps` data directory.
def query(self, coords, **kwargs): return super(Lenz2017Query, self).query(coords, **kwargs)
Returns E(B-V), in mags, at the specified location(s) on the sky. Args: coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query. Returns: A float array of the reddening, in magnitudes of E(B-V), at the selected coordinates.
def write_configuration_file(filepath=_give_default_file_path(), overwrite=False): config = configparser.ConfigParser() config.read_dict(settings) if os.path.isfile(filepath) and not overwrite: try: raise FileExistsError except NameError: # because of python2 warn('File exists already and overwrite is False (default).') else: with open(filepath, 'w') as configfile: config.write(configfile)
Create a configuration file. Writes the current state of settings into a configuration file. .. note:: Since a file is permanently written, this function is strictly speaking not side-effect free. Args: filepath (str): Where to write the file. The default is under both UNIX and Windows ``~/.chemcoordrc``. overwrite (bool): Returns: None:
def read_configuration_file(filepath=_give_default_file_path()): config = configparser.ConfigParser() config.read(filepath) def get_correct_type(section, key, config): """Gives e.g. the boolean True for the string 'True'""" def getstring(section, key, config): return config[section][key] def getinteger(section, key, config): # pylint:disable=unused-variable return config[section].getint(key) def getboolean(section, key, config): return config[section].getboolean(key) def getfloat(section, key, config): # pylint:disable=unused-variable return config[section].getfloat(key) special_actions = {} # Something different than a string is expected special_actions['defaults'] = {} special_actions['defaults']['use_lookup'] = getboolean try: return special_actions[section][key](section, key, config) except KeyError: return getstring(section, key, config) for section in config.sections(): for key in config[section]: settings[section][key] = get_correct_type(section, key, config) return settings
Read the configuration file.

.. note:: This function changes ``cc.settings`` inplace and is therefore
    not side-effect free.

Args:
    filepath (str): Where to read the file.
        The default is under both UNIX and Windows ``~/.chemcoordrc``.

Returns:
    dict: The updated ``settings`` dictionary.
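A small round-trip sketch of the two functions above, assuming the module-level `settings` dict they both reference is in scope.

    settings['defaults']['use_lookup'] = True   # change an in-memory setting
    write_configuration_file(overwrite=True)    # persist it to ~/.chemcoordrc
    read_configuration_file()                   # later: load it back into settings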
def short(self):
    '''Short-form of the unit title, excluding any unit date, as an instance of
    :class:`~eulxml.xmlmap.eadmap.UnitTitle` . Can be used with formatting
    anywhere the full form of the unittitle can be used.'''
    # if there is no unitdate to remove, just return the current object
    if not self.unitdate:
        return self
    # preserve any child elements (e.g., title or emph)
    # initialize a unittitle with a *copy* of the current node
    ut = UnitTitle(node=deepcopy(self.node))
    # remove the unitdate node and return
    ut.node.remove(ut.unitdate.node)
    return ut
Short-form of the unit title, excluding any unit date, as an instance of :class:`~eulxml.xmlmap.eadmap.UnitTitle` . Can be used with formatting anywhere the full form of the unittitle can be used.
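A hedged sketch of calling `short` on a loaded finding aid; the filename and the `ead.unittitle` attribute path are assumptions for illustration.

    from eulxml.xmlmap import load_xmlobject_from_file
    from eulxml.xmlmap.eadmap import EncodedArchivalDescription

    ead = load_xmlobject_from_file('finding_aid.xml', EncodedArchivalDescription)
    print(ead.unittitle.short)   # title text without the trailing unit date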
def hasSubseries(self):
    if self.c and self.c[0] and ((self.c[0].level in ('series', 'subseries')) or
                                 (self.c[0].c and self.c[0].c[0])):
        return True
    else:
        return False
Check if this component has subseries or not. Determined based on level of first subcomponent (series or subseries) or if first component has subcomponents present. :rtype: boolean
def hasSeries(self):
    if len(self.c) and (self.c[0].level == 'series' or
                        (self.c[0].c and self.c[0].c[0])):
        return True
    else:
        return False
Check if this finding aid has series/subseries. Determined based on level of first component (series) or if first component has subcomponents present. :rtype: boolean
def initialize(
        self, M_c, M_r, T, seed, initialization=b'from_the_prior',
        row_initialization=-1, n_chains=1, ROW_CRP_ALPHA_GRID=(),
        COLUMN_CRP_ALPHA_GRID=(), S_GRID=(), MU_GRID=(), N_GRID=31,):
    # FIXME: why is M_r passed?
    arg_tuples = self.get_initialize_arg_tuples(
        M_c, M_r, T, initialization, row_initialization, n_chains,
        ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID, S_GRID, MU_GRID,
        N_GRID, make_get_next_seed(seed),)

    chain_tuples = self.mapper(self.do_initialize, arg_tuples)
    X_L_list, X_D_list = zip(*chain_tuples)
    if n_chains == 1:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    return X_L_list, X_D_list
Sample a latent state from the prior.

:param T: The data table in mapped representation (all floats, generated
    by data_utils.read_data_objects)
:type T: list of lists

:returns: X_L, X_D -- the latent state
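A hedged sketch of drawing a single latent state from the prior; `prepare_data` is a hypothetical helper standing in for the data_utils.read_data_objects step mentioned above, and the LocalEngine import path is an assumption.

    from crosscat.LocalEngine import LocalEngine   # assumed engine implementation

    M_c, M_r, T = prepare_data()   # hypothetical: mapped metadata and data table
    engine = LocalEngine()
    X_L, X_D = engine.initialize(M_c, M_r, T, seed=0)   # single chain by default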
def insert(
        self, M_c, T, X_L_list, X_D_list, new_rows=None, N_GRID=31,
        CT_KERNEL=0):
    if new_rows is None:
        raise ValueError("new_rows must exist")
    if not isinstance(new_rows, list):
        raise TypeError('new_rows must be list of lists')
    if not isinstance(new_rows[0], list):
        raise TypeError('new_rows must be list of lists')

    X_L_list, X_D_list, was_multistate = su.ensure_multistate(
        X_L_list, X_D_list)

    # get insert arg tuples
    arg_tuples = self.get_insert_arg_tuples(
        M_c, T, X_L_list, X_D_list, new_rows, N_GRID, CT_KERNEL)

    chain_tuples = self.mapper(self.do_insert, arg_tuples)
    X_L_list, X_D_list = zip(*chain_tuples)

    if not was_multistate:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]

    T.extend(new_rows)

    ret_tuple = X_L_list, X_D_list, T
    return ret_tuple
Insert mutates the data T.
def simple_predictive_sample(self, M_c, X_L, X_D, Y, Q, seed, n=1):
    get_next_seed = make_get_next_seed(seed)
    samples = _do_simple_predictive_sample(
        M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return samples
Sample values from the predictive distribution of the given latent state.

:param Y: A list of constraints to apply when sampling.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample.  Each value is a doublet of (r, d):
    r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to draw
:type n: int

:returns: list of floats.  Samples in the same order specified by Q
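A sketch of the constraint and query structure, reusing the hypothetical `engine` and latent state from the initialize() example above.

    Y = [(0, 2, 1.5)]      # constrain row 0, column 2 to the value 1.5
    Q = [(0, 3), (0, 4)]   # sample values for row 0, columns 3 and 4
    samples = engine.simple_predictive_sample(M_c, X_L, X_D, Y, Q, seed=0, n=10)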
def simple_predictive_probability(self, M_c, X_L, X_D, Y, Q):
    return su.simple_predictive_probability(M_c, X_L, X_D, Y, Q)
Calculate the probability of a cell taking a value given a latent state.

:param Y: A list of constraints to apply when querying.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query.  Each value is a triplet of (r, d, v):
    r is the row index, d is the column index, and v is the value at which
    the density is evaluated.
:type Q: list of lists

:returns: list of floats -- probabilities of the values specified by Q
def simple_predictive_probability_multistate(
        self, M_c, X_L_list, X_D_list, Y, Q):
    return su.simple_predictive_probability_multistate(
        M_c, X_L_list, X_D_list, Y, Q)
Calculate the probability of a cell taking a value given a latent state.

:param Y: A list of constraints to apply when querying.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query.  Each value is a triplet of (r, d, v):
    r is the row index, d is the column index, and v is the value at which
    the density is evaluated.
:type Q: list of lists

:returns: list of floats -- probabilities of the values specified by Q
def predictive_probability(self, M_c, X_L, X_D, Y, Q):
    return su.predictive_probability(M_c, X_L, X_D, Y, Q)
Calculate the probability of cells jointly taking values given a latent state.

:param Y: A list of constraints to apply when querying.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query.  Each value is a triplet of (r, d, v):
    r is the row index, d is the column index, and v is the value at which
    the density is evaluated.
:type Q: list of lists

:returns: float -- joint log probability of the values specified by Q
def predictive_probability_multistate(self, M_c, X_L_list, X_D_list, Y, Q):
    return su.predictive_probability_multistate(
        M_c, X_L_list, X_D_list, Y, Q)
Calculate the probability of cells jointly taking values given a latent state.

:param Y: A list of constraints to apply when querying.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to query.  Each value is a triplet of (r, d, v):
    r is the row index, d is the column index, and v is the value at which
    the density is evaluated.
:type Q: list of lists

:returns: float -- joint log probability of the values specified by Q
def mutual_information(
        self, M_c, X_L_list, X_D_list, Q, seed, n_samples=1000):
    get_next_seed = make_get_next_seed(seed)
    return iu.mutual_information(
        M_c, X_L_list, X_D_list, Q, get_next_seed, n_samples)
Estimate the mutual information for each pair of columns in Q given the
set of samples.

:param Q: List of tuples where each tuple contains the two column indexes
    to compare
:type Q: list of two-tuples of ints
:param n_samples: the number of simple predictive samples to use
:type n_samples: int

:returns: list of lists -- where each sublist is a set of MIs and Linfoots
    from each crosscat sample.
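A sketch of a mutual-information query over column pairs, again reusing the hypothetical engine; X_L_list and X_D_list are assumed to hold multiple chains (e.g. from initialize() with n_chains > 1).

    Q = [(0, 1), (0, 2)]   # compare column 0 with columns 1 and 2
    mi = engine.mutual_information(M_c, X_L_list, X_D_list, Q, seed=0,
                                   n_samples=500)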
def row_structural_typicality(self, X_L_list, X_D_list, row_id):
    return su.row_structural_typicality(X_L_list, X_D_list, row_id)
Returns the typicality (opposite of anomalousness) of the given row.

:param row_id: id of the target row
:type row_id: int

:returns: float, the typicality, from 0 to 1
def similarity(
        self, M_c, X_L_list, X_D_list, given_row_id, target_row_id,
        target_columns=None):
    return su.similarity(
        M_c, X_L_list, X_D_list, given_row_id, target_row_id,
        target_columns)
Computes the similarity of the given row to the target row, averaged over
all the column indexes given by target_columns.

:param given_row_id: the id of one of the rows to measure similarity between
:type given_row_id: int
:param target_row_id: the id of the other row to measure similarity between
:type target_row_id: int
:param target_columns: the columns to average the similarity over.
    Defaults to all columns.
:type target_columns: int, string, or list of ints

:returns: float
def impute(self, M_c, X_L, X_D, Y, Q, seed, n):
    get_next_seed = make_get_next_seed(seed)
    e = su.impute(M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return e
Impute values from the predictive distribution of the given latent state.

:param Y: A list of constraints to apply when sampling.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample.  Each value is a doublet of (r, d):
    r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to use in the imputation
:type n: int

:returns: list of floats -- imputed values in the same order as specified
    by Q
def impute_and_confidence(self, M_c, X_L, X_D, Y, Q, seed, n):
    get_next_seed = make_get_next_seed(seed)
    if isinstance(X_L, (list, tuple)):
        assert isinstance(X_D, (list, tuple))
        # TODO: multistate impute doesn't exist yet
        # e, confidence = su.impute_and_confidence_multistate(
        #     M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
        e, confidence = su.impute_and_confidence(
            M_c, X_L, X_D, Y, Q, n, get_next_seed)
    else:
        e, confidence = su.impute_and_confidence(
            M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return (e, confidence)
Impute values and confidence of the value from the predictive distribution
of the given latent state.

:param Y: A list of constraints to apply when sampling.  Each constraint
    is a triplet of (r, d, v): r is the row index, d is the column index
    and v is the value of the constraint
:type Y: list of lists
:param Q: A list of values to sample.  Each value is a doublet of (r, d):
    r is the row index, d is the column index
:type Q: list of lists
:param n: the number of samples to use in the imputation
:type n: int

:returns: list of lists -- list of (value, confidence) tuples in the same
    order as specified by Q
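A sketch of imputing a single missing cell with a confidence score, reusing the hypothetical engine and latent state from the earlier examples.

    Y = []            # no additional constraints
    Q = [(5, 1)]      # impute row 5, column 1
    values, confidence = engine.impute_and_confidence(
        M_c, X_L, X_D, Y, Q, seed=0, n=100)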