<SYSTEM_TASK:> Return True if x != y and False otherwise. <END_TASK>
<USER_TASK:>
Description:
def notequal(x, y):
    """
    Return True if x != y and False otherwise.

    This function returns True whenever x and/or y is a NaN.
    """
    x = BigFloat._implicit_convert(x)
    y = BigFloat._implicit_convert(y)
    return not mpfr.mpfr_equal_p(x, y)
<SYSTEM_TASK:> Return True if x or y is a NaN and False otherwise. <END_TASK>
<USER_TASK:>
Description:
def unordered(x, y):
    """
    Return True if x or y is a NaN and False otherwise.
    """
    x = BigFloat._implicit_convert(x)
    y = BigFloat._implicit_convert(y)
    return mpfr.mpfr_unordered_p(x, y)
<SYSTEM_TASK:> Return the exponential of x. <END_TASK>
<USER_TASK:>
Description:
def exp(x, context=None):
    """
    Return the exponential of x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_exp,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return two raised to the power x. <END_TASK>
<USER_TASK:>
Description:
def exp2(x, context=None):
    """
    Return two raised to the power x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_exp2,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the inverse cosine of ``x``. <END_TASK>
<USER_TASK:>
Description:
def acos(x, context=None):
    """
    Return the inverse cosine of ``x``.

    The mathematically exact result lies in the range [0, π].  However,
    note that as a result of rounding to the current context, it's possible
    for the actual value returned to be fractionally larger than π::

        >>> from bigfloat import precision
        >>> with precision(12):
        ...     x = acos(-1)
        ...
        >>> print(x)
        3.1416
        >>> x > const_pi()
        True

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_acos,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the inverse sine of ``x``. <END_TASK>
<USER_TASK:>
Description:
def asin(x, context=None):
    """
    Return the inverse sine of ``x``.

    The mathematically exact result lies in the range [-π/2, π/2].
    However, note that as a result of rounding to the current context,
    it's possible for the actual value to lie just outside this range.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_asin,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the inverse tangent of ``x``. <END_TASK>
<USER_TASK:>
Description:
def atan(x, context=None):
    """
    Return the inverse tangent of ``x``.

    The mathematically exact result lies in the range [-π/2, π/2].
    However, note that as a result of rounding to the current context,
    it's possible for the actual value to lie just outside this range.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_atan,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return one less than the exponential of x. <END_TASK>
<USER_TASK:>
Description:
def expm1(x, context=None):
    """
    Return one less than the exponential of x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_expm1,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the exponential integral of x. <END_TASK>
<USER_TASK:>
Description:
def eint(x, context=None):
    """
    Return the exponential integral of x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_eint,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the real part of the dilogarithm of x. <END_TASK>
<USER_TASK:>
Description:
def li2(x, context=None):
    """
    Return the real part of the dilogarithm of x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_li2,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the logarithm of the absolute value of the Gamma function at x. <END_TASK>
<USER_TASK:>
Description:
def lgamma(x, context=None):
    """
    Return the logarithm of the absolute value of the Gamma function at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        lambda rop, op, rnd: mpfr.mpfr_lgamma(rop, op, rnd)[0],
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the error function at x. <END_TASK>
<USER_TASK:>
Description:
def erf(x, context=None):
    """
    Return the value of the error function at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_erf,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the complementary error function at x. <END_TASK>
<USER_TASK:>
Description:
def erfc(x, context=None):
    """
    Return the value of the complementary error function at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_erfc,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the first kind Bessel function of order 0 at x. <END_TASK>
<USER_TASK:>
Description:
def j0(x, context=None):
    """
    Return the value of the first kind Bessel function of order 0 at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_j0,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the first kind Bessel function of order 1 at x. <END_TASK>
<USER_TASK:>
Description:
def j1(x, context=None):
    """
    Return the value of the first kind Bessel function of order 1 at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_j1,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the first kind Bessel function of order ``n`` at ``x``. <END_TASK>
<USER_TASK:>
Description:
def jn(n, x, context=None):
    """
    Return the value of the first kind Bessel function of order ``n``
    at ``x``.

    ``n`` should be a Python integer.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_jn,
        (n, BigFloat._implicit_convert(x)),
        context,
    )
<SYSTEM_TASK:> Return the value of the second kind Bessel function of order 0 at x. <END_TASK>
<USER_TASK:>
Description:
def y0(x, context=None):
    """
    Return the value of the second kind Bessel function of order 0 at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_y0,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the second kind Bessel function of order 1 at x. <END_TASK>
<USER_TASK:>
Description:
def y1(x, context=None):
    """
    Return the value of the second kind Bessel function of order 1 at x.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_y1,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the value of the second kind Bessel function of order ``n`` at <END_TASK>
<USER_TASK:>
Description:
def yn(n, x, context=None):
    """
    Return the value of the second kind Bessel function of order ``n``
    at ``x``.

    ``n`` should be a Python integer.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_yn,
        (n, BigFloat._implicit_convert(x)),
        context,
    )
<SYSTEM_TASK:> Return the arithmetic geometric mean of x and y. <END_TASK>
<USER_TASK:>
Description:
def agm(x, y, context=None):
    """
    Return the arithmetic geometric mean of x and y.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_agm,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
<SYSTEM_TASK:> Return the Euclidean norm of x and y, i.e., the square root of the sum of <END_TASK>
<USER_TASK:>
Description:
def hypot(x, y, context=None):
    """
    Return the Euclidean norm of x and y, i.e., the square root of the
    sum of the squares of x and y.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_hypot,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
<SYSTEM_TASK:> Return the next higher or equal integer to x. <END_TASK>
<USER_TASK:>
Description:
def ceil(x, context=None):
    """
    Return the next higher or equal integer to x.

    If the result is not exactly representable, it will be rounded
    according to the current context.

    Note that the rounding step means that it's possible for the result
    to be smaller than ``x``.  For example::

        >>> x = 2**100 + 1
        >>> ceil(2**100 + 1) >= x
        False

    One way to be sure of getting a result that's greater than or equal
    to ``x`` is to use the ``RoundTowardPositive`` rounding mode::

        >>> with RoundTowardPositive:
        ...     x = 2**100 + 1
        ...     ceil(x) >= x
        ...
        True

    Similar comments apply to the :func:`floor`, :func:`round` and
    :func:`trunc` functions.

    .. note::
       This function corresponds to the MPFR function ``mpfr_rint_ceil``,
       not to ``mpfr_ceil``.

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_rint_ceil,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the next lower or equal integer to x. <END_TASK>
<USER_TASK:>
Description:
def floor(x, context=None):
    """
    Return the next lower or equal integer to x.

    If the result is not exactly representable, it will be rounded
    according to the current context.

    Note that it's possible for the result to be larger than ``x``.
    See the documentation of the :func:`ceil` function for more
    information.

    .. note::
       This function corresponds to the MPFR function ``mpfr_rint_floor``,
       not to ``mpfr_floor``.

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_rint_floor,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the next integer towards zero. <END_TASK>
<USER_TASK:>
Description:
def trunc(x, context=None):
    """
    Return the next integer towards zero.

    If the result is not exactly representable, it will be rounded
    according to the current context.

    .. note::
       This function corresponds to the MPFR function ``mpfr_rint_trunc``,
       not to ``mpfr_trunc``.

    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_rint_trunc,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the fractional part of ``x``. <END_TASK>
<USER_TASK:>
Description:
def frac(x, context=None):
    """
    Return the fractional part of ``x``.

    The result has the same sign as ``x``.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_frac,
        (BigFloat._implicit_convert(x),),
        context,
    )
<SYSTEM_TASK:> Return the minimum of x and y. <END_TASK>
<USER_TASK:>
Description:
def min(x, y, context=None):
    """
    Return the minimum of x and y.

    If x and y are both NaN, return NaN.
    If exactly one of x and y is NaN, return the non-NaN value.
    If x and y are zeros of different signs, return −0.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_min,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
<SYSTEM_TASK:> Return the maximum of x and y. <END_TASK>
<USER_TASK:>
Description:
def max(x, y, context=None):
    """
    Return the maximum of x and y.

    If x and y are both NaN, return NaN.
    If exactly one of x and y is NaN, return the non-NaN value.
    If x and y are zeros of different signs, return +0.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_max,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
<SYSTEM_TASK:> Return a new BigFloat object with the magnitude of x but the sign of y. <END_TASK>
<USER_TASK:>
Description:
def copysign(x, y, context=None):
    """
    Return a new BigFloat object with the magnitude of x but the sign of y.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_copysign,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
<SYSTEM_TASK:> Convert an integer, float or BigFloat with no loss of precision. <END_TASK>
<USER_TASK:>
Description:
def exact(cls, value, precision=None):
    """Convert an integer, float or BigFloat with no loss of precision.

    Also convert a string with given precision.

    This constructor makes no use of the current context.
    """
    # figure out precision to use
    if isinstance(value, six.string_types):
        if precision is None:
            raise TypeError("precision must be supplied when "
                            "converting from a string")
    else:
        if precision is not None:
            raise TypeError("precision argument should not be "
                            "specified except when converting "
                            "from a string")
        if isinstance(value, float):
            precision = _builtin_max(DBL_PRECISION, PRECISION_MIN)
        elif isinstance(value, six.integer_types):
            precision = _builtin_max(_bit_length(value), PRECISION_MIN)
        elif isinstance(value, BigFloat):
            precision = value.precision
        else:
            raise TypeError("Can't convert argument %s of type %s "
                            "to BigFloat" % (value, type(value)))

    # Use unlimited exponents, with given precision.
    with _saved_flags():
        set_flagstate(set())  # clear all flags
        context = (
            WideExponentContext +
            Context(precision=precision) +
            RoundTiesToEven
        )
        with context:
            result = BigFloat(value)
        if test_flag(Overflow):
            raise ValueError("value too large to represent as a BigFloat")
        if test_flag(Underflow):
            raise ValueError("value too small to represent as a BigFloat")
        if test_flag(Inexact) and not isinstance(value, six.string_types):
            # since this is supposed to be an exact conversion, the
            # inexact flag should never be set except when converting
            # from a string.
            assert False, ("Inexact conversion in BigFloat.exact. "
                           "This shouldn't ever happen. Please report.")

    return result
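A usage sketch of the distinction this method draws (assuming the bigfloat package's default 53-bit context; not part of the original module):

    from bigfloat import BigFloat

    x = BigFloat.exact(2**100 + 1)   # lossless: precision grows to all 101 bits
    y = BigFloat(2**100 + 1)         # rounded to the current context's precision
    assert x.precision == 101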
<SYSTEM_TASK:> Return the significand of self, as a BigFloat. <END_TASK>
<USER_TASK:>
Description:
def _significand(self):
    """Return the significand of self, as a BigFloat.

    If self is a nonzero finite number, return a BigFloat m with the
    same precision as self, such that

        0.5 <= m < 1. and self = +/-m * 2**e

    for some exponent e.

    If self is zero, infinity or nan, return a copy of self with the
    sign set to 0.
    """
    m = self.copy()
    if self and is_finite(self):
        mpfr.mpfr_set_exp(m, 0)
    mpfr.mpfr_setsign(m, m, False, ROUND_TIES_TO_EVEN)
    return m
<SYSTEM_TASK:> Return the exponent of self, as an integer. <END_TASK>
<USER_TASK:>
Description:
def _exponent(self):
    """Return the exponent of self, as an integer.

    The exponent is defined as the unique integer k such that
    2**(k-1) <= abs(self) < 2**k.

    If self is not finite and nonzero, return a string: one of '0',
    'inf' or 'nan'.
    """
    if self and is_finite(self):
        return mpfr.mpfr_get_exp(self)

    if not self:
        return '0'
    elif is_inf(self):
        return 'inf'
    elif is_nan(self):
        return 'nan'
    else:
        assert False, "shouldn't ever get here"
<SYSTEM_TASK:> Return a copy of self with the opposite sign bit. <END_TASK>
<USER_TASK:>
Description:
def copy_neg(self):
    """ Return a copy of self with the opposite sign bit.

    Unlike -self, this does not make use of the context: the result
    has the same precision as the original.
    """
    result = mpfr.Mpfr_t.__new__(BigFloat)
    mpfr.mpfr_init2(result, self.precision)
    new_sign = not self._sign()
    mpfr.mpfr_setsign(result, self, new_sign, ROUND_TIES_TO_EVEN)
    return result
<SYSTEM_TASK:> Return a copy of self with the sign bit unset. <END_TASK>
<USER_TASK:>
Description:
def copy_abs(self):
    """ Return a copy of self with the sign bit unset.

    Unlike abs(self), this does not make use of the context: the result
    has the same precision as the original.
    """
    result = mpfr.Mpfr_t.__new__(BigFloat)
    mpfr.mpfr_init2(result, self.precision)
    mpfr.mpfr_setsign(result, self, False, ROUND_TIES_TO_EVEN)
    return result
<SYSTEM_TASK:> Format a nonzero finite BigFloat instance to a given number of <END_TASK>
<USER_TASK:>
Description:
def _format_to_floating_precision(self, precision):
    """ Format a nonzero finite BigFloat instance to a given number of
    significant digits.

    Returns a triple (negative, digits, exp) where:

      - negative is a boolean, True for a negative number, else False
      - digits is a string giving the digits of the output
      - exp represents the exponent of the output.

    The normalization of the exponent is such that <digits>E<exp>
    represents the decimal approximation to self.

    Rounding is always round-to-nearest.
    """
    if precision <= 0:
        raise ValueError("precision argument should be at least 1")

    sign, digits, exp = _mpfr_get_str2(
        10,
        precision,
        self,
        ROUND_TIES_TO_EVEN,
    )
    return sign, digits, exp - len(digits)
<SYSTEM_TASK:> Format 'self' to a given number of digits after the decimal point. <END_TASK>
<USER_TASK:>
Description:
def _format_to_fixed_precision(self, precision):
    """ Format 'self' to a given number of digits after the decimal point.

    Returns a triple (negative, digits, exp) where:

      - negative is a boolean, True for a negative number, else False
      - digits is a string giving the digits of the output
      - exp represents the exponent of the output

    The normalization of the exponent is such that <digits>E<exp>
    represents the decimal approximation to self.
    """
    # MPFR only provides functions to format to a given number of
    # significant digits.  So we must:
    #
    #   (1) Identify an e such that 10**(e-1) <= abs(x) < 10**e.
    #
    #   (2) Determine the number of significant digits required, and
    #       format to that number of significant digits.
    #
    #   (3) Adjust output if necessary if it's been rounded up to 10**e.

    # Zeros
    if is_zero(self):
        return is_negative(self), '0', -precision

    # Specials
    if is_inf(self):
        return is_negative(self), 'inf', None

    if is_nan(self):
        return is_negative(self), 'nan', None

    # Figure out the exponent by making a call to get_str2.  exp satisfies
    # 10**(exp-1) <= self < 10**exp
    _, _, exp = _mpfr_get_str2(
        10,
        2,
        self,
        ROUND_TOWARD_ZERO,
    )

    sig_figs = exp + precision

    if sig_figs < 0:
        sign = self._sign()
        return sign, '0', -precision

    elif sig_figs == 0:
        # Ex: 0.1 <= x < 1.0, rounding x to nearest multiple of 1.0.
        # Or: 100.0 <= x < 1000.0, rounding x to nearest multiple of 1000.0
        sign, digits, new_exp = _mpfr_get_str2(
            10,
            2,
            self,
            ROUND_TOWARD_NEGATIVE,
        )
        if int(digits) == 50:
            # Halfway case
            sign, digits, new_exp = _mpfr_get_str2(
                10,
                2,
                self,
                ROUND_TOWARD_POSITIVE,
            )

        digits = '1' if int(digits) > 50 or new_exp == exp + 1 else '0'
        return sign, digits, -precision

    negative, digits, new_exp = self._format_to_floating_precision(
        sig_figs
    )

    # It's possible that the rounding up involved changes the exponent;
    # in that case we have to adjust the digits accordingly.  The only
    # possibility should be that new_exp == exp + 1.
    if new_exp + len(digits) != exp:
        assert new_exp + len(digits) == exp + 1
        digits += '0'

    return negative, digits, -precision
<SYSTEM_TASK:> Implicit conversion used for binary operations, comparisons, <END_TASK>
<USER_TASK:>
Description:
def _implicit_convert(cls, arg):
    """Implicit conversion used for binary operations, comparisons,
    functions, etc.  Return value should be an instance of BigFloat."""
    # ints, long and floats mix freely with BigFloats, and are
    # converted exactly.
    if isinstance(arg, six.integer_types) or isinstance(arg, float):
        return cls.exact(arg)
    elif isinstance(arg, BigFloat):
        return arg
    else:
        raise TypeError("Unable to convert argument %s of type %s "
                        "to BigFloat" % (arg, type(arg)))
<SYSTEM_TASK:> Calculate the parents of a row of a merkle tree. <END_TASK>
<USER_TASK:>
Description:
def calculate_merkle_pairs(bin_hashes, hash_function=bin_double_sha256):
    """
    Calculate the parents of a row of a merkle tree.
    Takes in a list of binary hashes, returns a list of binary hashes.

    The returned parents list is such that
    parents[i] == hash(bin_hashes[2*i] + bin_hashes[2*i+1]).
    """
    hashes = list(bin_hashes)

    # if there are an odd number of hashes, double up the last one
    if len(hashes) % 2 == 1:
        hashes.append(hashes[-1])

    new_hashes = []
    for i in range(0, len(hashes), 2):
        new_hashes.append(hash_function(hashes[i] + hashes[i+1]))

    return new_hashes
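Repeatedly applying calculate_merkle_pairs collapses the leaf row all the way to the merkle root. A minimal sketch (merkle_root is a hypothetical helper, not part of the code above):

    def merkle_root(leaf_hashes, hash_function=bin_double_sha256):
        # Collapse rows until a single parent -- the root -- remains.
        row = list(leaf_hashes)
        while len(row) > 1:
            row = calculate_merkle_pairs(row, hash_function)
        return row[0]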
<SYSTEM_TASK:> Verify a merkle path. The given path is the path from two leaf nodes to the root itself. <END_TASK>
<USER_TASK:>
Description:
def verify_merkle_path(merkle_root_hex, serialized_path, leaf_hash_hex, hash_function=bin_double_sha256):
    """
    Verify a merkle path.  The given path is the path from two leaf nodes
    to the root itself.

    merkle_root_hex is a little-endian, hex-encoded hash.
    serialized_path is the serialized merkle path.
    path_hex is a list of little-endian, hex-encoded hashes.

    Return True if the path is consistent with the merkle root.
    Return False if not.
    """
    merkle_root = hex_to_bin_reversed(merkle_root_hex)
    leaf_hash = hex_to_bin_reversed(leaf_hash_hex)

    path = MerkleTree.path_deserialize(serialized_path)
    path = [{'order': p['order'], 'hash': hex_to_bin_reversed(p['hash'])} for p in path]

    if len(path) == 0:
        raise ValueError("Empty path")

    cur_hash = leaf_hash
    for i in range(0, len(path)):
        if path[i]['order'] == 'l':
            # left sibling
            cur_hash = hash_function(path[i]['hash'] + cur_hash)
        elif path[i]['order'] == 'r':
            # right sibling
            cur_hash = hash_function(cur_hash + path[i]['hash'])
        elif path[i]['order'] == 'm':
            # merkle root
            assert len(path) == 1
            return cur_hash == path[i]['hash']

    return cur_hash == merkle_root
<SYSTEM_TASK:> Converts from sky coordinates to pixel indices. <END_TASK>
<USER_TASK:>
Description:
def _coords2idx(self, coords):
    """
    Converts from sky coordinates to pixel indices.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): Sky coordinates.

    Returns:
        Pixel indices of the coordinates, with the same shape as the
        input coordinates.  Pixels which are outside the map are given
        an index equal to the number of pixels in the map.
    """
    x = self._coords2vec(coords)
    idx = self._kd.query(x, p=self._metric_p,
                         distance_upper_bound=self._max_pix_scale)
    return idx[1]
<SYSTEM_TASK:> Converts from Galactic coordinates to pixel indices. <END_TASK>
<USER_TASK:>
Description:
def _gal2idx(self, gal):
    """
    Converts from Galactic coordinates to pixel indices.

    Args:
        gal (:obj:`astropy.coordinates.SkyCoord`): Galactic coordinates.
            Must store an array of coordinates (i.e., not be scalar).

    Returns:
        ``j, k, mask`` - Pixel indices of the coordinates, as well as a
        mask of in-bounds coordinates.  Outputs have the same shape as
        the input coordinates.
    """
    # Make sure that l is in domain [-180 deg, 180 deg)
    l = coordinates.Longitude(gal.l, wrap_angle=180.*units.deg)

    j = (self._inv_pix_scale * (l.deg - self._l_bounds[0])).astype('i4')
    k = (self._inv_pix_scale * (gal.b.deg - self._b_bounds[0])).astype('i4')

    idx = (j < 0) | (j >= self._shape[0]) | (k < 0) | (k >= self._shape[1])

    if np.any(idx):
        j[idx] = -1
        k[idx] = -1

    return j, k, ~idx
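The wrap_angle conversion above is plain astropy behavior; a standalone check (assuming astropy is installed):

    from astropy import coordinates, units

    # 350 deg wraps to -10 deg, so all longitudes land in [-180 deg, 180 deg).
    l = coordinates.Longitude(350. * units.deg, wrap_angle=180. * units.deg)
    assert abs(l.deg - (-10.0)) < 1e-9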
<SYSTEM_TASK:> Append up to 2000 block hashes for which to get headers. <END_TASK>
<USER_TASK:>
Description:
def add_block_hash( self, block_hash ):
    """
    Append up to 2000 block hashes for which to get headers.
    """
    if len(self.block_hashes) > 2000:
        raise Exception("A getheaders request cannot have over 2000 block hashes")

    hash_num = int("0x" + block_hash, 16)
    bh = BlockHash()
    bh.block_hash = hash_num

    self.block_hashes.append( bh )
    self.hash_stop = hash_num
<SYSTEM_TASK:> Interact with the blockchain peer, <END_TASK>
<USER_TASK:>
Description:
def run( self ):
    """
    Interact with the blockchain peer,
    until we get a socket error or we exit the loop explicitly.
    Return True on success.
    Raise on error.
    """
    self.handshake()

    try:
        self.loop()
    except socket.error as se:
        if self.finished:
            return True
        else:
            raise
<SYSTEM_TASK:> This method will handle the Ping message and then <END_TASK>
<USER_TASK:>
Description:
def handle_ping(self, message_header, message):
    """
    This method will handle the Ping message and then
    will answer every Ping message with a Pong message
    using the nonce received.

    :param message_header: The header of the Ping message
    :param message: The Ping message
    """
    log.debug("handle ping")

    pong = Pong()
    pong.nonce = message.nonce

    log.debug("send pong")
    self.send_message(pong)
<SYSTEM_TASK:> Set up an SPV client. <END_TASK>
<USER_TASK:>
Description:
def init(cls, path):
    """
    Set up an SPV client.
    If the locally-stored headers do not exist, then create a stub
    headers file with the genesis block information.
    """
    if not os.path.exists( path ):
        block_header_serializer = BlockHeaderSerializer()
        genesis_block_header = BlockHeader()

        if USE_MAINNET:
            # we know the mainnet block header
            # but we don't know the testnet/regtest block header
            genesis_block_header.version = 1
            genesis_block_header.prev_block = 0
            genesis_block_header.merkle_root = int(GENESIS_BLOCK_MERKLE_ROOT, 16 )
            genesis_block_header.timestamp = 1231006505
            genesis_block_header.bits = int( "1d00ffff", 16 )
            genesis_block_header.nonce = 2083236893
            genesis_block_header.txns_count = 0

            with open(path, "wb") as f:
                bin_data = block_header_serializer.serialize( genesis_block_header )
                f.write( bin_data )
<SYSTEM_TASK:> Get the locally-stored block height <END_TASK>
<USER_TASK:>
Description:
def height(cls, path):
    """
    Get the locally-stored block height
    """
    if os.path.exists( path ):
        sb = os.stat( path )
        h = (sb.st_size / BLOCK_HEADER_SIZE) - 1
        return h
    else:
        return None
<SYSTEM_TASK:> Get a block header at a particular height from disk. <END_TASK>
<USER_TASK:>
Description:
def read_header(cls, headers_path, block_height, allow_none=False):
    """
    Get a block header at a particular height from disk.
    Return the header if found.
    Return None if not.
    """
    if os.path.exists(headers_path):
        header_parser = BlockHeaderSerializer()
        sb = os.stat( headers_path )
        if sb.st_size < BLOCK_HEADER_SIZE * block_height:
            # beyond EOF
            if allow_none:
                return None
            else:
                raise Exception('EOF on block headers')

        with open( headers_path, "rb" ) as f:
            f.seek( block_height * BLOCK_HEADER_SIZE, os.SEEK_SET )
            hdr = SPVClient.read_header_at( f )

        return hdr
    else:
        if allow_none:
            return None
        else:
            raise Exception('No such file or directory: {}'.format(headers_path))
<SYSTEM_TASK:> Given the block's numeric ID, its hash, and the bitcoind-returned block_data, <END_TASK>
<USER_TASK:>
Description:
def block_header_verify( cls, headers_path, block_id, block_hash, block_header ):
    """
    Given the block's numeric ID, its hash, and the bitcoind-returned
    block_data, use the SPV header chain to verify the block's integrity.

    block_header must be a dict with the following structure:
    * version: protocol version (int)
    * prevhash: previous block hash (hex str)
    * merkleroot: block Merkle root (hex str)
    * timestamp: UNIX time stamp (int)
    * bits: difficulty bits (hex str)
    * nonce: PoW nonce (int)
    * hash: block hash (hex str)
    (i.e. the format that the reference bitcoind returns via JSON RPC)

    Return True on success.
    Return False on error.
    """
    prev_header = cls.read_header( headers_path, block_id - 1 )
    prev_hash = prev_header['hash']
    return bits.block_header_verify( block_header, prev_hash, block_hash )
<SYSTEM_TASK:> Calculate the hash of a transaction structure given by bitcoind <END_TASK>
<USER_TASK:>
Description:
def tx_hash( cls, tx ):
    """
    Calculate the hash of a transaction structure given by bitcoind
    """
    tx_hex = bits.btc_bitcoind_tx_serialize( tx )
    tx_hash = hashing.bin_double_sha256(tx_hex.decode('hex'))[::-1].encode('hex')
    return tx_hash
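For reference, the same txid convention (double SHA-256 of the serialized transaction, displayed byte-reversed) can be reproduced with only the standard library; this standalone sketch substitutes hashlib for the bits/hashing helpers used above:

    import hashlib
    import binascii

    def txid_from_hex(tx_hex):
        # Double SHA-256 of the raw transaction bytes, hex-encoded in
        # reversed byte order -- Bitcoin's conventional txid display form.
        digest = hashlib.sha256(
            hashlib.sha256(binascii.unhexlify(tx_hex)).digest()).digest()
        return binascii.hexlify(digest[::-1])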
<SYSTEM_TASK:> Given the block's verified block txids, verify that a transaction is legit. <END_TASK>
<USER_TASK:>
Description:
def tx_verify( cls, verified_block_txids, tx ):
    """
    Given the block's verified block txids, verify that a transaction is legit.
    @tx must be a dict with the following fields:
    * locktime: int
    * version: int
    * vin: list of dicts with:
      * vout: int,
      * hash: hex str
      * sequence: int (optional)
      * scriptSig: dict with:
        * hex: hex str
    * vout: list of dicts with:
      * value: float
      * scriptPubKey: dict with:
        * hex: hex str
    """
    tx_hash = cls.tx_hash( tx )
    return tx_hash in verified_block_txids
<SYSTEM_TASK:> Verify that a given chain of block headers <END_TASK>
<USER_TASK:>
Description:
def verify_header_chain(cls, path, chain=None):
    """
    Verify that a given chain of block headers
    has sufficient proof of work.
    """
    if chain is None:
        chain = SPVClient.load_header_chain( path )

    prev_header = chain[0]
    for i in xrange(1, len(chain)):
        header = chain[i]
        height = header.get('block_height')
        prev_hash = prev_header.get('hash')
        if prev_hash != header.get('prev_block_hash'):
            log.error("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
            return False

        bits, target = SPVClient.get_target( path, height/BLOCK_DIFFICULTY_CHUNK_SIZE, chain)
        if bits != header.get('bits'):
            log.error("bits mismatch: %s vs %s" % (bits, header.get('bits')))
            return False

        _hash = header.get('hash')
        if int('0x'+_hash, 16) > target:
            log.error("insufficient proof of work: %s vs target %s" % (int('0x'+_hash, 16), target))
            return False

        prev_header = header

    return True
<SYSTEM_TASK:> Synchronize our local block headers up to the last block ID given. <END_TASK>
<USER_TASK:>
Description:
def sync_header_chain(cls, path, bitcoind_server, last_block_id ):
    """
    Synchronize our local block headers up to the last block ID given.
    @last_block_id is *inclusive*
    @bitcoind_server is host:port or just host
    """
    current_block_id = SPVClient.height( path )
    if current_block_id is None:
        assert USE_TESTNET
        current_block_id = -1

    assert (current_block_id >= 0 and USE_MAINNET) or USE_TESTNET

    if current_block_id < last_block_id:
        if USE_MAINNET:
            log.debug("Synchronize %s to %s" % (current_block_id, last_block_id))
        else:
            log.debug("Synchronize testnet %s to %s" % (current_block_id + 1, last_block_id ))

        # need to sync
        if current_block_id >= 0:
            prev_block_header = SPVClient.read_header( path, current_block_id )
            prev_block_hash = prev_block_header['hash']
        else:
            # can only happen when in testnet
            prev_block_hash = GENESIS_BLOCK_HASH_TESTNET

        # connect
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # timeout (10 min)
        sock.settimeout(600)

        bitcoind_port = 8333
        if ":" in bitcoind_server:
            p = bitcoind_server.split(":")
            bitcoind_server = p[0]
            bitcoind_port = int(p[1])

        log.debug("connect to %s:%s" % (bitcoind_server, bitcoind_port))
        sock.connect( (bitcoind_server, bitcoind_port) )

        client = BlockHeaderClient( sock, path, prev_block_hash, last_block_id )

        # get headers
        client.run()

        # verify headers
        if SPVClient.height(path) < last_block_id:
            raise Exception("Did not receive all headers up to %s (only got %s)" % (last_block_id, SPVClient.height(path)))

        # defensive: make sure it's *exactly* that many blocks
        rc = SPVClient.verify_header_chain( path )
        if not rc:
            raise Exception("Failed to verify headers (stored in '%s')" % path)

    log.debug("synced headers from %s to %s in %s" % (current_block_id, last_block_id, path))
    return True
<SYSTEM_TASK:> Return IEEE 754-2008 context for a given bit width. <END_TASK>
<USER_TASK:>
Description:
def IEEEContext(bitwidth):
    """
    Return IEEE 754-2008 context for a given bit width.

    The IEEE 754 standard specifies binary interchange formats with
    bitwidths 16, 32, 64, 128, and all multiples of 32 greater than 128.
    This function returns the context corresponding to the interchange
    format for the given bitwidth.

    See section 3.6 of IEEE 754-2008 or the bigfloat source for more
    details.
    """
    try:
        precision = {16: 11, 32: 24, 64: 53, 128: 113}[bitwidth]
    except KeyError:
        if not (bitwidth >= 128 and bitwidth % 32 == 0):
            raise ValueError("nonstandard bitwidth: bitwidth should be "
                             "16, 32, 64, 128, or k*32 for some k >= 4")
        # The formula for the precision involves rounding 4*log2(width) to the
        # nearest integer.  We have:
        #
        #   round(4*log2(width)) == round(log2(width**8)/2)
        #                        == floor((log2(width**8) + 1)/2)
        #                        == (width**8).bit_length() // 2
        #
        # (Note that 8*log2(width) can never be an odd integer, so we
        # don't care which way half-way cases round in the 'round'
        # operation.)
        precision = bitwidth - _bit_length(bitwidth ** 8) // 2 + 13

    emax = 1 << bitwidth - precision - 1
    return Context(
        precision=precision,
        emin=4 - emax - precision,
        emax=emax,
        subnormalize=True,
    )
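A quick check of the formula against the next interchange format, binary256:

    ctx = IEEEContext(256)
    # binary256: precision = 256 - round(4*log2(256)) + 13 = 256 - 32 + 13 = 237
    assert ctx.precision == 237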
<SYSTEM_TASK:> View your molecule or list of molecules. <END_TASK>
<USER_TASK:>
Description:
def view(molecule, viewer=settings['defaults']['viewer'], use_curr_dir=False):
    """View your molecule or list of molecules.

    .. note:: This function writes a temporary file and opens it with
        an external viewer.  If you modify your molecule afterwards you
        have to recall view in order to see the changes.

    Args:
        molecule: Can be a cartesian, or a list of cartesians.
        viewer (str): The external viewer to use.  The default is
            specified in settings.viewer
        use_curr_dir (bool): If True, the temporary file is written to
            the current directory.  Otherwise it gets written to the
            OS-dependent temporary directory.

    Returns:
        None:
    """
    try:
        molecule.view(viewer=viewer, use_curr_dir=use_curr_dir)
    except AttributeError:
        if pd.api.types.is_list_like(molecule):
            cartesian_list = molecule
        else:
            raise ValueError('Argument is neither list nor Cartesian.')
        if use_curr_dir:
            TEMP_DIR = os.path.curdir
        else:
            TEMP_DIR = tempfile.gettempdir()

        def give_filename(i):
            filename = 'ChemCoord_list_' + str(i) + '.molden'
            return os.path.join(TEMP_DIR, filename)

        i = 1
        while os.path.exists(give_filename(i)):
            i = i + 1

        to_molden(cartesian_list, buf=give_filename(i))

        def open_file(i):
            """Open file and close after being finished."""
            try:
                subprocess.check_call([viewer, give_filename(i)])
            except (subprocess.CalledProcessError, FileNotFoundError):
                raise
            finally:
                if use_curr_dir:
                    pass
                else:
                    os.remove(give_filename(i))

        Thread(target=open_file, args=(i,)).start()
<SYSTEM_TASK:> Write a list of Cartesians into a molden file. <END_TASK>
<USER_TASK:>
Description:
def to_molden(cartesian_list, buf=None, sort_index=True,
              overwrite=True, float_format='{:.6f}'.format):
    """Write a list of Cartesians into a molden file.

    .. note:: Since it permanently writes a file, this function is
        strictly speaking **not side-effect free**.
        The list to be written is of course not changed.

    Args:
        cartesian_list (list):
        buf (str): StringIO-like, optional buffer to write to
        sort_index (bool): If sort_index is true, the Cartesian
            is sorted by the index before writing.
        overwrite (bool): May overwrite existing files.
        float_format (one-parameter function): Formatter function to
            apply to column's elements if they are floats.  The result
            of this function must be a unicode string.

    Returns:
        formatted : string (or unicode, depending on data and options)
    """
    if sort_index:
        cartesian_list = [molecule.sort_index() for molecule in cartesian_list]

    give_header = ("[MOLDEN FORMAT]\n"
                   + "[N_GEO]\n"
                   + str(len(cartesian_list)) + "\n"
                   + '[GEOCONV]\n'
                   + 'energy\n{energy}'
                   + 'max-force\n{max_force}'
                   + 'rms-force\n{rms_force}'
                   + '[GEOMETRIES] (XYZ)\n').format

    values = len(cartesian_list) * '1\n'
    energy = [str(m.metadata.get('energy', 1)) for m in cartesian_list]
    energy = '\n'.join(energy) + '\n'

    header = give_header(energy=energy, max_force=values, rms_force=values)

    coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format)
                   for x in cartesian_list]
    output = header + '\n'.join(coordinates)

    if buf is not None:
        if overwrite:
            with open(buf, mode='w') as f:
                f.write(output)
        else:
            with open(buf, mode='x') as f:
                f.write(output)
    else:
        return output
<SYSTEM_TASK:> Read a molden file. <END_TASK>
<USER_TASK:>
Description:
def read_molden(inputfile, start_index=0, get_bonds=True):
    """Read a molden file.

    Args:
        inputfile (str):
        start_index (int):

    Returns:
        list: A list containing :class:`~chemcoord.Cartesian` is returned.
    """
    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian

    with open(inputfile, 'r') as f:
        found = False
        while not found:
            line = f.readline()
            if '[N_GEO]' in line:
                found = True
                number_of_molecules = int(f.readline().strip())

        energies = []
        found = False
        while not found:
            line = f.readline()
            if 'energy' in line:
                found = True
                for _ in range(number_of_molecules):
                    energies.append(float(f.readline().strip()))

        found = False
        while not found:
            line = f.readline()
            if '[GEOMETRIES] (XYZ)' in line:
                found = True
                current_line = f.tell()
                number_of_atoms = int(f.readline().strip())
                f.seek(current_line)

        cartesians = []
        for energy in energies:
            cartesian = Cartesian.read_xyz(
                f, start_index=start_index, get_bonds=get_bonds,
                nrows=number_of_atoms, engine='python')
            cartesian.metadata['energy'] = energy
            cartesians.append(cartesian)

    return cartesians
<SYSTEM_TASK:> Join list of cartesians into one molecule. <END_TASK>
<USER_TASK:>
Description:
def concat(cartesians, ignore_index=False, keys=None):
    """Join a list of cartesians into one molecule.

    Wrapper around the :func:`pandas.concat` function.
    Default values are the same as in the pandas function except for
    ``verify_integrity``, which is set to true in case of this library.

    Args:
        ignore_index (sequence, bool, int): If it is a boolean, it
            behaves like in the description of
            :meth:`pandas.DataFrame.append`.
            If it is a sequence, it becomes the new index.
            If it is an integer,
            ``range(ignore_index, ignore_index + len(new))``
            becomes the new index.
        keys (sequence): If multiple levels passed, should contain tuples.
            Construct hierarchical index using the passed keys as
            the outermost level.

    Returns:
        Cartesian:
    """
    frames = [molecule._frame for molecule in cartesians]
    if type(ignore_index) is bool:
        new = pd.concat(frames, ignore_index=ignore_index, keys=keys,
                        verify_integrity=True)
    else:
        new = pd.concat(frames, ignore_index=True, keys=keys,
                        verify_integrity=True)
        if type(ignore_index) is int:
            new.index = range(ignore_index, ignore_index + len(new))
        else:
            new.index = ignore_index
    return cartesians[0].__class__(new)
<SYSTEM_TASK:> Matrix multiplication between A and B <END_TASK>
<USER_TASK:>
Description:
def dot(A, B):
    """Matrix multiplication between A and B

    This function is equivalent to ``A @ B``, which is unfortunately
    not possible under python 2.x.

    Args:
        A (sequence):
        B (sequence):

    Returns:
        sequence:
    """
    try:
        result = A.__matmul__(B)
        if result is NotImplemented:
            result = B.__rmatmul__(A)
    except AttributeError:
        result = B.__rmatmul__(A)
    return result
<SYSTEM_TASK:> Orthonormalizes righthandedly a given 3D basis. <END_TASK>
<USER_TASK:>
Description:
def orthonormalize_righthanded(basis):
    """Orthonormalizes righthandedly a given 3D basis.

    This function returns a right-handed orthonormalized basis.
    Since only the first two vectors in the basis are used, it does not
    matter if you give two or three vectors.

    Right handed means that:

    .. math::

        \\vec{e_1} \\times \\vec{e_2} &= \\vec{e_3} \\\\
        \\vec{e_2} \\times \\vec{e_3} &= \\vec{e_1} \\\\
        \\vec{e_3} \\times \\vec{e_1} &= \\vec{e_2} \\\\

    Args:
        basis (np.array): An array of shape = (3,2) or (3,3)

    Returns:
        new_basis (np.array): A right-handed orthonormalized basis.
    """
    v1, v2 = basis[:, 0], basis[:, 1]
    e1 = normalize(v1)
    e3 = normalize(np.cross(e1, v2))
    e2 = normalize(np.cross(e3, e1))
    return np.array([e1, e2, e3]).T
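A small self-check of both guarantees (orthonormality and right-handedness), using only numpy and the function above:

    import numpy as np

    basis = np.array([[1., 1.],
                      [0., 1.],
                      [0., 0.]])        # two non-orthogonal vectors in 3D
    B = orthonormalize_righthanded(basis)
    assert np.allclose(np.dot(B.T, B), np.eye(3))             # orthonormal
    assert np.allclose(np.cross(B[:, 0], B[:, 1]), B[:, 2])   # right-handed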
<SYSTEM_TASK:> Calculate the optimal rotation from ``P`` unto ``Q``. <END_TASK>
<USER_TASK:>
Description:
def get_kabsch_rotation(Q, P):
    """Calculate the optimal rotation from ``P`` unto ``Q``.

    Using the Kabsch algorithm the optimal rotation matrix
    for the rotation of ``other`` unto ``self`` is calculated.
    The algorithm is described very well in
    `wikipedia <http://en.wikipedia.org/wiki/Kabsch_algorithm>`_.

    Args:
        other (Cartesian):

    Returns:
        :class:`~numpy.array`: Rotation matrix
    """
    # Naming of variables follows the wikipedia article:
    # http://en.wikipedia.org/wiki/Kabsch_algorithm
    A = np.dot(np.transpose(P), Q)
    # One can't initialize an array over its transposed
    V, S, W = np.linalg.svd(A)  # pylint:disable=unused-variable
    W = W.T
    d = np.linalg.det(np.dot(W, V.T))
    return np.linalg.multi_dot((W, np.diag([1., 1., d]), V.T))
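A quick self-check of the convention used here (points stored as rows, so q_i ≈ U p_i): rotating a point set by a known R and feeding it back recovers R.

    import numpy as np

    theta = 0.3
    R = np.array([[np.cos(theta), -np.sin(theta), 0.],
                  [np.sin(theta),  np.cos(theta), 0.],
                  [0.,             0.,            1.]])
    P = np.array([[1., 0., 0.],
                  [0., 2., 0.],
                  [0., 0., 3.],
                  [1., 1., 1.]])
    Q = np.dot(P, R.T)               # rotate every point by R
    U = get_kabsch_rotation(Q, P)    # recover the rotation
    assert np.allclose(U, R)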
<SYSTEM_TASK:> Apply the gradient for transformation to Zmatrix space onto cart_dist. <END_TASK>
<USER_TASK:>
Description:
def apply_grad_zmat_tensor(grad_C, construction_table, cart_dist):
    """Apply the gradient for transformation to Zmatrix space onto cart_dist.

    Args:
        grad_C (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
            The mathematical details of the index layout is explained in
            :meth:`~chemcoord.Cartesian.get_grad_zmat()`.
        construction_table (pandas.DataFrame): Explained in
            :meth:`~chemcoord.Cartesian.get_construction_table()`.
        cart_dist (:class:`~chemcoord.Cartesian`):
            Distortions in cartesian space.

    Returns:
        :class:`Zmat`: Distortions in Zmatrix space.
    """
    if (construction_table.index != cart_dist.index).any():
        message = "construction_table and cart_dist must use the same index"
        raise ValueError(message)

    X_dist = cart_dist.loc[:, ['x', 'y', 'z']].values.T
    C_dist = np.tensordot(grad_C, X_dist, axes=([3, 2], [0, 1])).T
    if C_dist.dtype == np.dtype('i8'):
        C_dist = C_dist.astype('f8')
    try:
        C_dist[:, [1, 2]] = np.rad2deg(C_dist[:, [1, 2]])
    except AttributeError:
        C_dist[:, [1, 2]] = sympy.deg(C_dist[:, [1, 2]])

    from chemcoord.internal_coordinates.zmat_class_main import Zmat
    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    dtypes = ['O', 'i8', 'f8', 'i8', 'f8', 'i8', 'f8']
    new = pd.DataFrame(data=np.zeros((len(construction_table), 7)),
                       index=cart_dist.index, columns=cols, dtype='f8')
    new = new.astype(dict(zip(cols, dtypes)))
    new.loc[:, ['b', 'a', 'd']] = construction_table
    new.loc[:, 'atom'] = cart_dist.loc[:, 'atom']
    new.loc[:, ['bond', 'angle', 'dihedral']] = C_dist
    return Zmat(new, _metadata={'last_valid_cartesian': cart_dist})
<SYSTEM_TASK:> Remove the item at the given position in the list, and return it. <END_TASK>
<USER_TASK:>
Description:
def pop(self, i=None):
    """Remove the item at the given position in the list, and return it.

    If no index is specified, removes and returns the last item in the
    list."""
    if i is None:
        i = len(self) - 1
    val = self[i]
    del(self[i])
    return val
<SYSTEM_TASK:> Given set of equivalences, return map of transitive equivalence classes. <END_TASK>
<USER_TASK:>
Description:
def get_scc_from_tuples(constraints):
    """Given set of equivalences, return map of transitive equivalence classes.

    >> constraints = [(1,2), (2,3)]
    >> get_scc_from_tuples(constraints)
    {
        1: (1, 2, 3),
        2: (1, 2, 3),
        3: (1, 2, 3),
    }
    """
    classes = unionfind.classes(constraints)
    return dict((x, tuple(c)) for x, c in classes.iteritems())
<SYSTEM_TASK:> Parse a list of field names, possibly including dot-separated subform <END_TASK>
<USER_TASK:>
Description:
def _parse_field_list(fieldnames, include_parents=False):
    """
    Parse a list of field names, possibly including dot-separated subform
    fields, into an internal ParsedFieldList object representing the base
    fields and subform listed.

    :param fieldnames: a list of field names as strings.  dot-separated
        names are interpreted as subform fields.
    :param include_parents: optional boolean, defaults to False.  if True,
        subform fields implicitly include their parent fields in the
        parsed list.
    """
    field_parts = (name.split('.') for name in fieldnames)
    return _collect_fields(field_parts, include_parents)
<SYSTEM_TASK:> Generate a dictionary based on the data in an XmlObject instance to pass as <END_TASK>
<USER_TASK:>
Description:
def xmlobject_to_dict(instance, fields=None, exclude=None, prefix=''):
    """
    Generate a dictionary based on the data in an XmlObject instance to
    pass as a Form's ``initial`` keyword argument.

    :param instance: instance of :class:`~eulxml.xmlmap.XmlObject`
    :param fields: optional list of fields - if specified, only the
        named fields will be included in the data returned
    :param exclude: optional list of fields to exclude from the data
    """
    data = {}

    # convert prefix to combining form for convenience
    if prefix:
        prefix = '%s-' % prefix
    else:
        prefix = ''

    for name, field in six.iteritems(instance._fields):
        # not editable?
        if fields and name not in fields:
            continue
        if exclude and name in exclude:
            continue
        if isinstance(field, xmlmap.fields.NodeField):
            nodefield = getattr(instance, name)
            if nodefield is not None:
                subprefix = '%s%s' % (prefix, name)
                node_data = xmlobject_to_dict(nodefield, prefix=subprefix)
                data.update(node_data)   # FIXME: fields/exclude
        if isinstance(field, xmlmap.fields.NodeListField):
            for i, child in enumerate(getattr(instance, name)):
                subprefix = '%s%s-%d' % (prefix, name, i)
                node_data = xmlobject_to_dict(child, prefix=subprefix)
                data.update(node_data)   # FIXME: fields/exclude
        else:
            data[prefix + name] = getattr(instance, name)

    return data
<SYSTEM_TASK:> Save bound form data into the XmlObject model instance and return the <END_TASK>
<USER_TASK:>
Description:
def update_instance(self):
    """Save bound form data into the XmlObject model instance and return
    the updated instance."""

    # NOTE: django model form has a save method - not applicable here,
    # since an XmlObject by itself is not expected to have a save method
    # (only likely to be saved in context of a fedora or exist object)

    if hasattr(self, 'cleaned_data'):   # possible to have an empty object/no data
        opts = self._meta

        # NOTE: _fields doesn't seem to order, which is
        # problematic for some xml (e.g., where order matters for validity)

        # use field order as declared in the form for update order
        # when possible.
        # (NOTE: this could be problematic also, since display order may
        # not always be the same as schema order)
        fields_in_order = []
        if hasattr(self.Meta, 'fields'):
            fields_in_order.extend(self.Meta.fields)
            fields_in_order.extend([name for name in six.iterkeys(self.instance._fields)
                                    if name in self.Meta.fields])
        else:
            fields_in_order = self.instance._fields.keys()

        for name in fields_in_order:
            # for name in self.instance._fields.iterkeys():
            # for name in self.declared_fields.iterkeys():
            if opts.fields and name not in opts.parsed_fields.fields:
                continue
            if opts.exclude and name in opts.parsed_exclude.fields:
                continue
            if name in self.cleaned_data:
                # special case: we don't want empty attributes and elements
                # for fields which returned no data from the form
                # converting '' to None and letting XmlObject handle
                if self.cleaned_data[name] == '':
                    self.cleaned_data[name] = None
                setattr(self.instance, name, self.cleaned_data[name])

    # update sub-model portions via any subforms
    for name, subform in six.iteritems(self.subforms):
        self._update_subinstance(name, subform)
    for formset in six.itervalues(self.formsets):
        formset.update_instance()
    return self.instance
<SYSTEM_TASK:> Save bound data for a single subform into the XmlObject model <END_TASK>
<USER_TASK:>
Description:
def _update_subinstance(self, name, subform):
    """Save bound data for a single subform into the XmlObject model
    instance."""
    old_subinstance = getattr(self.instance, name)
    new_subinstance = subform.update_instance()

    # if our instance previously had no node for the subform AND the
    # updated one has data, then attach the new node.
    if old_subinstance is None and not new_subinstance.is_empty():
        setattr(self.instance, name, new_subinstance)

    # on the other hand, if the instance previously had a node for the
    # subform AND the updated one is empty, then remove the node.
    if old_subinstance is not None and new_subinstance.is_empty():
        delattr(self.instance, name)
<SYSTEM_TASK:> Creates an RPC client to a bitcoind instance. <END_TASK>
<USER_TASK:>
Description:
def create_bitcoind_connection( rpc_username, rpc_password, server, port, use_https, timeout ):
    """
    Creates an RPC client to a bitcoind instance.
    It will have ".opts" defined as a member, which will be a dict that
    stores the above connection options.
    """
    from .bitcoin_blockchain import AuthServiceProxy

    global do_wrap_socket, create_ssl_authproxy

    log.debug("[%s] Connect to bitcoind at %s://%s@%s:%s, timeout=%s" %
              (os.getpid(), 'https' if use_https else 'http',
               rpc_username, server, port, timeout))

    protocol = 'https' if use_https else 'http'
    if not server or len(server) < 1:
        raise Exception('Invalid bitcoind host address.')

    if not port or not is_valid_int(port):
        raise Exception('Invalid bitcoind port number.')

    authproxy_config_uri = '%s://%s:%s@%s:%s' % (protocol, rpc_username, rpc_password, server, port)

    if use_https:
        # TODO: ship with a cert
        if do_wrap_socket:
            # ssl._create_unverified_context and ssl.create_default_context
            # are not supported.  wrap the socket directly
            connection = BitcoindConnection( server, int(port), timeout=timeout )
            ret = AuthServiceProxy(authproxy_config_uri, connection=connection)

        elif create_ssl_authproxy:
            # ssl has _create_unverified_context, so we're good to go
            ret = AuthServiceProxy(authproxy_config_uri, timeout=timeout)

        else:
            # have to set up an unverified context ourselves
            ssl_ctx = ssl.create_default_context()
            ssl_ctx.check_hostname = False
            ssl_ctx.verify_mode = ssl.CERT_NONE
            connection = httplib.HTTPSConnection( server, int(port), context=ssl_ctx, timeout=timeout )
            ret = AuthServiceProxy(authproxy_config_uri, connection=connection)

    else:
        ret = AuthServiceProxy(authproxy_config_uri)

    # remember the options
    bitcoind_opts = {
        "bitcoind_user": rpc_username,
        "bitcoind_passwd": rpc_password,
        "bitcoind_server": server,
        "bitcoind_port": port,
        "bitcoind_use_https": use_https,
        "bitcoind_timeout": timeout
    }

    setattr( ret, "opts", bitcoind_opts )
    return ret
<SYSTEM_TASK:> Create a connection to bitcoind, using a dict of config options. <END_TASK>
<USER_TASK:>
Description:
def connect_bitcoind_impl( bitcoind_opts ):
    """
    Create a connection to bitcoind, using a dict of config options.
    """
    if 'bitcoind_port' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_port'] is None:
        log.error("No port given")
        raise ValueError("No RPC port given (bitcoind_port)")

    if 'bitcoind_timeout' not in bitcoind_opts.keys() or bitcoind_opts['bitcoind_timeout'] is None:
        # default
        bitcoind_opts['bitcoind_timeout'] = 300

    try:
        int(bitcoind_opts['bitcoind_port'])
    except:
        log.error("Not an int: '%s'" % bitcoind_opts.get('bitcoind_port'))
        raise

    try:
        float(bitcoind_opts.get('bitcoind_timeout', 300))
    except:
        log.error("Not a float: '%s'" % bitcoind_opts.get('bitcoind_timeout', 300))
        raise

    return create_bitcoind_connection( bitcoind_opts['bitcoind_user'], bitcoind_opts['bitcoind_passwd'],
                                       bitcoind_opts['bitcoind_server'], int(bitcoind_opts['bitcoind_port']),
                                       bitcoind_opts.get('bitcoind_use_https', False),
                                       float(bitcoind_opts.get('bitcoind_timeout', 300)) )
<SYSTEM_TASK:> Connect to bitcoind <END_TASK>
<USER_TASK:>
Description:
def get_bitcoind_client(config_path=None, bitcoind_opts=None):
    """
    Connect to bitcoind
    """
    if bitcoind_opts is None and config_path is None:
        raise ValueError("Need bitcoind opts or config path")

    bitcoind_opts = get_bitcoind_config(config_file=config_path)
    log.debug("Connect to bitcoind at %s:%s (%s)" % (bitcoind_opts['bitcoind_server'], bitcoind_opts['bitcoind_port'], config_path))

    client = connect_bitcoind_impl( bitcoind_opts )
    return client
<SYSTEM_TASK:> Make sure the private key given is compressed or not compressed <END_TASK>
<USER_TASK:>
Description:
def set_privkey_compressed(privkey, compressed=True):
    """
    Make sure the private key given is compressed or not compressed
    """
    if len(privkey) != 64 and len(privkey) != 66:
        raise ValueError("expected 32-byte private key as a hex string")

    # compressed?
    if compressed and len(privkey) == 64:
        privkey += '01'

    if not compressed and len(privkey) == 66:
        if privkey[-2:] != '01':
            raise ValueError("private key does not end in '01'")

        privkey = privkey[:-2]

    return privkey
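Two quick checks of the behavior above, using a placeholder 64-hex-digit key:

    priv = 'aa' * 32   # placeholder key, for illustration only
    assert set_privkey_compressed(priv) == priv + '01'
    assert set_privkey_compressed(priv + '01', compressed=False) == priv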
<SYSTEM_TASK:> Get the uncompressed hex form of a private key <END_TASK>
<USER_TASK:>
Description:
def get_pubkey_hex( privatekey_hex ):
    """
    Get the uncompressed hex form of a private key
    """
    if not isinstance(privatekey_hex, (str, unicode)):
        raise ValueError("private key is not a hex string but {}".format(str(type(privatekey_hex))))

    # remove 'compressed' hint
    if len(privatekey_hex) > 64:
        if privatekey_hex[-2:] != '01':
            raise ValueError("private key does not end in 01")

        privatekey_hex = privatekey_hex[:64]

    # get hex public key
    privatekey_int = int(privatekey_hex, 16)
    privk = ec.derive_private_key(privatekey_int, ec.SECP256K1(), default_backend())
    pubk = privk.public_key()
    x = pubk.public_numbers().x
    y = pubk.public_numbers().y

    pubkey_hex = "04{:064x}{:064x}".format(x, y)
    return pubkey_hex
<SYSTEM_TASK:> Decode a private key for ecdsa signature <END_TASK>
<USER_TASK:>
Description:
def decode_privkey_hex(privkey_hex):
    """
    Decode a private key for ecdsa signature
    """
    if not isinstance(privkey_hex, (str, unicode)):
        raise ValueError("private key is not a string")

    # force uncompressed
    priv = str(privkey_hex)
    if len(priv) > 64:
        if priv[-2:] != '01':
            raise ValueError("private key does not end in '01'")

        priv = priv[:64]

    pk_i = int(priv, 16)
    return pk_i
<SYSTEM_TASK:> Decode a public key for ecdsa verification <END_TASK>
<USER_TASK:>
Description:
def decode_pubkey_hex(pubkey_hex):
    """
    Decode a public key for ecdsa verification
    """
    if not isinstance(pubkey_hex, (str, unicode)):
        raise ValueError("public key is not a string")

    pubk = keylib.key_formatting.decompress(str(pubkey_hex))
    assert len(pubk) == 130

    pubk_raw = pubk[2:]
    pubk_i = (int(pubk_raw[:64], 16), int(pubk_raw[64:], 16))
    return pubk_i
<SYSTEM_TASK:> Decode a signature into r, s <END_TASK>
<USER_TASK:>
Description:
def decode_signature(sigb64):
    """
    Decode a signature into r, s
    """
    sig_bin = base64.b64decode(sigb64)
    if len(sig_bin) != 64:
        raise ValueError("Invalid base64 signature")

    sig_hex = sig_bin.encode('hex')
    sig_r = int(sig_hex[:64], 16)
    sig_s = int(sig_hex[64:], 16)
    return sig_r, sig_s
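The inverse operation, encode_signature, is referenced by finalize and sign_digest below but not shown in this excerpt; a minimal sketch consistent with the 32+32-byte layout decoded here (hypothetical, not necessarily the project's actual helper) would be:

    import base64
    import binascii

    def encode_signature(sig_r, sig_s):
        # Pack r and s as two 32-byte big-endian integers, then base64-encode.
        sig_hex = '{:064x}{:064x}'.format(sig_r, sig_s)
        return base64.b64encode(binascii.unhexlify(sig_hex))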
<SYSTEM_TASK:> Sign a string of data. <END_TASK>
<USER_TASK:>
Description:
def sign_raw_data(raw_data, privatekey_hex):
    """
    Sign a string of data.
    Returns signature as a base64 string
    """
    if not isinstance(raw_data, (str, unicode)):
        raise ValueError("Data is not a string")

    raw_data = str(raw_data)

    si = ECSigner(privatekey_hex)
    si.update(raw_data)
    return si.finalize()
<SYSTEM_TASK:> Verify the signature over a string, given the public key <END_TASK>
<USER_TASK:>
Description:
def verify_raw_data(raw_data, pubkey_hex, sigb64):
    """
    Verify the signature over a string, given the public key
    and base64-encoded signature.
    Return True on success.
    Return False on error.
    """
    if not isinstance(raw_data, (str, unicode)):
        raise ValueError("data is not a string")

    raw_data = str(raw_data)

    vi = ECVerifier(pubkey_hex, sigb64)
    vi.update(raw_data)
    return vi.verify()
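Assuming ECSigner and ECVerifier are the helper classes whose update/finalize/verify methods appear later in this excerpt, a sign-then-verify round trip would look like:

    privkey_hex = '1' * 64                      # placeholder key for illustration
    pubkey_hex = get_pubkey_hex(privkey_hex)
    sigb64 = sign_raw_data('hello world', privkey_hex)
    assert verify_raw_data('hello world', pubkey_hex, sigb64)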
<SYSTEM_TASK:> Given a digest and a private key, sign it. <END_TASK>
<USER_TASK:>
Description:
def sign_digest(hash_hex, privkey_hex, hashfunc=hashlib.sha256):
    """
    Given a digest and a private key, sign it.
    Return the base64-encoded signature
    """
    if not isinstance(hash_hex, (str, unicode)):
        raise ValueError("hash hex is not a string")

    hash_hex = str(hash_hex)
    pk_i = decode_privkey_hex(privkey_hex)
    privk = ec.derive_private_key(pk_i, ec.SECP256K1(), default_backend())

    sig = privk.sign(hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256())))

    sig_r, sig_s = decode_dss_signature(sig)
    sigb64 = encode_signature(sig_r, sig_s)
    return sigb64
<SYSTEM_TASK:> Get the base64-encoded signature itself. <END_TASK>
<USER_TASK:>
Description:
def finalize(self):
    """
    Get the base64-encoded signature itself.
    Can only be called once.
    """
    signature = self.signer.finalize()
    sig_r, sig_s = decode_dss_signature(signature)
    sig_b64 = encode_signature(sig_r, sig_s)
    return sig_b64
<SYSTEM_TASK:> Update the hash used to generate the signature <END_TASK>
<USER_TASK:>
Description:
def update(self, data):
    """
    Update the hash used to generate the signature
    """
    try:
        self.verifier.update(data)
    except TypeError:
        log.error("Invalid data: {} ({})".format(type(data), data))
        raise
<SYSTEM_TASK:> Semiconvergents of continued fraction expansion of a Fraction x. <END_TASK>
<USER_TASK:>
Description:
def semiconvergents(x):
    """Semiconvergents of continued fraction expansion of a Fraction x."""
    (q, n), d = divmod(x.numerator, x.denominator), x.denominator
    yield Fraction(q)
    p0, q0, p1, q1 = 1, 0, q, 1
    while n:
        (q, n), d = divmod(d, n), n
        for _ in range(q):
            p0, q0 = p0+p1, q0+q1
            yield Fraction(p0, q0)
        p0, q0, p1, q1 = p1, q1, p0, q0
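For example, the semiconvergents of 7/3 (continued fraction [2; 3]) are 2, 3, 5/2 and 7/3:

    from fractions import Fraction

    assert list(semiconvergents(Fraction(7, 3))) == \
        [Fraction(2), Fraction(3), Fraction(5, 2), Fraction(7, 3)]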
<SYSTEM_TASK:> Replace values given in 'to_replace' with 'value'. <END_TASK>
<USER_TASK:>
Description:
def replace(self, to_replace=None, value=None, inplace=False,
            limit=None, regex=False, method='pad', axis=None):
    """Replace values given in 'to_replace' with 'value'.

    Wrapper around the :meth:`pandas.DataFrame.replace` method.
    """
    if inplace:
        self._frame.replace(to_replace=to_replace, value=value,
                            inplace=inplace, limit=limit,
                            regex=regex, method=method, axis=axis)
    else:
        new = self.__class__(self._frame.replace(
            to_replace=to_replace, value=value, inplace=inplace,
            limit=limit, regex=regex, method=method, axis=axis))
        new.metadata = self.metadata.copy()
        new._metadata = copy.deepcopy(self._metadata)
        return new
<SYSTEM_TASK:> Append rows of `other` to the end of this frame, returning a new object. <END_TASK>
<USER_TASK:>
Description:
def append(self, other, ignore_index=False):
    """Append rows of `other` to the end of this frame, returning a new object.

    Wrapper around the :meth:`pandas.DataFrame.append` method.

    Args:
        other (Cartesian):
        ignore_index (sequence, bool, int): If it is a boolean, it
            behaves like in the description of
            :meth:`pandas.DataFrame.append`.
            If it is a sequence, it becomes the new index.
            If it is an integer,
            ``range(ignore_index, ignore_index + len(new))``
            becomes the new index.

    Returns:
        Cartesian:
    """
    if not isinstance(other, self.__class__):
        raise ValueError('May only append instances of same type.')
    if type(ignore_index) is bool:
        new_frame = self._frame.append(other._frame,
                                       ignore_index=ignore_index,
                                       verify_integrity=True)
    else:
        new_frame = self._frame.append(other._frame,
                                       ignore_index=True,
                                       verify_integrity=True)
        if type(ignore_index) is int:
            new_frame.index = range(ignore_index,
                                    ignore_index + len(new_frame))
        else:
            new_frame.index = ignore_index
    return self.__class__(new_frame)
<SYSTEM_TASK:> With this decorator, you can return ORM model instance, or ORM query in view function directly. <END_TASK> <USER_TASK:> Description: def marshal_with_model(model, excludes=None, only=None, extends=None): """With this decorator, you can return ORM model instance, or ORM query in view function directly. We'll transform these objects to standard python data structures, like Flask-RESTFul's `marshal_with` decorator. And, you don't need define fields at all. You can specific columns to be returned, by `excludes` or `only` parameter. (Don't use these tow parameters at the same time, otherwise only `excludes` parameter will be used.) If you want return fields that outside of model, or overwrite the type of some fields, use `extends` parameter to specify them. Notice: this function only support `Flask-SQLAlchemy` Example: class Student(db.Model): id = Column(Integer, primary_key=True) name = Column(String(100)) age = Column(Integer) class SomeApi(Resource): @marshal_with_model(Student, excludes=['id']) def get(self): return Student.query # response: [{"name": "student_a", "age": "16"}, {"name": "student_b", "age": 18}] class AnotherApi(Resource): @marshal_with_model(Student, extends={"nice_guy": fields.Boolean, "age": fields.String}) def get(self): student = Student.query.get(1) student.nice_guy = True student.age = "young" if student.age < 18 else "old" # transform int field to string return student """
    if isinstance(excludes, six.string_types):
        excludes = [excludes]
    if excludes and only:
        only = None
    elif isinstance(only, six.string_types):
        only = [only]

    field_definition = {}
    for col in model.__table__.columns:
        if only:
            if col.name not in only:
                continue
        elif excludes and col.name in excludes:
            continue
        field_definition[col.name] = _type_map[col.type.python_type.__name__]

    if extends is not None:
        for k, v in extends.items():
            field_definition[k] = v

    def decorated(f):
        @wraps(f)
        @_marshal_with(field_definition)
        def wrapper(*args, **kwargs):
            result = f(*args, **kwargs)
            return result if not _fields.is_indexable_but_not_string(result) \
                else [v for v in result]
        return wrapper
    return decorated
<SYSTEM_TASK:>
In some cases, one view function may return different models in different situations.
<END_TASK>
<USER_TASK:>
Description:
def quick_marshal(*args, **kwargs):
    """In some cases, one view function may return different models in
    different situations. Using `marshal_with_model` to handle this is
    tedious; this function simplifies the process.

    Usage:
        quick_marshal(args_to_marshal_with_model)(db_instance_or_query)
    """
    @marshal_with_model(*args, **kwargs)
    def fn(value):
        return value
    return fn
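For example, a view that returns different models depending on the request could look like this. This is a sketch only; ``Student``, ``Teacher``, and the routing of ``want_teacher`` are assumed, not part of the original:

    class PersonApi(Resource):
        def get(self, want_teacher):
            if want_teacher:
                return quick_marshal(Teacher, excludes=['id'])(Teacher.query)
            return quick_marshal(Student, only=['name', 'age'])(Student.query)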
<SYSTEM_TASK:>
Improve Flask-RESTful's original field type
<END_TASK>
<USER_TASK:>
Description:
def _wrap_field(field):
    """Improve Flask-RESTful's original field type"""
    class WrappedField(field):
        def output(self, key, obj):
            value = _fields.get_value(
                key if self.attribute is None else self.attribute, obj)
            # For all fields, when the value is null (None), return null
            # directly instead of the field type's default value (e.g. int's
            # default is 0), because sometimes the client **needs** to know
            # whether a field of the model was empty in order to decide its
            # behavior.
            return None if value is None else self.format(value)
    return WrappedField
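A sketch of the behavioral difference, assuming Flask-RESTful's ``fields.Integer`` (whose output for a missing value is its default, 0) and a module where ``_wrap_field`` is in scope:

    from flask_restful import fields

    class Obj(object):
        age = None

    WrappedInteger = _wrap_field(fields.Integer)
    fields.Integer().output('age', Obj())   # 0 -- the type's default
    WrappedInteger().output('age', Obj())   # None -- null is preserved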
<SYSTEM_TASK:>
Create a configuration file.
<END_TASK>
<USER_TASK:>
Description:
def write_configuration_file(filepath=_give_default_file_path(),
                             overwrite=False):
    """Create a configuration file.

    Writes the current state of settings into a configuration file.

    .. note:: Since a file is permanently written, this function is,
        strictly speaking, not side-effect free.

    Args:
        filepath (str): Where to write the file.
            The default is under both UNIX and Windows ``~/.chemcoordrc``.
        overwrite (bool):

    Returns:
        None:
    """
    config = configparser.ConfigParser()
    config.read_dict(settings)

    if os.path.isfile(filepath) and not overwrite:
        try:
            raise FileExistsError
        except NameError:  # because of python2
            warn('File exists already and overwrite is False (default).')
    else:
        with open(filepath, 'w') as configfile:
            config.write(configfile)
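Typical usage might look like this. A sketch only: the import path is an assumption and should be adjusted to wherever the project actually exposes the function and the ``settings`` dictionary:

    # assumed import path -- adjust for your installation
    from chemcoord.configuration import write_configuration_file, settings

    settings['defaults']['use_lookup'] = True
    write_configuration_file(overwrite=True)  # persists settings to ~/.chemcoordrc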
<SYSTEM_TASK:>
Check if this component has subseries or not.
<END_TASK>
<USER_TASK:>
Description:
def hasSubseries(self):
    """Check if this component has subseries or not.

    Determined based on level of first subcomponent (series or subseries)
    or if first component has subcomponents present.

    :rtype: boolean
    """
    if self.c and self.c[0] and \
            ((self.c[0].level in ('series', 'subseries')) or
             (self.c[0].c and self.c[0].c[0])):
        return True
    else:
        return False
<SYSTEM_TASK:>
Sample a latent state from prior.
<END_TASK>
<USER_TASK:>
Description:
def initialize(
        self, M_c, M_r, T, seed, initialization=b'from_the_prior',
        row_initialization=-1, n_chains=1,
        ROW_CRP_ALPHA_GRID=(), COLUMN_CRP_ALPHA_GRID=(),
        S_GRID=(), MU_GRID=(), N_GRID=31,):
    """Sample a latent state from prior.

    T, list of lists: The data table in mapped representation (all floats,
    generated by data_utils.read_data_objects)

    :returns: X_L, X_D -- the latent state
    """
    # FIXME: why is M_r passed?
    arg_tuples = self.get_initialize_arg_tuples(
        M_c, M_r, T, initialization, row_initialization, n_chains,
        ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
        S_GRID, MU_GRID, N_GRID,
        make_get_next_seed(seed),)

    chain_tuples = self.mapper(self.do_initialize, arg_tuples)
    X_L_list, X_D_list = zip(*chain_tuples)
    if n_chains == 1:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    return X_L_list, X_D_list
<SYSTEM_TASK:>
Evolve the latent state by running MCMC transition kernels.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, M_c, T, X_L, X_D, seed, kernel_list=(), n_steps=1, c=(),
            r=(), max_iterations=-1, max_time=-1, do_diagnostics=False,
            diagnostics_every_N=1, ROW_CRP_ALPHA_GRID=(),
            COLUMN_CRP_ALPHA_GRID=(), S_GRID=(), MU_GRID=(), N_GRID=31,
            do_timing=False, CT_KERNEL=0, progress=None,):
    """Evolve the latent state by running MCMC transition kernels.

    :param seed: The random seed
    :type seed: int
    :param M_c: The column metadata
    :type M_c: dict
    :param T: The data table in mapped representation (all floats,
        generated by data_utils.read_data_objects)
    :param X_L: the latent variables associated with the latent state
    :type X_L: dict
    :param X_D: the particular cluster assignments of each row in each view
    :type X_D: list of lists
    :param kernel_list: names of the MCMC transition kernels to run
    :type kernel_list: list of strings
    :param n_steps: the number of times to run each MCMC transition kernel
    :type n_steps: int
    :param c: the (global) column indices to run MCMC transition kernels on
    :type c: list of ints
    :param r: the (global) row indices to run MCMC transition kernels on
    :type r: list of ints
    :param max_iterations: the maximum number of times to run each MCMC
        transition kernel. Applicable only if max_time != -1.
    :type max_iterations: int
    :param max_time: the maximum amount of time (seconds) to run MCMC
        transition kernels for before stopping to return progress
    :type max_time: float
    :param progress: a function accepting
        (n_steps, max_time, step_idx, elapsed_secs, end=None) where
        `n_steps` is the total number of transition steps, `max_time` is
        the timeout in seconds, `step_idx` is the number of transitions so
        far, `elapsed_secs` is the amount of time so far, and `end=None` is
        an optional kwarg for indicating the analysis has been completed.
        For example, `progress` may be used to print a progress bar to
        standard out.
    :type progress: function pointer.
    :returns: X_L, X_D -- the evolved latent state
    """
    if n_steps <= 0:
        raise ValueError("You must do at least one analyze step.")
    if CT_KERNEL not in [0, 1]:
        raise ValueError("CT_KERNEL must be 0 (Gibbs) or 1 (MH)")
    if do_timing:
        # Diagnostics and timing are exclusive.
        do_diagnostics = False
    diagnostic_func_dict, reprocess_diagnostics_func = \
        do_diagnostics_to_func_dict(do_diagnostics)

    X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)
    arg_tuples = self.get_analyze_arg_tuples(
        M_c, T, X_L_list, X_D_list, kernel_list, n_steps, c, r,
        max_iterations, max_time, diagnostic_func_dict, diagnostics_every_N,
        ROW_CRP_ALPHA_GRID, COLUMN_CRP_ALPHA_GRID,
        S_GRID, MU_GRID, N_GRID,
        do_timing, CT_KERNEL, progress,
        make_get_next_seed(seed))

    chain_tuples = self.mapper(self.do_analyze, arg_tuples)
    X_L_list, X_D_list, diagnostics_dict_list = zip(*chain_tuples)
    if do_timing:
        timing_list = diagnostics_dict_list
    if not was_multistate:
        X_L_list, X_D_list = X_L_list[0], X_D_list[0]
    ret_tuple = X_L_list, X_D_list

    if diagnostic_func_dict is not None:
        diagnostics_dict = munge_diagnostics(diagnostics_dict_list)
        if reprocess_diagnostics_func is not None:
            diagnostics_dict = reprocess_diagnostics_func(diagnostics_dict)
        ret_tuple = ret_tuple + (diagnostics_dict, )
    if do_timing:
        ret_tuple = ret_tuple + (timing_list, )
    return ret_tuple
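As a sketch, a ``progress`` callback matching the signature described in the docstring could print a simple status line. How exactly ``end`` is populated on completion is assumed from the description above:

    def print_progress(n_steps, max_time, step_idx, elapsed_secs, end=None):
        if end is not None:
            print('analyze: finished after %d steps (%.1f s)'
                  % (step_idx, elapsed_secs))
        else:
            print('analyze: step %d/%d (%.1f s elapsed)'
                  % (step_idx, n_steps, elapsed_secs))

    # X_L, X_D = engine.analyze(M_c, T, X_L, X_D, seed=0, n_steps=100,
    #                           progress=print_progress)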
<SYSTEM_TASK:>
Sample values from predictive distribution of the given latent state.
<END_TASK>
<USER_TASK:>
Description:
def simple_predictive_sample(self, M_c, X_L, X_D, Y, Q, seed, n=1):
    """Sample values from predictive distribution of the given latent state.

    :param Y: A list of constraints to apply when sampling. Each constraint
        is a triplet of (r, d, v): r is the row index, d is the column index
        and v is the value of the constraint
    :type Y: list of lists
    :param Q: A list of values to sample. Each value is a doublet of (r, d):
        r is the row index, d is the column index
    :type Q: list of lists
    :param n: the number of samples to draw
    :type n: int
    :returns: list of floats. Samples in the same order as specified by Q
    """
    get_next_seed = make_get_next_seed(seed)
    samples = _do_simple_predictive_sample(
        M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return samples
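For example, drawing five samples of (row 3, column 1) while constraining (row 3, column 0) to the value 2.0 could look like this. A sketch only: ``engine``, ``M_c``, ``X_L`` and ``X_D`` are assumed to exist from earlier initialize/analyze calls:

    Y = [(3, 0, 2.0)]  # constraint triplets (row, column, value)
    Q = [(3, 1)]       # query doublets (row, column)
    samples = engine.simple_predictive_sample(M_c, X_L, X_D, Y, Q, seed=0, n=5)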
<SYSTEM_TASK:>
Estimate mutual information for each pair of columns in Q given
<END_TASK>
<USER_TASK:>
Description:
def mutual_information(
        self, M_c, X_L_list, X_D_list, Q, seed, n_samples=1000):
    """Estimate mutual information for each pair of columns in Q given
    the set of samples.

    :param Q: List of tuples where each tuple contains the two column
        indexes to compare
    :type Q: list of two-tuples of ints
    :param n_samples: the number of simple predictive samples to use
    :type n_samples: int
    :returns: list of list -- where each sublist is a set of MIs and
        Linfoots from each crosscat sample.
    """
    get_next_seed = make_get_next_seed(seed)
    return iu.mutual_information(
        M_c, X_L_list, X_D_list, Q, get_next_seed, n_samples)
<SYSTEM_TASK:>
Computes the similarity of the given row to the target row,
<END_TASK>
<USER_TASK:>
Description:
def similarity(
        self, M_c, X_L_list, X_D_list, given_row_id, target_row_id,
        target_columns=None):
    """Computes the similarity of the given row to the target row,
    averaged over all the column indexes given by target_columns.

    :param given_row_id: the id of one of the rows to measure similarity
        between
    :type given_row_id: int
    :param target_row_id: the id of the other row to measure similarity
        between
    :type target_row_id: int
    :param target_columns: the columns to average the similarity over.
        Defaults to all columns.
    :type target_columns: int, string, or list of ints
    :returns: float
    """
    return su.similarity(
        M_c, X_L_list, X_D_list, given_row_id, target_row_id, target_columns)
<SYSTEM_TASK:>
Impute values from predictive distribution of the given latent state.
<END_TASK>
<USER_TASK:>
Description:
def impute(self, M_c, X_L, X_D, Y, Q, seed, n):
    """Impute values from predictive distribution of the given latent state.

    :param Y: A list of constraints to apply when sampling. Each constraint
        is a triplet of (r, d, v): r is the row index, d is the column index
        and v is the value of the constraint
    :type Y: list of lists
    :param Q: A list of values to sample. Each value is a doublet of (r, d):
        r is the row index, d is the column index
    :type Q: list of lists
    :param n: the number of samples to use in the imputation
    :type n: int
    :returns: list of floats -- imputed values in the same order as
        specified by Q
    """
    get_next_seed = make_get_next_seed(seed)
    e = su.impute(M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return e
<SYSTEM_TASK:>
Impute values and confidence of the value from the predictive
<END_TASK>
<USER_TASK:>
Description:
def impute_and_confidence(self, M_c, X_L, X_D, Y, Q, seed, n):
    """Impute values and confidence of the value from the predictive
    distribution of the given latent state.

    :param Y: A list of constraints to apply when sampling. Each constraint
        is a triplet of (r, d, v): r is the row index, d is the column index
        and v is the value of the constraint
    :type Y: list of lists
    :param Q: A list of values to sample. Each value is a doublet of (r, d):
        r is the row index, d is the column index
    :type Q: list of lists
    :param n: the number of samples to use in the imputation
    :type n: int
    :returns: list of lists -- list of (value, confidence) tuples in the
        same order as specified by Q
    """
    get_next_seed = make_get_next_seed(seed)
    if isinstance(X_L, (list, tuple)):
        assert isinstance(X_D, (list, tuple))
        # TODO: multistate impute doesn't exist yet
        # e, confidence = su.impute_and_confidence_multistate(
        #     M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
        e, confidence = su.impute_and_confidence(
            M_c, X_L, X_D, Y, Q, n, get_next_seed)
    else:
        e, confidence = su.impute_and_confidence(
            M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return (e, confidence)
<SYSTEM_TASK:>
Ensures dependency or independence between columns.
<END_TASK>
<USER_TASK:>
Description:
def ensure_col_dep_constraints(
        self, M_c, M_r, T, X_L, X_D, dep_constraints, seed,
        max_rejections=100):
    """Ensures dependency or independence between columns.

    `dep_constraints` is a list where each entry is an (int, int, bool)
    tuple: the first two entries are column indices and the third entry
    describes whether the columns are to be dependent (True) or
    independent (False).

    Behavior Notes:
    `ensure_col_dep_constraints` will add `col_ensure` enforcement to the
    metadata (top level of `X_L`); unensure_col will remove it. Calling
    ensure_col_dep_constraints twice will replace the first ensure.

    This operation destroys the existing `X_L` and `X_D` metadata; the
    user should be aware that it will clobber any existing analyses.

    Implementation Notes:
    Initialization is implemented via rejection (by repeatedly
    initializing states and throwing out those that do not adhere to
    dep_constraints). This means that if the constraints in
    dep_constraints are complex, or impossible, the rejection algorithm
    may fail.

    The returned metadata looks like this:

    >>> dep_constraints
    [(1, 2, True), (2, 5, True), (1, 3, False)]

    >>> X_L['col_ensure']
    {
        "dependent" : {
            1 : (1, 2, 5),
            2 : (1, 2, 5),
            5 : (1, 5, 2),
        },
        "independent" : {
            1 : [3],
            3 : [1],
        }
    }
    """
    X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)

    if was_multistate:
        num_states = len(X_L_list)
    else:
        num_states = 1

    dependencies = [(c[0], c[1]) for c in dep_constraints if c[2]]
    independencies = [(c[0], c[1]) for c in dep_constraints if not c[2]]

    col_ensure_md = dict()
    col_ensure_md[True] = {
        str(key): list(val) for key, val in
        gu.get_scc_from_tuples(dependencies).iteritems()
    }
    col_ensure_md[False] = {
        str(key): list(val) for key, val in
        gu.get_scc_from_tuples(independencies).iteritems()
    }

    def assert_dep_constraints(X_L, X_D, dep_constraints):
        for col1, col2, dep in dep_constraints:
            if not self.assert_col_dep_constraints(
                    X_L, X_D, col1, col2, dep, True):
                return False
        return True

    X_L_out = []
    X_D_out = []
    get_next_seed = make_get_next_seed(seed)

    for _ in range(num_states):
        counter = 0
        X_L_i, X_D_i = self.initialize(M_c, M_r, T, get_next_seed())
        while not assert_dep_constraints(X_L_i, X_D_i, dep_constraints):
            if counter > max_rejections:
                raise RuntimeError(
                    'Could not randomly generate a partition '
                    'that satisfies the constraints in dep_constraints.')
            counter += 1
            X_L_i, X_D_i = self.initialize(M_c, M_r, T, get_next_seed())

        X_L_i['col_ensure'] = dict()
        X_L_i['col_ensure']['dependent'] = col_ensure_md[True]
        X_L_i['col_ensure']['independent'] = col_ensure_md[False]

        X_D_out.append(X_D_i)
        X_L_out.append(X_L_i)

    if was_multistate:
        return X_L_out, X_D_out
    else:
        return X_L_out[0], X_D_out[0]
<SYSTEM_TASK:>
Ensures dependency or independence between rows with respect to
<END_TASK>
<USER_TASK:>
Description:
def ensure_row_dep_constraint(
        self, M_c, T, X_L, X_D, row1, row2, dependent=True, wrt=None,
        max_iter=100, force=False):
    """Ensures dependency or independence between rows with respect to
    columns."""
    X_L_list, X_D_list, was_multistate = su.ensure_multistate(X_L, X_D)

    if force:
        raise NotImplementedError
    else:
        kernel_list = ('row_partition_assignements',)
        for i, (X_L_i, X_D_i) in enumerate(zip(X_L_list, X_D_list)):
            iters = 0
            X_L_tmp = copy.deepcopy(X_L_i)
            X_D_tmp = copy.deepcopy(X_D_i)
            while not self.assert_row(
                    X_L_tmp, X_D_tmp, row1, row2,
                    dependent=dependent, wrt=wrt):
                if iters >= max_iter:
                    raise RuntimeError('Maximum ensure iterations reached.')
                # XXX No seed?
                res = self.analyze(
                    M_c, T, X_L_i, X_D_i, kernel_list=kernel_list,
                    n_steps=1, r=(row1,))
                X_L_tmp = res[0]
                X_D_tmp = res[1]
                iters += 1
            X_L_list[i] = X_L_tmp
            X_D_list[i] = X_D_tmp

    if was_multistate:
        return X_L_list, X_D_list
    else:
        return X_L_list[0], X_D_list[0]
<SYSTEM_TASK:>
Parse the given format specification and return a dictionary
<END_TASK>
<USER_TASK:>
Description:
def parse_format_specifier(specification):
    """
    Parse the given format specification and return a dictionary
    containing relevant values.
    """
    m = _parse_format_specifier_regex.match(specification)
    if m is None:
        raise ValueError(
            "Invalid format specifier: {!r}".format(specification))
    format_dict = m.groupdict('')

    # Convert zero-padding into fill and alignment.
    zeropad = format_dict.pop('zeropad')
    if zeropad:
        # If zero padding is requested, fill and align fields
        # should be absent.
        if format_dict['align']:
            raise ValueError(
                "Invalid format specifier: {!r}".format(specification))
        # Impossible to have 'fill' without 'align'.
        assert not format_dict['fill']
        format_dict['align'] = '='
        format_dict['fill'] = '0'

    # Default alignment is right-aligned.
    if not format_dict['align']:
        format_dict['align'] = '>'

    # Default fill character is space.
    if not format_dict['fill']:
        format_dict['fill'] = ' '

    # Default sign is '-'.
    if not format_dict['sign']:
        format_dict['sign'] = '-'

    # Convert minimum width to an int; default is zero.
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')

    # Convert precision to an int, or `None` if no precision given.
    if format_dict['precision']:
        format_dict['precision'] = int(format_dict['precision'][1:])
    else:
        format_dict['precision'] = None

    # If no rounding mode is given, assume 'N'.
    if not format_dict['rounding']:
        format_dict['rounding'] = 'N'

    return format_dict
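For instance, a specification like ``'>10.6f'`` would, given the defaults applied above, yield a dictionary along these lines. Only the keys handled explicitly in the code are shown; any further groups captured by the (unshown) regex are assumed and not listed:

    spec = parse_format_specifier('>10.6f')
    # spec['fill'] == ' '          (default fill character)
    # spec['align'] == '>'
    # spec['sign'] == '-'          (default sign)
    # spec['minimumwidth'] == 10
    # spec['precision'] == 6
    # spec['rounding'] == 'N'      (no rounding mode given)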
<SYSTEM_TASK:>
Return a dictionary representing the bonds.
<END_TASK>
<USER_TASK:>
Description:
def get_bonds(self, self_bonding_allowed=False, offset=3,
              modified_properties=None, use_lookup=False, set_lookup=True,
              atomic_radius_data=None):
    """Return a dictionary representing the bonds.

    .. warning:: This function is **not side-effect free**, since it
        assigns the output to a variable ``self._metadata['bond_dict']`` if
        ``set_lookup`` is ``True`` (which is the default). This is
        necessary for performance reasons.

    ``.get_bonds()`` will use or not use a lookup depending on
    ``use_lookup``. Using the lookup greatly increases performance, but
    could introduce bugs in certain situations.

    Just imagine a situation where the :class:`~Cartesian` is changed
    manually. If you later apply a method e.g. :meth:`~get_zmat()` that
    makes use of :meth:`~get_bonds()`, the dictionary of the bonds may not
    represent the actual situation anymore.

    You have two possibilities to cope with this problem. Either you just
    re-execute ``get_bonds`` on your specific instance, or you change the
    ``internally_use_lookup`` option in the settings. Please note that the
    internal use of the lookup variable greatly improves performance.

    Args:
        modified_properties (dict): If you want to change the van der Waals
            radius of one or more specific atoms, pass a dictionary that
            looks like::

                modified_properties = {index1: 1.5}

            For global changes use the constants module.
        offset (float):
        use_lookup (bool):
        set_lookup (bool):
        self_bonding_allowed (bool):
        atomic_radius_data (str): Defines which column of
            :attr:`constants.elements` is used. The default is
            ``atomic_radius_cc`` and can be changed with
            :attr:`settings['defaults']['atomic_radius_data']`.
            Compare with :func:`add_data`.

    Returns:
        dict: Dictionary mapping from an atom index to the set of indices
        of the atoms bonded to it.
    """
    if atomic_radius_data is None:
        atomic_radius_data = settings['defaults']['atomic_radius_data']

    def complete_calculation():
        old_index = self.index
        self.index = range(len(self))
        fragments = self._divide_et_impera(offset=offset)
        positions = np.array(self.loc[:, ['x', 'y', 'z']], order='F')
        data = self.add_data([atomic_radius_data, 'valency'])
        bond_radii = data[atomic_radius_data]
        if modified_properties is not None:
            bond_radii.update(pd.Series(modified_properties))
        bond_radii = bond_radii.values
        bond_dict = collections.defaultdict(set)
        for i, j, k in product(*[range(x) for x in fragments.shape]):
            # The following call is not side effect free and changes
            # bond_dict
            self._update_bond_dict(
                fragments[i, j, k], positions, bond_radii,
                bond_dict=bond_dict,
                self_bonding_allowed=self_bonding_allowed)

        # Atoms without bonds get an empty set (not {}, which would be a
        # dict), so all values share the same type.
        for i in set(self.index) - set(bond_dict.keys()):
            bond_dict[i] = set()

        self.index = old_index
        rename = dict(enumerate(self.index))
        bond_dict = {rename[key]: {rename[i] for i in bond_dict[key]}
                     for key in bond_dict}
        return bond_dict

    if use_lookup:
        try:
            bond_dict = self._metadata['bond_dict']
        except KeyError:
            bond_dict = complete_calculation()
    else:
        bond_dict = complete_calculation()

    if set_lookup:
        self._metadata['bond_dict'] = bond_dict
    return bond_dict
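Typical usage could look like this. A sketch only: ``water.xyz`` is a hypothetical file, and the indices in the result depend on the atom order within it:

    import chemcoord as cc

    water = cc.Cartesian.read_xyz('water.xyz')
    bond_dict = water.get_bonds()
    # e.g. {0: {1, 2}, 1: {0}, 2: {0}} if the oxygen atom has index 0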
<SYSTEM_TASK:>
Return a Cartesian of atoms in the n-th coordination sphere.
<END_TASK>
<USER_TASK:>
Description:
def get_coordination_sphere(
        self, index_of_atom, n_sphere=1, give_only_index=False,
        only_surface=True, exclude=None, use_lookup=None):
    """Return a Cartesian of atoms in the n-th coordination sphere.

    Connected means that a path along covalent bonds exists.

    Args:
        index_of_atom (int):
        give_only_index (bool): If ``True`` a set of indices is returned.
            Otherwise a new Cartesian instance.
        n_sphere (int): Determines the number of the coordination sphere.
        only_surface (bool): Return only the surface of the coordination
            sphere.
        exclude (set): A set of indices that should be ignored for the
            path finding.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        A set of indices or a new Cartesian instance.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']

    exclude = set() if exclude is None else exclude
    bond_dict = self.get_bonds(use_lookup=use_lookup)
    i = index_of_atom
    if n_sphere != 0:
        visited = set([i]) | exclude
        try:
            tmp_bond_dict = {j: (bond_dict[j] - visited)
                             for j in bond_dict[i]}
        except KeyError:
            tmp_bond_dict = {}
        n = 0
        while tmp_bond_dict and (n + 1) < n_sphere:
            new_tmp_bond_dict = {}
            for i in tmp_bond_dict:
                if i in visited:
                    continue
                visited.add(i)
                for j in tmp_bond_dict[i]:
                    new_tmp_bond_dict[j] = bond_dict[j] - visited
            tmp_bond_dict = new_tmp_bond_dict
            n += 1
        if only_surface:
            index_out = set(tmp_bond_dict.keys())
        else:
            index_out = visited | set(tmp_bond_dict.keys())
    else:
        index_out = {i}

    if give_only_index:
        return index_out - exclude
    else:
        return self.loc[index_out - exclude]
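Building on the bond dictionary above, a sketch of querying coordination spheres; ``molecule`` is assumed to be an existing ``Cartesian`` instance:

    # indices of the atoms directly bonded to atom 0 (surface of sphere 1)
    neighbours = molecule.get_coordination_sphere(
        0, n_sphere=1, give_only_index=True)

    # everything within two bonds of atom 0, including atom 0 itself
    cluster = molecule.get_coordination_sphere(
        0, n_sphere=2, only_surface=False)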
<SYSTEM_TASK:>
Is called after cutting geometric shapes.
<END_TASK>
<USER_TASK:>
Description:
def _preserve_bonds(self, sliced_cartesian, use_lookup=None):
    """Is called after cutting geometric shapes.

    If you want to change the rules how bonds are preserved when applying
    e.g. :meth:`Cartesian.cut_sphere`, this is the function you have to
    modify. It is recommended to inherit from the Cartesian class to
    tailor it for your project, instead of modifying the source code of
    ChemCoord.

    Args:
        sliced_cartesian (Cartesian):
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        Cartesian:
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']

    included_atoms_set = set(sliced_cartesian.index)
    assert included_atoms_set.issubset(set(self.index)), \
        'The sliced Cartesian has to be a subset of the bigger frame'
    bond_dic = self.get_bonds(use_lookup=use_lookup)
    new_atoms = set()
    for atom in included_atoms_set:
        new_atoms = new_atoms | bond_dic[atom]
    new_atoms = new_atoms - included_atoms_set
    while new_atoms:
        index_of_interest = new_atoms.pop()
        included_atoms_set = (
            included_atoms_set
            | self.get_coordination_sphere(
                index_of_interest,
                n_sphere=float('inf'),
                only_surface=False,
                exclude=included_atoms_set,
                give_only_index=True,
                use_lookup=use_lookup))
        new_atoms = new_atoms - included_atoms_set
    molecule = self.loc[included_atoms_set, :]
    return molecule