def process_rpc(self, rpc):
    """Process input and output parts of `rpc`."""
    p = "/nc:rpc/" + self.qname(rpc)
    tmpl = self.xsl_template(p)
    inp = rpc.search_one("input")
    if inp is not None:
        ct = self.xsl_calltemplate("rpc-input", tmpl)
        self.xsl_withparam("nsid", rpc.i_module.i_modulename + ":", ct)
        self.process_children(inp, p, 2)
    outp = rpc.search_one("output")
    if outp is not None:
        self.process_children(outp, "/nc:rpc-reply", 1)
Process input and output parts of `rpc`.
def file_link(self, instance):
    ''' Renders the link to the student upload file. '''
    sfile = instance.file_upload
    if not sfile:
        return mark_safe('No file submitted by student.')
    else:
        return mark_safe('<a href="%s">%s</a><br/>(<a href="%s" target="_new">Preview</a>)' % (
            sfile.get_absolute_url(), sfile.basename(), sfile.get_preview_url()))
Renders the link to the student upload file.
def _validate_ctypes(self, from_obj, to_obj):
    """
    Asserts that the content types for the given object are valid for this
    relationship. If validation fails, ``AssertionError`` will be raised.
    """
    if from_obj:
        from_ctype = ContentType.objects.get_for_model(from_obj)
        assert from_ctype.natural_key() == self.from_content_type.natural_key(), (
            'Relationship "%s" does not support connections '
            'from "%s" types' % (self.name, from_ctype))
    if to_obj:
        to_ctype = ContentType.objects.get_for_model(to_obj)
        assert to_ctype.natural_key() == self.to_content_type.natural_key(), (
            'Relationship "%s" does not support connections '
            'to "%s" types' % (self.name, to_ctype))
Asserts that the content types for the given object are valid for this relationship. If validation fails, ``AssertionError`` will be raised.
def add_media(dest, media):
    """
    Optimized version of django.forms.Media.__add__() that doesn't create new objects.
    """
    if django.VERSION >= (2, 2):
        dest._css_lists += media._css_lists
        dest._js_lists += media._js_lists
    elif django.VERSION >= (2, 0):
        combined = dest + media
        dest._css = combined._css
        dest._js = combined._js
    else:
        dest.add_css(media._css)
        dest.add_js(media._js)
Optimized version of django.forms.Media.__add__() that doesn't create new objects.
def range(cls, collection, attribute, left, right, closed,
          index_id, skip=None, limit=None):
    """
    This will find all documents within a given range. In order to execute a
    range query, a skip-list index on the queried attribute must be present.

    :param collection Collection instance
    :param attribute The attribute path to check
    :param left The lower bound
    :param right The upper bound
    :param closed If true, use interval including left and right, otherwise exclude right, but include left
    :param index_id ID of the index which should be used for the query
    :param skip The number of documents to skip in the query
    :param limit The maximal amount of documents to return. The skip is applied before the limit restriction.

    :returns Document list
    """
    kwargs = {
        'index': index_id,
        'attribute': attribute,
        'left': left,
        'right': right,
        'closed': closed,
        'skip': skip,
        'limit': limit,
    }
    return cls._construct_query(name='range', collection=collection, multiple=True, **kwargs)
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present. :param collection Collection instance :param attribute The attribute path to check :param left The lower bound :param right The upper bound :param closed If true, use interval including left and right, otherwise exclude right, but include left :param index_id ID of the index which should be used for the query :param skip The number of documents to skip in the query :param limit The maximal amount of documents to return. The skip is applied before the limit restriction. :returns Document list
def get_op_version(name):
    '''
    .. versionadded:: 2019.2.0

    Returns the glusterfs volume op-version

    name
        Name of the glusterfs volume

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.get_op_version <volume>
    '''
    cmd = 'volume get {0} cluster.op-version'.format(name)
    root = _gluster_xml(cmd)

    if not _gluster_ok(root):
        return False, root.find('opErrstr').text

    result = {}
    for op_version in _iter(root, 'volGetopts'):
        for item in op_version:
            if item.tag == 'Value':
                result = item.text
            elif item.tag == 'Opt':
                for child in item:
                    if child.tag == 'Value':
                        result = child.text

    return result
.. versionadded:: 2019.2.0 Returns the glusterfs volume op-version name Name of the glusterfs volume CLI Example: .. code-block:: bash salt '*' glusterfs.get_op_version <volume>
def count_mapped_reads(self, file_name, paired_end):
    """
    Mapped_reads are not in fastq format, so this one doesn't need to accommodate
    fastq, and therefore, doesn't require a paired-end parameter because it only
    uses samtools view. Therefore, it's ok that it has a default parameter, since
    this is discarded.

    :param str file_name: File for which to count mapped reads.
    :param bool paired_end: This parameter is ignored; samtools automatically
        correctly responds depending on the data in the bamfile. We leave the
        option here just for consistency, since all the other counting functions
        require the parameter. This makes it easier to swap counting functions
        during pipeline development.
    :return int: Either return code from samtools view command, or -1 to
        indicate an error state.
    """
    if file_name.endswith("bam"):
        return self.samtools_view(file_name, param="-c -F4")
    if file_name.endswith("sam"):
        return self.samtools_view(file_name, param="-c -F4 -S")
    return -1
Mapped_reads are not in fastq format, so this one doesn't need to accommodate fastq, and therefore, doesn't require a paired-end parameter because it only uses samtools view. Therefore, it's ok that it has a default parameter, since this is discarded. :param str file_name: File for which to count mapped reads. :param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending on the data in the bamfile. We leave the option here just for consistency, since all the other counting functions require the parameter. This makes it easier to swap counting functions during pipeline development. :return int: Either return code from samtools view command, or -1 to indicate an error state.
def reshape(tt_array, shape, eps=1e-14, rl=1, rr=1): ''' Reshape of the TT-vector [TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another with mode sizes SZ, accuracy 1e-14 [TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another with mode sizes SZ and accuracy EPS [TT1]=TT_RESHAPE(TT,SZ,EPS, RL) reshapes TT-vector/matrix into another with mode size SZ and left tail rank RL [TT1]=TT_RESHAPE(TT,SZ,EPS, RL, RR) reshapes TT-vector/matrix into another with mode size SZ and tail ranks RL*RR Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ. If the i_nput is TT-matrix, SZ must have the sizes for both modes, so it is a _matrix if sizes d2-by-2. If the i_nput is TT-vector, SZ may be either a column or a row _vector. ''' tt1 = _cp.deepcopy(tt_array) sz = _cp.deepcopy(shape) ismatrix = False if isinstance(tt1, _matrix.matrix): d1 = tt1.tt.d d2 = sz.shape[0] ismatrix = True # The size should be [n,m] in R^{d x 2} restn2_n = sz[:, 0] restn2_m = sz[:, 1] sz_n = _cp.copy(sz[:, 0]) sz_m = _cp.copy(sz[:, 1]) n1_n = tt1.n n1_m = tt1.m # We will split/convolve using the _vector form anyway sz = _np.prod(sz, axis=1) tt1 = tt1.tt else: d1 = tt1.d d2 = len(sz) # Recompute sz to include r0,rd, # and the items of tt1 sz[0] = sz[0] * rl sz[d2 - 1] = sz[d2 - 1] * rr tt1.n[0] = tt1.n[0] * tt1.r[0] tt1.n[d1 - 1] = tt1.n[d1 - 1] * tt1.r[d1] if ismatrix: # in _matrix: 1st tail rank goes to the n-mode, last to the m-mode restn2_n[0] = restn2_n[0] * rl restn2_m[d2 - 1] = restn2_m[d2 - 1] * rr n1_n[0] = n1_n[0] * tt1.r[0] n1_m[d1 - 1] = n1_m[d1 - 1] * tt1.r[d1] tt1.r[0] = 1 tt1.r[d1] = 1 n1 = tt1.n assert _np.prod(n1) == _np.prod(sz), 'Reshape: incorrect sizes' needQRs = False if d2 > d1: needQRs = True if d2 <= d1: i2 = 0 n2 = _cp.deepcopy(sz) for i1 in range(d1): if n2[i2] == 1: i2 = i2 + 1 if i2 > d2: break if n2[i2] % n1[i1] == 0: n2[i2] = n2[i2] // n1[i1] else: needQRs = True break r1 = tt1.r tt1 = tt1.to_list(tt1) if needQRs: # We have to split some cores -> perform QRs for i in range(d1 - 1, 0, -1): cr = tt1[i] cr = _np.reshape(cr, (r1[i], n1[i] * r1[i + 1]), order='F') [cr, rv] = _np.linalg.qr(cr.T) # Size n*r2, r1new - r1nwe,r1 cr0 = tt1[i - 1] cr0 = _np.reshape(cr0, (r1[i - 1] * n1[i - 1], r1[i]), order='F') cr0 = _np.dot(cr0, rv.T) # r0*n0, r1new r1[i] = cr.shape[1] cr0 = _np.reshape(cr0, (r1[i - 1], n1[i - 1], r1[i]), order='F') cr = _np.reshape(cr.T, (r1[i], n1[i], r1[i + 1]), order='F') tt1[i] = cr tt1[i - 1] = cr0 r2 = _np.ones(d2 + 1, dtype=_np.int32) i1 = 0 # Working index in tt1 i2 = 0 # Working index in tt2 core2 = _np.zeros((0)) curcr2 = 1 restn2 = sz n2 = _np.ones(d2, dtype=_np.int32) if ismatrix: n2_n = _np.ones(d2, dtype=_np.int32) n2_m = _np.ones(d2, dtype=_np.int32) while i1 < d1: curcr1 = tt1[i1] if _gcd(restn2[i2], n1[i1]) == n1[i1]: # The whole core1 fits to core2. 
Convolve it if (i1 < d1 - 1) and (needQRs): # QR to the next core - for safety curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') [curcr1, rv] = _np.linalg.qr(curcr1) curcr12 = tt1[i1 + 1] curcr12 = _np.reshape( curcr12, (r1[i1 + 1], n1[i1 + 1] * r1[i1 + 2]), order='F') curcr12 = _np.dot(rv, curcr12) r1[i1 + 1] = curcr12.shape[0] tt1[i1 + 1] = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1], r1[i1 + 2]), order='F') # Actually merge is here curcr1 = _np.reshape( curcr1, (r1[i1], n1[i1] * r1[i1 + 1]), order='F') curcr2 = _np.dot(curcr2, curcr1) # size r21*nold, dn*r22 if ismatrix: # Permute if we are working with tt_matrix curcr2 = _np.reshape(curcr2, (r2[i2], n2_n[i2], n2_m[i2], n1_n[ i1], n1_m[i1], r1[i1 + 1]), order='F') curcr2 = _np.transpose(curcr2, [0, 1, 3, 2, 4, 5]) # Update the "matrix" sizes n2_n[i2] = n2_n[i2] * n1_n[i1] n2_m[i2] = n2_m[i2] * n1_m[i1] restn2_n[i2] = restn2_n[i2] // n1_n[i1] restn2_m[i2] = restn2_m[i2] // n1_m[i1] r2[i2 + 1] = r1[i1 + 1] # Update the sizes of tt2 n2[i2] = n2[i2] * n1[i1] restn2[i2] = restn2[i2] // n1[i1] curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') i1 = i1 + 1 # current core1 is over else: if (_gcd(restn2[i2], n1[i1]) != 1) or (restn2[i2] == 1): # There exists a nontrivial divisor, or a singleton requested # Split it and convolve n12 = _gcd(restn2[i2], n1[i1]) if ismatrix: # Permute before the truncation # _matrix sizes we are able to split n12_n = _gcd(restn2_n[i2], n1_n[i1]) n12_m = _gcd(restn2_m[i2], n1_m[i1]) curcr1 = _np.reshape(curcr1, (r1[i1], n12_n, n1_n[i1] // n12_n, n12_m, n1_m[i1] // n12_m, r1[i1 + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) # Update the _matrix sizes of tt2 and tt1 n2_n[i2] = n2_n[i2] * n12_n n2_m[i2] = n2_m[i2] * n12_m restn2_n[i2] = restn2_n[i2] // n12_n restn2_m[i2] = restn2_m[i2] // n12_m n1_n[i1] = n1_n[i1] // n12_n n1_m[i1] = n1_m[i1] // n12_m curcr1 = _np.reshape( curcr1, (r1[i1] * n12, (n1[i1] // n12) * r1[i1 + 1]), order='F') [u, s, v] = _np.linalg.svd(curcr1, full_matrices=False) r = _my_chop2(s, eps * _np.linalg.norm(s) / (d2 - 1) ** 0.5) u = u[:, :r] v = v.T v = v[:, :r] * s[:r] u = _np.reshape(u, (r1[i1], n12 * r), order='F') # u is our admissible chunk, merge it to core2 curcr2 = _np.dot(curcr2, u) # size r21*nold, dn*r22 r2[i2 + 1] = r # Update the sizes of tt2 n2[i2] = n2[i2] * n12 restn2[i2] = restn2[i2] // n12 curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') r1[i1] = r # and tt1 n1[i1] = n1[i1] // n12 # keep v in tt1 for next operations curcr1 = _np.reshape( v.T, (r1[i1], n1[i1], r1[i1 + 1]), order='F') tt1[i1] = curcr1 else: # Bad case. 
We have to merge cores of tt1 until a common # divisor appears i1new = i1 + 1 curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') while (_gcd(restn2[i2], n1[i1]) == 1) and (i1new < d1): cr1new = tt1[i1new] cr1new = _np.reshape( cr1new, (r1[i1new], n1[i1new] * r1[i1new + 1]), order='F') # size r1(i1)*n1(i1), n1new*r1new curcr1 = _np.dot(curcr1, cr1new) if ismatrix: # Permutes and _matrix size updates curcr1 = _np.reshape(curcr1, (r1[i1], n1_n[i1], n1_m[i1], n1_n[ i1new], n1_m[i1new], r1[i1new + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) n1_n[i1] = n1_n[i1] * n1_n[i1new] n1_m[i1] = n1_m[i1] * n1_m[i1new] n1[i1] = n1[i1] * n1[i1new] curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1new + 1]), order='F') i1new = i1new + 1 # Inner cores merged => squeeze tt1 data n1 = _np.concatenate((n1[:i1], n1[i1new:])) r1 = _np.concatenate((r1[:i1], r1[i1new:])) tt1[i] = _np.reshape( curcr1, (r1[i1], n1[i1], r1[i1new]), order='F') tt1 = tt1[:i1] + tt1[i1new:] d1 = len(n1) if (restn2[i2] == 1) and ((i1 >= d1) or ((i1 < d1) and (n1[i1] != 1))): # The core of tt2 is finished # The second condition prevents core2 from finishing until we # squeeze all tailing singletons in tt1. curcr2 = curcr2.flatten(order='F') core2 = _np.concatenate((core2, curcr2)) i2 = i2 + 1 # Start new core2 curcr2 = 1 # If we have been asked for singletons - just add them while (i2 < d2): core2 = _np.concatenate((core2, _np.ones(1))) r2[i2] = 1 i2 = i2 + 1 tt2 = ones(2, 1) # dummy tensor tt2.d = d2 tt2.n = n2 tt2.r = r2 tt2.core = core2 tt2.ps = _np.int32(_np.cumsum(_np.concatenate((_np.ones(1), r2[:-1] * n2 * r2[1:])))) tt2.n[0] = tt2.n[0] // rl tt2.n[d2 - 1] = tt2.n[d2 - 1] // rr tt2.r[0] = rl tt2.r[d2] = rr if ismatrix: ttt = eye(1, 1) # dummy tt _matrix ttt.n = sz_n ttt.m = sz_m ttt.tt = tt2 return ttt else: return tt2
Reshape of the TT-vector [TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another with mode sizes SZ, accuracy 1e-14 [TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another with mode sizes SZ and accuracy EPS [TT1]=TT_RESHAPE(TT,SZ,EPS, RL) reshapes TT-vector/matrix into another with mode size SZ and left tail rank RL [TT1]=TT_RESHAPE(TT,SZ,EPS, RL, RR) reshapes TT-vector/matrix into another with mode size SZ and tail ranks RL*RR Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ. If the input is a TT-matrix, SZ must have the sizes for both modes, so it is a matrix of size d2-by-2. If the input is a TT-vector, SZ may be either a column or a row vector.
def get_instance(self, payload):
    """
    Build an instance of TaskInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.autopilot.v1.assistant.task.TaskInstance
    :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
    """
    return TaskInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
Build an instance of TaskInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.task.TaskInstance :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
def init_app_context():
    """Initialize app context for Invenio 2.x."""
    try:
        from invenio.base.factory import create_app
        app = create_app()
        app.test_request_context('/').push()
        app.preprocess_request()
    except ImportError:
        pass
Initialize app context for Invenio 2.x.
def detect_encoding(filename, limit_byte_check=-1):
    """Return file encoding."""
    try:
        with open(filename, 'rb') as input_file:
            encoding = _detect_encoding(input_file.readline)

        # Check for correctness of encoding.
        with open_with_encoding(filename, encoding) as input_file:
            input_file.read(limit_byte_check)

        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1'
Return file encoding.
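A minimal, self-contained sketch of the same idea using only the standard library. It assumes the private helper `_detect_encoding` above wraps something like `tokenize.detect_encoding`, which reads the PEP 263 coding cookie (or BOM) from the first lines of a file; the helper name and fallback below are illustrative.

import tokenize

def guess_file_encoding(filename, fallback='latin-1'):
    """Best-effort file encoding detection via the PEP 263 cookie/BOM."""
    try:
        with open(filename, 'rb') as f:
            encoding, _ = tokenize.detect_encoding(f.readline)
        # Verify the whole file actually decodes with that encoding.
        with open(filename, encoding=encoding) as f:
            f.read()
        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return fallback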
async def tuple(self, elem=None, elem_type=None, params=None):
    """
    Loads/dumps tuple
    :return:
    """
    if hasattr(elem_type, 'blob_serialize'):
        container = elem_type() if elem is None else elem
        return await container.blob_serialize(self, elem=elem, elem_type=elem_type, params=params)

    if self.writing:
        return await self.dump_tuple(elem, elem_type, params)
    else:
        return await self.load_tuple(elem_type, params=params, elem=elem)
Loads/dumps tuple :return:
def _end(ins):
    """ Outputs the ending sequence """
    global FLAG_end_emitted

    output = _16bit_oper(ins.quad[1])
    output.append('ld b, h')
    output.append('ld c, l')

    if FLAG_end_emitted:
        return output + ['jp %s' % END_LABEL]

    FLAG_end_emitted = True
    output.append('%s:' % END_LABEL)
    if OPTIONS.headerless.value:
        return output + ['ret']

    output.append('di')
    output.append('ld hl, (%s)' % CALL_BACK)
    output.append('ld sp, hl')
    output.append('exx')
    output.append('pop hl')
    output.append('exx')
    output.append('pop iy')
    output.append('pop ix')
    output.append('ei')
    output.append('ret')
    output.append('%s:' % CALL_BACK)
    output.append('DEFW 0')

    return output
Outputs the ending sequence
def _walk_directory(root_directory):
    """
    Generates the paths of all files that are descendants of `root_directory`.
    """
    paths = [os.path.join(root, name)
             for root, dirs, files in os.walk(root_directory)  # noqa
             for name in files]
    paths.sort()
    return paths
Generates the paths of all files that are descendants of `root_directory`.
def latex_to_img(tex):
    """Return a pygame image from a latex template."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        with open(tmpdirname + r'\tex.tex', 'w') as f:
            f.write(tex)

        os.system(r"latex {0}\tex.tex -halt-on-error -interaction=batchmode -disable-installer -aux-directory={0} "
                  r"-output-directory={0}".format(tmpdirname))
        os.system(r"dvipng -T tight -z 9 --truecolor -o {0}\tex.png {0}\tex.dvi".format(tmpdirname))
        # os.system(r'latex2png ' + tmpdirname)

        image = pygame.image.load(tmpdirname + r'\tex.png')
        return image
Return a pygame image from a latex template.
def _get_model_instance(model_cls, data):
    """Convert dict into object of class of passed model.

    :param class model_cls:
    :param object data:
    :rtype DomainModel:
    """
    if not isinstance(data, (model_cls, dict)):
        raise TypeError('{0} is not valid type, instance of '
                        '{1} or dict required'.format(data, model_cls))
    return model_cls(**data) if isinstance(data, dict) else data
Convert dict into object of class of passed model. :param class model_cls: :param object data: :rtype DomainModel:
def host(self, value=None):
    """
    Return the host

    :param string value: new host string
    """
    if value is not None:
        return URL._mutate(self, host=value)
    return self._tuple.host
Return the host :param string value: new host string
def visit_Dict(self, node):
    """ Define dict type from all elements type (or empty_dict type). """
    self.generic_visit(node)
    if node.keys:
        for key, value in zip(node.keys, node.values):
            value_type = self.result[value]
            self.combine(node, key,
                         unary_op=partial(self.builder.DictType,
                                          of_val=value_type))
    else:
        self.result[node] = self.builder.NamedType(
            "pythonic::types::empty_dict")
Define dict type from all elements type (or empty_dict type).
def one_hot(cls, ij, sz):
    """
    ij: position
    sz: size of matrix
    """
    if isinstance(sz, int):
        sz = (sz, sz)
    if isinstance(ij, int):
        ij = (ij, ij)

    m = np.zeros(sz)
    m[ij[0], ij[1]] = 1.0
    return Matrix(m)
ij: position sz: size of matrix
def vsubg(v1, v2, ndim):
    """
    Compute the difference between two double precision vectors of arbitrary dimension.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsubg_c.html

    :param v1: First vector (minuend).
    :type v1: Array of floats
    :param v2: Second vector (subtrahend).
    :type v2: Array of floats
    :param ndim: Dimension of v1, v2, and vout.
    :type ndim: int
    :return: Difference vector, v1 - v2.
    :rtype: Array of floats
    """
    v1 = stypes.toDoubleVector(v1)
    v2 = stypes.toDoubleVector(v2)
    vout = stypes.emptyDoubleVector(ndim)
    ndim = ctypes.c_int(ndim)
    libspice.vsubg_c(v1, v2, ndim, vout)
    return stypes.cVectorToPython(vout)
Compute the difference between two double precision vectors of arbitrary dimension. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsubg_c.html :param v1: First vector (minuend). :type v1: Array of floats :param v2: Second vector (subtrahend). :type v2: Array of floats :param ndim: Dimension of v1, v2, and vout. :type ndim: int :return: Difference vector, v1 - v2. :rtype: Array of floats
def send_raw_transaction(self, hextx, **kwargs):
    """
    Broadcasts a transaction over the NEO network and returns the result.

    :param hextx: hexadecimal string that has been serialized
    :type hextx: str
    :return: result of the transaction
    :rtype: bool
    """
    return self._call(JSONRPCMethods.SEND_RAW_TRANSACTION.value, [hextx, ], **kwargs)
Broadcasts a transaction over the NEO network and returns the result. :param hextx: hexadecimal string that has been serialized :type hextx: str :return: result of the transaction :rtype: bool
def sanitize_dict(input_dict):
    r"""
    Given a nested dictionary, ensures that all nested dicts are normal Python
    dicts. This is necessary for pickling, or just converting an 'auto-vivifying'
    dict to something that acts normal.
    """
    plain_dict = dict()
    for key in input_dict.keys():
        value = input_dict[key]
        if hasattr(value, 'keys'):
            plain_dict[key] = sanitize_dict(value)
        else:
            plain_dict[key] = value
    return plain_dict
r""" Given a nested dictionary, ensures that all nested dicts are normal Python dicts. This is necessary for pickling, or just converting an 'auto-vivifying' dict to something that acts normal.
def _split_regex(regex):
    """
    Return an array of the URL split at each regex match like (?P<id>[\d]+)

    Call with a regex of '^/foo/(?P<id>[\d]+)/bar/$' and you will receive
    ['/foo/', '/bar/']
    """
    if regex[0] == '^':
        regex = regex[1:]
    if regex[-1] == '$':
        regex = regex[0:-1]

    results = []
    line = ''
    for c in regex:
        if c == '(':
            results.append(line)
            line = ''
        elif c == ')':
            line = ''
        else:
            line = line + c
    if len(line) > 0:
        results.append(line)
    return results
Return an array of the URL split at each regex match like (?P<id>[\d]+) Call with a regex of '^/foo/(?P<id>[\d]+)/bar/$' and you will receive ['/foo/', '/bar/']
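A minimal, self-contained sketch of the same splitting idea built on the standard library's `re` module; the function name and pattern below are illustrative, not taken from the original code.

import re

def split_at_groups(pattern):
    """Split a URL regex at each parenthesised group, dropping the ^/$ anchors."""
    pattern = pattern.lstrip('^').rstrip('$')
    # Everything between capture groups becomes a literal path fragment.
    return [part for part in re.split(r'\([^)]*\)', pattern) if part]

print(split_at_groups(r'^/foo/(?P<id>[\d]+)/bar/$'))  # ['/foo/', '/bar/']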
def dynacRepresentation(self):
    """
    Return the Pynac representation of this Set4DAperture instance.
    """
    details = [
        self.voltage.val,
        self.phase.val,
        self.harmonicNum.val,
        self.apertureRadius.val,
    ]
    return ['BUNCHER', [details]]
Return the Pynac representation of this Set4DAperture instance.
def padding_oracle_encrypt(oracle, plaintext, block_size=128, pool=None):
    """
    Encrypt plaintext using an oracle function that returns ``True`` if the
    provided ciphertext is correctly PKCS#7 padded after decryption. The
    cipher needs to operate in CBC mode.

    Args:
        oracle(callable): The oracle function. Will be called repeatedly with
            a chunk of ciphertext.
        plaintext(bytes): The plaintext data to encrypt.
        block_size(int): The cipher's block size in bits.
        pool(multiprocessing.Pool): A multiprocessing pool to use to
            parallelize the encryption. This pool is used to call the oracle
            function. Fairly heavy due to the required inter-process state
            synchronization. If ``None`` (the default), no multiprocessing
            will be used.

    Returns:
        bytes: The encrypted data.

    Raises:
        RuntimeError: Raised if the oracle behaves unpredictably.
    """
    plaintext = bytearray(plaintext)
    block_len = block_size // 8

    padding_len = block_len - (len(plaintext) % block_len)
    plaintext.extend([padding_len] * padding_len)

    ciphertext = bytearray()

    chunk = bytearray(os.urandom(block_len))
    ciphertext[0:0] = chunk

    for plain_start in range(len(plaintext) - block_len, -1, -block_len):
        plain = plaintext[plain_start:plain_start + block_len]
        chunk = ciphertext[0:0] = encrypt_block(oracle, block_len, chunk, plain, pool)

    return bytes(ciphertext)
Encrypt plaintext using an oracle function that returns ``True`` if the provided ciphertext is correctly PKCS#7 padded after decryption. The cipher needs to operate in CBC mode. Args: oracle(callable): The oracle function. Will be called repeatedly with a chunk of ciphertext. plaintext(bytes): The plaintext data to encrypt. block_size(int): The cipher's block size in bits. pool(multiprocessing.Pool): A multiprocessing pool to use to parallelize the encryption. This pool is used to call the oracle function. Fairly heavy due to the required inter-process state synchronization. If ``None`` (the default), no multiprocessing will be used. Returns: bytes: The encrypted data. Raises: RuntimeError: Raised if the oracle behaves unpredictably.
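The first step of the function above is plain PKCS#7 padding. A small, self-contained illustration of that padding rule (the helper name is ours, not the library's):

def pkcs7_pad(data: bytes, block_len: int = 16) -> bytes:
    """Append N bytes, each of value N, so the length is a multiple of block_len."""
    padding_len = block_len - (len(data) % block_len)
    return data + bytes([padding_len] * padding_len)

assert pkcs7_pad(b"YELLOW SUBMARINE", 20) == b"YELLOW SUBMARINE\x04\x04\x04\x04"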
def plot_mag(fignum, datablock, s, num, units, norm): """ plots magnetization against (de)magnetizing temperature or field Parameters _________________ fignum : matplotlib figure number for plotting datablock : nested list of [step, 0, 0, magnetization, 1,quality] s : string for title num : matplotlib figure number, can set to 1 units : [T,K,U] for tesla, kelvin or arbitrary norm : [True,False] if True, normalize Effects ______ plots figure """ global globals, graphmenu Ints = [] for plotrec in datablock: Ints.append(plotrec[3]) Ints.sort() plt.figure(num=fignum) T, M, Tv, recnum = [], [], [], 0 Mex, Tex, Vdif = [], [], [] recbak = [] for rec in datablock: if rec[5] == 'g': if units == "T": T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) if recnum > 0: Tv.append(rec[0] * 1e3) elif units == "U": T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) elif units == "K": T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) elif "T" in units and "K" in units: if rec[0] < 1.: T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) else: T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) else: T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) if norm: M.append(old_div(rec[3], Ints[-1])) else: M.append(rec[3]) if recnum > 0 and len(rec) > 0 and len(recbak) > 0: v = [] if recbak[0] != rec[0]: V0 = pmag.dir2cart([recbak[1], recbak[2], recbak[3]]) V1 = pmag.dir2cart([rec[1], rec[2], rec[3]]) for el in range(3): v.append(abs(V1[el] - V0[el])) vdir = pmag.cart2dir(v) # append vector difference Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(old_div(vdir[2], Ints[-1])) recbak = [] for el in rec: recbak.append(el) delta = .005 * M[0] if num == 1: if recnum % 2 == 0: plt.text(T[-1] + delta, M[-1], (' ' + str(recnum)), fontsize=9) recnum += 1 else: if rec[0] < 200: Tex.append(rec[0] * 1e3) if rec[0] >= 200: Tex.append(rec[0] - 273) Mex.append(old_div(rec[3], Ints[-1])) recnum += 1 if globals != 0: globals.MTlist = T globals.MTlisty = M if len(Mex) > 0 and len(Tex) > 0: plt.scatter(Tex, Mex, marker='d', color='k') if len(Vdif) > 0: Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(0) if Tv: Tv.append(Tv[-1]) plt.plot(T, M) plt.plot(T, M, 'ro') if len(Tv) == len(Vdif) and norm: plt.plot(Tv, Vdif, 'g-') if units == "T": plt.xlabel("Step (mT)") elif units == "K": plt.xlabel("Step (C)") elif units == "J": plt.xlabel("Step (J)") else: plt.xlabel("Step [mT,C]") if norm == 1: plt.ylabel("Fractional Magnetization") if norm == 0: plt.ylabel("Magnetization") plt.axvline(0, color='k') plt.axhline(0, color='k') tstring = s plt.title(tstring) plt.draw()
plots magnetization against (de)magnetizing temperature or field Parameters _________________ fignum : matplotlib figure number for plotting datablock : nested list of [step, 0, 0, magnetization, 1,quality] s : string for title num : matplotlib figure number, can set to 1 units : [T,K,U] for tesla, kelvin or arbitrary norm : [True,False] if True, normalize Effects ______ plots figure
def load_json_fixture(fixture_path: str) -> Dict[str, Any]:
    """
    Loads a fixture file, caching the most recent files it loaded.
    """
    with open(fixture_path) as fixture_file:
        file_fixtures = json.load(fixture_file)
    return file_fixtures
Loads a fixture file, caching the most recent files it loaded.
def execute_sql( self, sql, params=None, param_types=None, query_mode=None, partition=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, ): """Perform an ``ExecuteStreamingSql`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. """ if self._read_request_count > 0: if not self._multi_use: raise ValueError("Cannot re-use single-use snapshot.") if self._transaction_id is None: raise ValueError("Transaction ID pending.") if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") params_pb = Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) else: params_pb = None database = self._session._database metadata = _metadata_with_prefix(database.name) transaction = self._make_txn_selector() api = database.spanner_api restart = functools.partial( api.execute_streaming_sql, self._session.name, sql, transaction=transaction, params=params_pb, param_types=param_types, query_mode=query_mode, partition_token=partition, seqno=self._execute_sql_count, metadata=metadata, retry=retry, timeout=timeout, ) iterator = _restart_on_unavailable(restart) self._read_request_count += 1 self._execute_sql_count += 1 if self._multi_use: return StreamedResultSet(iterator, source=self) else: return StreamedResultSet(iterator)
Perform an ``ExecuteStreamingSql`` API request. :type sql: str :param sql: SQL query statement :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``sql``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots.
def unredirect_stdout(self):
    """Redirect stdout and stderr back to screen."""
    if hasattr(self, 'hijacked_stdout') and hasattr(self, 'hijacked_stderr'):
        sys.stdout = self.hijacked_stdout
        sys.stderr = self.hijacked_stderr
Redirect stdout and stderr back to screen.
def _push_new_state(self):
    """Push a new state into history.

    This new state will be used to hold resolution results of the next
    coming round.
    """
    try:
        base = self._states[-1]
    except IndexError:
        graph = DirectedGraph()
        graph.add(None)  # Sentinel as root dependencies' parent.
        state = State(mapping={}, graph=graph)
    else:
        state = State(
            mapping=base.mapping.copy(),
            graph=base.graph.copy(),
        )
    self._states.append(state)
Push a new state into history. This new state will be used to hold resolution results of the next coming round.
def available_metrics(self):
    """
    List all available metrics that you can add to this machine

    :returns: A list of dicts, each of which is a metric that you can add to
        a monitored machine
    """
    req = self.request(self.mist_client.uri + "/clouds/" + self.cloud.id +
                       "/machines/" + self.id + "/metrics")
    metrics = req.get().json()
    return metrics
List all available metrics that you can add to this machine :returns: A list of dicts, each of which is a metric that you can add to a monitored machine
def average(iterator):
    """Iterative mean."""
    count = 0
    total = 0
    for num in iterator:
        count += 1
        total += num
    return float(total) / count
Iterative mean.
def to_df(self, variables=None, format='wide', sparse=True, sampling_rate=None, include_sparse=True, include_dense=True, **kwargs): ''' Merge columns into a single pandas DataFrame. Args: variables (list): Optional list of variable names to retain; if None, all variables are written out. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. sparse (bool): If True, variables will be kept in a sparse format provided they are all internally represented as such. If False, a dense matrix (i.e., uniform sampling rate for all events) will be exported. Will be ignored if at least one variable is dense. sampling_rate (float): If a dense matrix is written out, the sampling rate (in Hz) to use for downsampling. Defaults to the value currently set in the instance. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). include_sparse (bool): Whether or not to include sparse Variables. include_dense (bool): Whether or not to include dense Variables. Returns: A pandas DataFrame. ''' if not include_sparse and not include_dense: raise ValueError("You can't exclude both dense and sparse " "variables! That leaves nothing!") if variables is None: variables = list(self.variables.keys()) if not include_sparse: variables = [v for v in variables if isinstance(self.variables[v], DenseRunVariable)] if not include_dense: variables = [v for v in variables if not isinstance(self.variables[v], DenseRunVariable)] if not variables: return None _vars = [self.variables[v] for v in variables] if sparse and all(isinstance(v, SimpleVariable) for v in _vars): variables = _vars else: sampling_rate = sampling_rate or self.sampling_rate # Make sure all variables have the same sampling rate variables = list(self.resample(sampling_rate, variables, force_dense=True, in_place=False).values()) return super(BIDSRunVariableCollection, self).to_df(variables, format, **kwargs)
Merge columns into a single pandas DataFrame. Args: variables (list): Optional list of variable names to retain; if None, all variables are written out. format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value. sparse (bool): If True, variables will be kept in a sparse format provided they are all internally represented as such. If False, a dense matrix (i.e., uniform sampling rate for all events) will be exported. Will be ignored if at least one variable is dense. sampling_rate (float): If a dense matrix is written out, the sampling rate (in Hz) to use for downsampling. Defaults to the value currently set in the instance. kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing). include_sparse (bool): Whether or not to include sparse Variables. include_dense (bool): Whether or not to include dense Variables. Returns: A pandas DataFrame.
def input_object(prompt_text, cast=None, default=None, prompt_ext=': ',
                 castarg=[], castkwarg={}):
    """Gets input from the command line and validates it.

    prompt_text
        A string. Used to prompt the user. Do not include a trailing space.

    prompt_ext
        Added on to the prompt at the end. At the moment this must not include
        any control stuff because it is sent directly to raw_input

    cast
        This can be any callable object (class, function, type, etc). It simply
        calls the cast with the given arguments and returns the result. If a
        ValueError is raised, it will output an error message and prompt the
        user again. Because some builtin python objects don't do casting in the
        way that we might like you can easily write a wrapper function that
        looks at the input and returns the appropriate object or exception.
        Look in the cast submodule for examples.
        If cast is None, then it will do nothing (and you will have a string)

    default
        function returns this value if the user types nothing in. This can be
        used to cancel the input, so to speak

    castarg, castkwarg
        list and dictionary. Extra arguments passed on to the cast.
    """
    while True:
        stdout.write(prompt_text)
        value = stdout.raw_input(prompt_ext)
        if value == '':
            return default
        try:
            if cast != None:
                value = cast(value, *castarg, **castkwarg)
        except ValueError as details:
            if cast in NICE_INPUT_ERRORS:  # see comment above this constant
                stderr.write(ERROR_MESSAGE % (NICE_INPUT_ERRORS[cast] % details))
            else:
                stderr.write(ERROR_MESSAGE % (DEFAULT_INPUT_ERRORS % str(details)))
            continue
        return value
Gets input from the command line and validates it. prompt_text A string. Used to prompt the user. Do not include a trailing space. prompt_ext Added on to the prompt at the end. At the moment this must not include any control stuff because it is sent directly to raw_input cast This can be any callable object (class, function, type, etc). It simply calls the cast with the given arguments and returns the result. If a ValueError is raised, it will output an error message and prompt the user again. Because some builtin python objects don't do casting in the way that we might like you can easily write a wrapper function that looks at the input and returns the appropriate object or exception. Look in the cast submodule for examples. If cast is None, then it will do nothing (and you will have a string) default function returns this value if the user types nothing in. This can be used to cancel the input, so to speak castarg, castkwarg list and dictionary. Extra arguments passed on to the cast.
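A minimal, self-contained sketch of the same prompt-and-validate loop using the built-in `input()`; the function name, messages, and defaults here are illustrative rather than taken from the original module.

def prompt_value(prompt, cast=int, default=None):
    """Ask until the reply casts cleanly; empty input returns the default."""
    while True:
        reply = input(prompt + ': ')
        if reply == '':
            return default
        try:
            return cast(reply)
        except ValueError as err:
            print('Invalid value ({}), please try again.'.format(err))

# Example: age = prompt_value('Age', cast=int, default=0)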
def commit_index(self, message):
    """
    Commit the current index.

    :param message: str
    :return: str the generated commit sha
    """
    tree_id = self.write_tree()

    args = ['commit-tree', tree_id, '-p', self.ref_head]

    # todo, this can end in a race-condition with other processes adding commits
    commit = self.command_exec(args, message)[0].decode('utf-8').strip()
    self.command_exec(['update-ref', self.ref_head, commit])

    return commit
Commit the current index. :param message: str :return: str the generated commit sha
def _oval_string(self, p1, p2, p3, p4):
    """Return /AP string defining an oval within a 4-polygon provided as points
    """
    def bezier(p, q, r):
        f = "%f %f %f %f %f %f c\n"
        return f % (p.x, p.y, q.x, q.y, r.x, r.y)

    kappa = 0.55228474983              # magic number
    ml = p1 + (p4 - p1) * 0.5          # middle points ...
    mo = p1 + (p2 - p1) * 0.5          # for each ...
    mr = p2 + (p3 - p2) * 0.5          # polygon ...
    mu = p4 + (p3 - p4) * 0.5          # side
    ol1 = ml + (p1 - ml) * kappa       # the 8 bezier
    ol2 = mo + (p1 - mo) * kappa       # helper points
    or1 = mo + (p2 - mo) * kappa
    or2 = mr + (p2 - mr) * kappa
    ur1 = mr + (p3 - mr) * kappa
    ur2 = mu + (p3 - mu) * kappa
    ul1 = mu + (p4 - mu) * kappa
    ul2 = ml + (p4 - ml) * kappa

    # now draw, starting from middle point of left side
    ap = "%f %f m\n" % (ml.x, ml.y)
    ap += bezier(ol1, ol2, mo)
    ap += bezier(or1, or2, mr)
    ap += bezier(ur1, ur2, mu)
    ap += bezier(ul1, ul2, ml)
    return ap
Return /AP string defining an oval within a 4-polygon provided as points
def validiate_webhook_signature(self, webhook, signature):
    """Validates a webhook signature from a webhook body + client secret

    Parameters
        webhook (string)
            The request body of the webhook.
        signature (string)
            The webhook signature specified in X-Uber-Signature header.
    """
    digester = hmac.new(self.session.oauth2credential.client_secret,
                        webhook,
                        hashlib.sha256)
    return (signature == digester.hexdigest())
Validates a webhook signature from a webhook body + client secret Parameters webhook (string) The request body of the webhook. signature (string) The webhook signature specified in X-Uber-Signature header.
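A self-contained sketch of the same HMAC check using only standard-library primitives; the secret and body below are placeholders. Using `hmac.compare_digest` instead of `==` makes the comparison constant-time, which is the usual recommendation for signature checks.

import hashlib
import hmac

def verify_webhook(secret: bytes, body: bytes, signature: str) -> bool:
    """Recompute the HMAC-SHA256 of the body and compare it to the header value."""
    expected = hmac.new(secret, body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)

# Example with placeholder values:
sig = hmac.new(b"client-secret", b'{"event": "ping"}', hashlib.sha256).hexdigest()
assert verify_webhook(b"client-secret", b'{"event": "ping"}', sig)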
def prettyPrintPacket(pkt):
    """
    not done
    """
    s = 'packet ID: {} instr: {} len: {}'.format(pkt[4], pkt[7], int((pkt[6] << 8) + pkt[5]))
    if len(s) > 10:
        params = pkt[8:-2]
        s += ' params: {}'.format(params)
    return s
not done
def Get(self, key):
    """Get alert by providing name, ID, or other unique key.

    If key is not unique and finds multiple matches only the first
    will be returned
    """
    for alert in self.alerts:
        if alert.id == key:
            return(alert)
        elif alert.name == key:
            return(alert)
Get alert by providing name, ID, or other unique key. If key is not unique and finds multiple matches only the first will be returned
def _render(self):
    """
    Render the text.

    Avoid using this function too many times, as it is slow to render text
    and blit it.
    """
    self._last_text = self.text
    self._surface = self.font.render(self.text, True, self.color, self.bg_color)
    rect = self._surface.get_rect()
    self.size = rect.size
Render the text. Avoid using this function too many times, as it is slow to render text and blit it.
def upload(request): """ Displays an upload form Creates upload url and token from youtube api and uses them on the form """ # Get the optional parameters title = request.GET.get("title", "%s's video on %s" % ( request.user.username, request.get_host())) description = request.GET.get("description", "") keywords = request.GET.get("keywords", "") # Try to create post_url and token to create an upload form try: api = Api() # upload method needs authentication api.authenticate() # Customize following line to your needs, you can add description, keywords or developer_keys # I prefer to update video information after upload finishes data = api.upload(title, description=description, keywords=keywords, access_control=AccessControl.Unlisted) except ApiError as e: # An api error happened, redirect to homepage messages.add_message(request, messages.ERROR, e.message) return HttpResponseRedirect("/") except: # An error happened, redirect to homepage messages.add_message(request, messages.ERROR, _( 'An error occurred during the upload, Please try again.')) return HttpResponseRedirect("/") # Create the form instance form = YoutubeUploadForm(initial={"token": data["youtube_token"]}) protocol = 'https' if request.is_secure() else 'http' next_url = '%s://%s%s/' % (protocol, request.get_host(), reverse("django_youtube.views.upload_return")) return render_to_response( "django_youtube/upload.html", {"form": form, "post_url": data["post_url"], "next_url": next_url}, context_instance=RequestContext(request) )
Displays an upload form Creates upload url and token from youtube api and uses them on the form
def namedb_get_name_preorder( db, preorder_hash, current_block ): """ Get a (singular) name preorder record outstanding at the given block, given the preorder hash. NOTE: returns expired preorders. Return the preorder record on success. Return None if not found. """ select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;" args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE) cur = db.cursor() preorder_rows = namedb_query_execute( cur, select_query, args ) preorder_row = preorder_rows.fetchone() if preorder_row is None: # no such preorder return None preorder_rec = {} preorder_rec.update( preorder_row ) unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block ) # make sure that the name doesn't already exist select_query = "SELECT name_records.preorder_hash " + \ "FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE name_records.preorder_hash = ? AND " + \ unexpired_query + ";" args = (preorder_hash,) + unexpired_args cur = db.cursor() nm_rows = namedb_query_execute( cur, select_query, args ) nm_row = nm_rows.fetchone() if nm_row is not None: # name with this preorder exists return None return preorder_rec
Get a (singular) name preorder record outstanding at the given block, given the preorder hash. NOTE: returns expired preorders. Return the preorder record on success. Return None if not found.
def get_votes(self):
    """
    Get all votes for this election.
    """
    candidate_elections = CandidateElection.objects.filter(election=self)

    votes = None
    for ce in candidate_elections:
        if votes is None:
            votes = ce.votes.all()
        else:
            votes = votes | ce.votes.all()

    return votes
Get all votes for this election.
def save(self):
    """
    Saves or updates the current tailored audience permission.
    """
    if self.id:
        method = 'put'
        resource = self.RESOURCE.format(
            account_id=self.account.id,
            tailored_audience_id=self.tailored_audience_id,
            id=self.id)
    else:
        method = 'post'
        resource = self.RESOURCE_COLLECTION.format(
            account_id=self.account.id,
            tailored_audience_id=self.tailored_audience_id)

    response = Request(
        self.account.client, method, resource, params=self.to_params()).perform()

    return self.from_response(response.body['data'])
Saves or updates the current tailored audience permission.
def codemirror_html(self, config_name, varname, element_id):
    """
    Render HTML for a CodeMirror instance.

    Since a CodeMirror instance has to be attached to a HTML element, this
    method requires a HTML element identifier with or without the ``#``
    prefix; it depends on the template in ``settings.CODEMIRROR_FIELD_INIT_JS``
    (the default one requires it not to be prefixed with ``#``).

    Arguments:
        config_name (string): A registered config name.
        varname (string): A Javascript variable name.
        element_id (string): An HTML element identifier (without leading
            ``#``) to attach to a CodeMirror instance.

    Returns:
        string: HTML to instantiate CodeMirror for a field input.
    """
    parameters = json.dumps(self.get_codemirror_parameters(config_name),
                            sort_keys=True)
    return settings.CODEMIRROR_FIELD_INIT_JS.format(
        varname=varname,
        inputid=element_id,
        settings=parameters,
    )
Render HTML for a CodeMirror instance. Since a CodeMirror instance has to be attached to a HTML element, this method requires a HTML element identifier with or without the ``#`` prefix; it depends on the template in ``settings.CODEMIRROR_FIELD_INIT_JS`` (the default one requires it not to be prefixed with ``#``). Arguments: config_name (string): A registered config name. varname (string): A Javascript variable name. element_id (string): An HTML element identifier (without leading ``#``) to attach to a CodeMirror instance. Returns: string: HTML to instantiate CodeMirror for a field input.
def _filter_child_model_fields(cls, fields):
    """ Keep only related model fields.

    Example: Inherited models: A -> B -> C
    B has one-to-many relationship to BMany.
    after inspection BMany would have links to B and C. Keep only B.
    Parent model A could not be used (It would not be in fields)

    :param list fields: model fields.
    :return list fields: filtered fields.
    """
    indexes_to_remove = set([])
    for index1, field1 in enumerate(fields):
        for index2, field2 in enumerate(fields):
            if index1 < index2 and index1 not in indexes_to_remove and \
                    index2 not in indexes_to_remove:
                if issubclass(field1.related_model, field2.related_model):
                    indexes_to_remove.add(index1)
                if issubclass(field2.related_model, field1.related_model):
                    indexes_to_remove.add(index2)

    fields = [field for index, field in enumerate(fields)
              if index not in indexes_to_remove]
    return fields
Keep only related model fields. Example: Inherited models: A -> B -> C B has one-to-many relationship to BMany. after inspection BMany would have links to B and C. Keep only B. Parent model A could not be used (It would not be in fields) :param list fields: model fields. :return list fields: filtered fields.
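A self-contained toy illustrating the filtering rule with plain classes standing in for Django models: any class that is a proper subclass of another class in the list is dropped, so only the base-most class survives. The names and helper are made up for the example.

class A: pass
class B(A): pass
class C(B): pass

def keep_base_models(models):
    """Drop any class that is a (proper) subclass of another class in the list."""
    return [m for m in models
            if not any(m is not other and issubclass(m, other) for other in models)]

print(keep_base_models([B, C]))  # [<class '__main__.B'>]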
def make_selector(value):
    '''Create a selector callable from the supplied value.

    Args:
        value: If it is a callable, then it is returned unchanged. If a string
            is used then create an attribute selector. If an integer is used
            then create a key selector.

    Returns:
        A callable selector based on the supplied value.

    Raises:
        ValueError: If a selector cannot be created from the value.
    '''
    if is_callable(value):
        return value
    if is_string(value):
        return a_(value)
    raise ValueError("Unable to create callable selector from '{0}'".format(value))
Create a selector callable from the supplied value. Args: value: If it is a callable, then it is returned unchanged. If a string is used then create an attribute selector. If an integer is used then create a key selector. Returns: A callable selector based on the supplied value. Raises: ValueError: If a selector cannot be created from the value.
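A self-contained sketch of the same idea built on the standard library; `operator.attrgetter` plays the role of the attribute selector and `operator.itemgetter` of the key selector. The helper name is illustrative, not the library's API.

from operator import attrgetter, itemgetter

def make_selector_sketch(value):
    """Turn a callable, attribute name, or index/key into a selector callable."""
    if callable(value):
        return value
    if isinstance(value, str):
        return attrgetter(value)   # selects obj.<value>
    if isinstance(value, int):
        return itemgetter(value)   # selects seq[value]
    raise ValueError("Unable to create selector from {0!r}".format(value))

print(make_selector_sketch(1)(["a", "b", "c"]))  # b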
def _log(self, monitors, iteration, label='', suffix=''):
    '''Log the state of the optimizer on the console.

    Parameters
    ----------
    monitors : OrderedDict
        A dictionary of monitor names mapped to values. These names and
        values are what is being logged.
    iteration : int
        Optimization iteration that we are logging.
    label : str, optional
        A label for the name of the optimizer creating the log line.
        Defaults to the name of the current class.
    suffix : str, optional
        A suffix to add to the end of the log line, if any.
    '''
    label = label or self.__class__.__name__
    fields = (('{}={:.6f}').format(k, v) for k, v in monitors.items())
    util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
Log the state of the optimizer on the console. Parameters ---------- monitors : OrderedDict A dictionary of monitor names mapped to values. These names and values are what is being logged. iteration : int Optimization iteration that we are logging. label : str, optional A label for the name of the optimizer creating the log line. Defaults to the name of the current class. suffix : str, optional A suffix to add to the end of the log line, if any.
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
    """ Load in a text file from which to generate a word frequency list

    Args:
        filename (str): The filepath to the text file to be loaded
        encoding (str): The encoding of the text file
        tokenizer (function): The function to use to tokenize a string
    """
    with load_file(filename, encoding=encoding) as data:
        self.load_text(data, tokenizer)
Load in a text file from which to generate a word frequency list Args: filename (str): The filepath to the text file to be loaded encoding (str): The encoding of the text file tokenizer (function): The function to use to tokenize a string
def frames(self, key=None, orig_order=False):
    """Returns a list of frames in this tag.

    If KEY is None, returns all frames in the tag; otherwise returns all
    frames whose frameid matches KEY.

    If ORIG_ORDER is True, then the frames are returned in their original
    order. Otherwise the frames are sorted in canonical order according to
    the frame_order field of this tag.
    """
    if key is not None:
        # If there are multiple frames, then they are already in original order.
        key = self._normalize_key(key)
        if len(self._frames[key]) == 0:
            raise KeyError("Key not found: " + repr(key))
        return self._frames[key]

    frames = []
    for frameid in self._frames.keys():
        for frame in self._frames[frameid]:
            frames.append(frame)
    if orig_order:
        key = (lambda frame:
               (0, frame.frameno) if frame.frameno is not None else (1,))
    else:
        key = self.frame_order.key
    frames.sort(key=key)
    return frames
Returns a list of frames in this tag. If KEY is None, returns all frames in the tag; otherwise returns all frames whose frameid matches KEY. If ORIG_ORDER is True, then the frames are returned in their original order. Otherwise the frames are sorted in canonical order according to the frame_order field of this tag.
def get_fmt_v4(data_type, size, channel_type=v4c.CHANNEL_TYPE_VALUE): """convert mdf version 4 channel data type to numpy dtype format string Parameters ---------- data_type : int mdf channel data type size : int data bit size channel_type: int mdf channel type Returns ------- fmt : str numpy compatible data type format string """ if data_type in v4c.NON_SCALAR_TYPES: size = size // 8 if data_type == v4c.DATA_TYPE_BYTEARRAY: if channel_type == v4c.CHANNEL_TYPE_VALUE: fmt = f"({size},)u1" else: if size == 4: fmt = "<u4" elif size == 8: fmt = "<u8" elif data_type in v4c.STRING_TYPES: if channel_type == v4c.CHANNEL_TYPE_VALUE: fmt = f"S{size}" else: if size == 4: fmt = "<u4" elif size == 8: fmt = "<u8" elif data_type == v4c.DATA_TYPE_CANOPEN_DATE: fmt = "V7" elif data_type == v4c.DATA_TYPE_CANOPEN_TIME: fmt = "V6" else: if size <= 8: size = 1 elif size <= 16: size = 2 elif size <= 32: size = 4 elif size <= 64: size = 8 else: size = size // 8 if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL: fmt = f"<u{size}" elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA: fmt = f">u{size}" elif data_type == v4c.DATA_TYPE_SIGNED_INTEL: fmt = f"<i{size}" elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA: fmt = f">i{size}" elif data_type == v4c.DATA_TYPE_REAL_INTEL: fmt = f"<f{size}" elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA: fmt = f">f{size}" return fmt
convert mdf version 4 channel data type to numpy dtype format string Parameters ---------- data_type : int mdf channel data type size : int data bit size channel_type: int mdf channel type Returns ------- fmt : str numpy compatible data type format string
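A small, self-contained illustration of the kind of mapping the function produces: an integer channel narrower than a standard width is rounded up to 1, 2, 4, or 8 bytes and rendered as a little- or big-endian numpy format string. The helper below is illustrative, not the library's API.

import numpy as np

def int_fmt(bit_size, little_endian=True, signed=False):
    """Round a bit size up to 1/2/4/8 bytes and build a numpy format string."""
    for nbytes in (1, 2, 4, 8):
        if bit_size <= nbytes * 8:
            break
    kind = "i" if signed else "u"
    return ("<" if little_endian else ">") + kind + str(nbytes)

print(int_fmt(13))            # '<u2'
print(np.dtype(int_fmt(13)))  # uint16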
def crypt(word, salt=None, rounds=_ROUNDS_DEFAULT):
    """Return a string representing the one-way hash of a password, with a
    salt prepended.

    If ``salt`` is not specified or is ``None``, the strongest available
    method will be selected and a salt generated. Otherwise, ``salt`` may be
    one of the ``crypt.METHOD_*`` values, or a string as returned by
    ``crypt.mksalt()``.
    """
    if salt is None or isinstance(salt, _Method):
        salt = mksalt(salt, rounds)

    algo, rounds, salt = extract_components_from_salt(salt)
    if algo == 5:
        hashfunc = hashlib.sha256
    elif algo == 6:
        hashfunc = hashlib.sha512
    else:
        raise ValueError('Unsupported algorithm, must be either 5 (sha256) or 6 (sha512)')

    return sha2_crypt(word, salt, hashfunc, rounds)
Return a string representing the one-way hash of a password, with a salt prepended. If ``salt`` is not specified or is ``None``, the strongest available method will be selected and a salt generated. Otherwise, ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as returned by ``crypt.mksalt()``.
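For context, a modular-crypt salt string encodes the algorithm id and optional rounds before the salt itself. A minimal, self-contained parser for that layout, assuming the usual ``$6$rounds=N$salt`` convention (the helper name and the 5000-round default from the sha-crypt specification are stated assumptions, not the module's code):

def parse_crypt_salt(salt):
    """Split '$6$rounds=5000$abcdefgh' into (algo, rounds, salt)."""
    parts = salt.strip("$").split("$")
    algo = int(parts[0])                       # 5 = sha256-crypt, 6 = sha512-crypt
    if parts[1].startswith("rounds="):
        return algo, int(parts[1][len("rounds="):]), parts[2]
    return algo, 5000, parts[1]                # 5000 is the spec's default round count

print(parse_crypt_salt("$6$rounds=10000$mysalt"))  # (6, 10000, 'mysalt')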
def match(self, environ):
    ''' Return a (target, url_args) tuple or raise HTTPError(404/405). '''
    targets, urlargs = self._match_path(environ)
    if not targets:
        raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
    method = environ['REQUEST_METHOD'].upper()
    if method in targets:
        return targets[method], urlargs
    if method == 'HEAD' and 'GET' in targets:
        return targets['GET'], urlargs
    if 'ANY' in targets:
        return targets['ANY'], urlargs
    allowed = [verb for verb in targets if verb != 'ANY']
    if 'GET' in allowed and 'HEAD' not in allowed:
        allowed.append('HEAD')
    raise HTTPError(405, "Method not allowed.",
                    header=[('Allow', ",".join(allowed))])
Return a (target, url_args) tuple or raise HTTPError(404/405).
def install_python_module(name):
    """ installs a python module using pip """
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        run('pip --quiet install %s' % name)
installs a python module using pip
def _get_plugin_stats(self, name): ''' Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats ''' the_dict = {} keys = self.redis_conn.keys('stats:{n}:*'.format(n=name)) for key in keys: # break down key elements = key.split(":") main = elements[2] end = elements[3] if main == 'total' or main == 'fail': if main not in the_dict: the_dict[main] = {} the_dict[main][end] = self._get_key_value(key, end == 'lifetime') elif main == 'self': if 'nodes' not in the_dict: # main is self, end is machine, true_tail is uuid the_dict['nodes'] = {} true_tail = elements[4] if end not in the_dict['nodes']: the_dict['nodes'][end] = [] the_dict['nodes'][end].append(true_tail) else: if 'plugins' not in the_dict: the_dict['plugins'] = {} if main not in the_dict['plugins']: the_dict['plugins'][main] = {} the_dict['plugins'][main][end] = self._get_key_value(key, end == 'lifetime') return the_dict
Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats
def issue(self, CorpNum, MgtKey, Memo=None, UserID=None):
    """ Issue a cash receipt.

    args
        CorpNum : Popbill member's business registration number
        MgtKey : document management key of the original cash receipt
        Memo : memo for the issuance
        UserID : Popbill member user ID
    return
        processing result. consist of code and message
    raise
        PopbillException
    """
    if MgtKey == None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")

    postData = ""
    req = {}

    if Memo is not None and Memo != '':
        req["memo"] = Memo

    postData = self._stringtify(req)

    return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
Issue a cash receipt. args CorpNum : Popbill member's business registration number MgtKey : document management key of the original cash receipt Memo : memo for the issuance UserID : Popbill member user ID return processing result. consist of code and message raise PopbillException
def account_weight(self, account): """ Returns the voting weight for **account** :param account: Account to get voting weight for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_weight( ... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000" ... ) 10000 """ account = self._process_value(account, 'account') payload = {"account": account} resp = self.call('account_weight', payload) return int(resp['weight'])
Returns the voting weight for **account** :param account: Account to get voting weight for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_weight( ... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000" ... ) 10000
def _add_log_handler( handler, level=None, fmt=None, datefmt=None, propagate=None): """ Add a logging handler to Orca. Parameters ---------- handler : logging.Handler subclass level : int, optional An optional logging level that will apply only to this stream handler. fmt : str, optional An optional format string that will be used for the log messages. datefmt : str, optional An optional format string for formatting dates in the log messages. propagate : bool, optional Whether the Orca logger should propagate. If None the propagation will not be modified, otherwise it will be set to this value. """ if not fmt: fmt = US_LOG_FMT if not datefmt: datefmt = US_LOG_DATE_FMT handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt)) if level is not None: handler.setLevel(level) logger = logging.getLogger('orca') logger.addHandler(handler) if propagate is not None: logger.propagate = propagate
Add a logging handler to Orca. Parameters ---------- handler : logging.Handler subclass level : int, optional An optional logging level that will apply only to this stream handler. fmt : str, optional An optional format string that will be used for the log messages. datefmt : str, optional An optional format string for formatting dates in the log messages. propagate : bool, optional Whether the Orca logger should propagate. If None the propagation will not be modified, otherwise it will be set to this value.
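A small sketch of wiring a stderr handler through the helper above; everything used here is stdlib logging, and only the format string is made up.

import logging
import sys

# Attach a stderr handler to the 'orca' logger at INFO level via the helper above.
_add_log_handler(
    logging.StreamHandler(sys.stderr),
    level=logging.INFO,
    fmt="%(asctime)s %(levelname)s %(message)s",
    propagate=False,
)
logging.getLogger('orca').info("handler attached")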
def extract_values(query): """ Extract values from insert or update query. Supports bulk_create """ # pylint if isinstance(query, subqueries.UpdateQuery): row = query.values return extract_values_inner(row, query) if isinstance(query, subqueries.InsertQuery): ret = [] for row in query.objs: ret.append(extract_values_inner(row, query)) return ret raise NotSupportedError
Extract values from insert or update query. Supports bulk_create
def graham(meshes, xs, ys, zs, expose_horizon=False): """ convex_graham """ distance_factor = 1.0 # TODO: make this an option (what does it even do)? # first lets handle the horizon visibilities, weights, horizon = only_horizon(meshes, xs, ys, zs) # Order the bodies from front to back. We can do this through zs (which are # the z-coordinate positions of each body in the system. # Those indices are in the same order as the meshes['comp_no'] # (needing to add 1 because comp_no starts at 1 not 0) # TODO: I don't think this whole comp_no thing is going to work with nested # triples.. may need to rethink the whole meshes.component_by_no thing front_to_back_comp_nos = np.argsort(zs)[::-1]+1 nbodies = len(front_to_back_comp_nos) for i_front in range(0, nbodies-1): # for a binary, i_front will only be 0 comp_no_front = front_to_back_comp_nos[i_front] comp_front = meshes.component_by_no(comp_no_front) mesh_front = meshes[comp_front] visibility_front = visibilities[comp_front] for i_back in range(i_front+1, nbodies): # for a binary, i_back will only be 1 comp_no_back = front_to_back_comp_nos[i_back] comp_back = meshes.component_by_no(comp_no_back) mesh_back = meshes[comp_back] visibility_back = visibilities[comp_back] # If mesh_back is entirely hidden, then we can skip any checks and # leave it hidden. Note that here we need to check visibility instead # of mesh['visibility'] or mesh_back['visibility'] since we are not # adjusting those values in memory. if np.all(visibility_back==0.0): continue # Determine a scale factor for the triangle min_size_back = mesh_back.areas.min() distance = distance_factor * 2.0/3**0.25*np.sqrt(min_size_back) # Select only those triangles that are not hidden tri_back_vis = mesh_back.vertices_per_triangle[visibility_back > 0.0].reshape(-1,9) tri_front_vis = mesh_front.vertices_per_triangle[visibility_front > 0.0].reshape(-1,9) back = np.vstack([tri_back_vis[:,0:2], tri_back_vis[:,3:5], tri_back_vis[:,6:8]]) front = np.vstack([tri_front_vis[:,0:2], tri_front_vis[:,3:5], tri_front_vis[:,6:8]]) # Star in front ---> star in back if not front.shape[0]: continue hull, inside = _graham_scan_inside_hull(front, back) hidden = inside.reshape(3,-1).all(axis=0) visible = ~(inside.reshape(3,-1).any(axis=0)) # Triangles that are partially hidden are those that are not # completely hidden, but do have at least one vertex hidden partial = ~hidden & ~visible # These returned visibilities are only from mesh_back_vis # So to apply back to our master visibilities parameter, we need # to find the correct inds. visibility_back[visibility_back > 0.0] = 1.0*visible + 0.5*partial ################################################### ## TODO: port the following code to be included ## ################################################### # It's possible that some triangles that do not have overlapping # vertices, still are partially covered (i.e. if the objects fall # in between the vertices) #visible1 = ~star1.mesh['hidden'] #backc = star1.mesh['center'][visible1,0:2] #if not len(backc): # continue # #subset_partial = closest_points(hull, backc, distance) # #if not len(subset_partial): # continue # #arg = np.where(visible1)[0][subset_partial] #star1.mesh['visible'][arg] = False #star1.mesh['partial'][arg] = True return visibilities, None, None
convex_graham
def make_unix_filename(fname):
    """
    :param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
    :returns: a valid unix filename
    :rtype: string
    :raises: DXError if the filename is invalid on a Unix system

    The problem being solved here is that *fname* is a python string; it may
    contain characters that are invalid for a file name. We replace all slashes
    with %2F. Another issue is that the user may choose an invalid name. Since
    we focus on Unix systems, the only possibilities are "." and "..".
    """
    # sanity check for filenames
    bad_filenames = [".", ".."]
    if fname in bad_filenames:
        raise DXError("Invalid filename {}".format(fname))
    return fname.replace('/', '%2F')
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx). :returns: a valid unix filename :rtype: string :raises: DXError if the filename is invalid on a Unix system The problem being solved here is that *fname* is a python string; it may contain characters that are invalid for a file name. We replace all slashes with %2F. Another issue is that the user may choose an invalid name. Since we focus on Unix systems, the only possibilities are "." and "..".
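Illustrative calls for the sanitizer above; DXError is the package's own exception class, as used inside the function.

# Slashes are percent-encoded, plain names pass through, "." and ".." are rejected.
assert make_unix_filename("results/run1.txt") == "results%2Frun1.txt"
assert make_unix_filename("report.pdf") == "report.pdf"
try:
    make_unix_filename("..")
except DXError:
    pass  # invalid Unix filename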
def strip_illumina_suffix(self): '''Removes any trailing /1 or /2 off the end of the name''' if self.id.endswith('/1') or self.id.endswith('/2'): self.id = self.id[:-2]
Removes any trailing /1 or /2 off the end of the name
def _compile_tag_re(self): """ Compile regex strings from device_tag_re option and return list of compiled regex/tag pairs """ device_tag_list = [] for regex_str, tags in iteritems(self._device_tag_re): try: device_tag_list.append([re.compile(regex_str, IGNORE_CASE), [t.strip() for t in tags.split(',')]]) except TypeError: self.log.warning('{} is not a valid regular expression and will be ignored'.format(regex_str)) self._device_tag_re = device_tag_list
Compile regex strings from device_tag_re option and return list of compiled regex/tag pairs
def _recv(self): '''read some bytes into self.buf''' from . import mavutil start_time = time.time() while time.time() < start_time + self.timeout: m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0', type='SERIAL_CONTROL', blocking=False, timeout=0) if m is not None and m.count != 0: break self.mav.mav.serial_control_send(self.port, mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE | mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND, 0, 0, 0, [0]*70) m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0', type='SERIAL_CONTROL', blocking=True, timeout=0.01) if m is not None and m.count != 0: break if m is not None: if self._debug > 2: print(m) data = m.data[:m.count] self.buf += ''.join(str(chr(x)) for x in data)
read some bytes into self.buf
def getkeyword(self, keyword): """Get the value of a table keyword. The value of a keyword can be a: - scalar which is returned as a normal python scalar. - an array which is returned as a numpy array. - a reference to a table which is returned as a string containing its name prefixed by 'Table :'. It can be opened using the normal table constructor which will remove the prefix. - a struct which is returned as a dict. A struct is fully nestable, thus each field in the struct can have one of the values described here. Similar to method :func:`fieldnames` a keyword name can be given consisting of multiple parts separated by dots. This represents nested structs, thus gives the value of a field in a struct (in a struct, etc.). Instead of a keyword name an index can be given which returns the value of the i-th keyword. """ if isinstance(keyword, str): return self._getkeyword('', keyword, -1) else: return self._getkeyword('', '', keyword)
Get the value of a table keyword. The value of a keyword can be a: - scalar which is returned as a normal python scalar. - an array which is returned as a numpy array. - a reference to a table which is returned as a string containing its name prefixed by 'Table :'. It can be opened using the normal table constructor which will remove the prefix. - a struct which is returned as a dict. A struct is fully nestable, thus each field in the struct can have one of the values described here. Similar to method :func:`fieldnames` a keyword name can be given consisting of multiple parts separated by dots. This represents nested structs, thus gives the value of a field in a struct (in a struct, etc.). Instead of a keyword name an index can be given which returns the value of the i-th keyword.
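A hedged sketch of reading keywords with the accessor above; the table name, the keyword names, and the casacore-style table() constructor are illustrative assumptions, not taken from the source.

# Hypothetical keyword reads; the names below are made up for illustration.
t = table("observation.ms")
version = t.getkeyword("MS_VERSION")               # scalar keyword
telescope = t.getkeyword("OBS_INFO.telescope")     # dotted path into a nested struct
first = t.getkeyword(0)                            # by index: value of the first keyword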
def object_download(self, bucket, key, start_offset=0, byte_count=None):
    """Reads the contents of an object as text.

    Args:
      bucket: the name of the bucket containing the object.
      key: the key of the object to be read.
      start_offset: the start offset of bytes to read.
      byte_count: the number of bytes to read. If None, it reads to the end.
    Returns:
      The text content within the object.
    Raises:
      Exception if the object could not be read from.
    """
    args = {'alt': 'media'}
    headers = {}
    if start_offset > 0 or byte_count is not None:
      header = 'bytes=%d-' % start_offset
      if byte_count is not None:
        # The HTTP Range end is inclusive, so the last requested byte is
        # start_offset + byte_count - 1.
        header += '%d' % (start_offset + byte_count - 1)
      headers['Range'] = header
    url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
    return google.datalab.utils.Http.request(url, args=args, headers=headers,
                                              credentials=self._credentials,
                                              raw_response=True)
Reads the contents of an object as text. Args: bucket: the name of the bucket containing the object. key: the key of the object to be read. start_offset: the start offset of bytes to read. byte_count: the number of bytes to read. If None, it reads to the end. Returns: The text content within the object. Raises: Exception if the object could not be read from.
def locus_of_gene_id(self, gene_id): """ Given a gene ID returns Locus with: chromosome, start, stop, strand """ return self.db.query_locus( filter_column="gene_id", filter_value=gene_id, feature="gene")
Given a gene ID returns Locus with: chromosome, start, stop, strand
def mangle(self, name, x): """ Mangle the name by hashing the I{name} and appending I{x}. @return: the mangled name. """ h = abs(hash(name)) return '%s-%s' % (h, x)
Mangle the name by hashing the I{name} and appending I{x}. @return: the mangled name.
def Pipe(self, *sequence, **kwargs):
    """
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.

**Arguments**

* ***sequence**: any variable amount of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.

The expression

    Pipe(*sequence, **kwargs)

is equivalent to

    Seq(*sequence, **kwargs)(None)

Normally the first argument of `Pipe` is a value that is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.

**Examples**

    from phi import P

    def add1(x): return x + 1
    def mul3(x): return x * 3

    x = P.Pipe(
        1,     #input
        add1,  #1 + 1 == 2
        mul3   #2 * 3 == 6
    )

    assert x == 6

The previous example using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions

    from phi import P

    x = P.Pipe(
        1,      #input
        P + 1,  #1 + 1 == 2
        P * 3   #2 * 3 == 6
    )

    assert x == 6

**Also see**

* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
    """
    state = kwargs.pop("refs", {})

    return self.Seq(*sequence, **kwargs)(None, **state)
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator. **Arguments** * ***sequence**: any variable amount of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`. * ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info. The expression Pipe(*sequence, **kwargs) is equivalent to Seq(*sequence, **kwargs)(None) Normally the first argument of `Pipe` is a value that is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded. **Examples** from phi import P def add1(x): return x + 1 def mul3(x): return x * 3 x = P.Pipe( 1, #input add1, #1 + 1 == 2 mul3 #2 * 3 == 6 ) assert x == 6 The previous example using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions from phi import P x = P.Pipe( 1, #input P + 1, #1 + 1 == 2 P * 3 #2 * 3 == 6 ) assert x == 6 **Also see** * `phi.builder.Builder.Seq` * [dsl](https://cgarciae.github.io/phi/dsl.m.html) * [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile) * [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
def drawQuad(page, quad, color=None, fill=None, dashes=None, width=1, roundCap=False, morph=None, overlay=True): """Draw a quadrilateral. """ img = page.newShape() Q = img.drawQuad(Quad(quad)) img.finish(color=color, fill=fill, dashes=dashes, width=width, roundCap=roundCap, morph=morph) img.commit(overlay) return Q
Draw a quadrilateral.
def login_oauth2_user(valid, oauth): """Log in a user after having been verified.""" if valid: oauth.user.login_via_oauth2 = True _request_ctx_stack.top.user = oauth.user identity_changed.send(current_app._get_current_object(), identity=Identity(oauth.user.id)) return valid, oauth
Log in a user after having been verified.
def fragments_fromstring(html, no_leading_text=False, base_url=None, parser=None, **kw): """ Parses several HTML elements, returning a list of elements. The first item in the list may be a string (though leading whitespace is removed). If no_leading_text is true, then it will be an error if there is leading text, and it will always be a list of only elements. base_url will set the document's base_url attribute (and the tree's docinfo.URL) """ if parser is None: parser = html_parser # FIXME: check what happens when you give html with a body, head, etc. if isinstance(html, bytes): if not _looks_like_full_html_bytes(html): # can't use %-formatting in early Py3 versions html = ('<html><body>'.encode('ascii') + html + '</body></html>'.encode('ascii')) else: if not _looks_like_full_html_unicode(html): html = '<html><body>%s</body></html>' % html doc = document_fromstring(html, parser=parser, base_url=base_url, **kw) assert _nons(doc.tag) == 'html' bodies = [e for e in doc if _nons(e.tag) == 'body'] assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html)) body = bodies[0] elements = [] if no_leading_text and body.text and body.text.strip(): raise etree.ParserError( "There is leading text: %r" % body.text) if body.text and body.text.strip(): elements.append(body.text) elements.extend(body) # FIXME: removing the reference to the parent artificial document # would be nice return elements
Parses several HTML elements, returning a list of elements. The first item in the list may be a string (though leading whitespace is removed). If no_leading_text is true, then it will be an error if there is leading text, and it will always be a list of only elements. base_url will set the document's base_url attribute (and the tree's docinfo.URL)
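Typical behaviour of the fragment parser above; the markup is illustrative.

parts = fragments_fromstring("Hello <b>world</b><p>bye</p>")
# parts[0] == "Hello "   (leading text, kept unless no_leading_text=True)
# parts[1].tag == "b", parts[2].tag == "p"
only_elements = fragments_fromstring("<b>world</b>", no_leading_text=True)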
def save(markov, fname, args): """Save a generator. Parameters ---------- markov : `markovchain.Markov` Generator to save. fname : `str` Output file path. args : `argparse.Namespace` Command arguments. """ if isinstance(markov.storage, JsonStorage): if fname is None: markov.save(sys.stdout) else: if fname.endswith('.bz2'): open_ = bz2.open else: open_ = open if args.progress: print('Saving JSON data...') with open_(fname, 'wt') as fp: markov.save(fp) else: markov.save()
Save a generator. Parameters ---------- markov : `markovchain.Markov` Generator to save. fname : `str` Output file path. args : `argparse.Namespace` Command arguments.
def make_non_negative_axis(axis, rank): """Make (possibly negatively indexed) `axis` argument non-negative.""" axis = tf.convert_to_tensor(value=axis, name="axis") rank = tf.convert_to_tensor(value=rank, name="rank") axis_ = tf.get_static_value(axis) rank_ = tf.get_static_value(rank) # Static case. if axis_ is not None and rank_ is not None: is_scalar = axis_.ndim == 0 if is_scalar: axis_ = [axis_] positive_axis = [] for a_ in axis_: if a_ < 0: positive_axis.append(rank_ + a_) else: positive_axis.append(a_) if is_scalar: positive_axis = positive_axis[0] return tf.convert_to_tensor(value=positive_axis, dtype=axis.dtype) # Dynamic case. # Unfortunately static values are lost by this tf.where. return tf.where(axis < 0, rank + axis, axis)
Make (possibly negatively indexed) `axis` argument non-negative.
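A short sketch of the normalization above: a negative axis index is mapped to rank + axis, so -1 on a rank-3 tensor becomes 2. Both inputs are statically known here, so the static branch runs.

import tensorflow as tf

axis = make_non_negative_axis(axis=-1, rank=3)
assert tf.get_static_value(axis) == 2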
def random_string(**kwargs):
    """
    By default generates a random string of 10 chars composed of digits
    and ascii lowercase letters. String length and pool can be overridden
    via kwargs. Pool must be a list of strings.
    """
    n = kwargs.get('length', 10)
    pool = kwargs.get('pool') or string.digits + string.ascii_lowercase
    return ''.join(random.SystemRandom().choice(pool) for _ in range(n))
By default generates a random string of 10 chars composed of digits and ascii lowercase letters. String length and pool can be overridden via kwargs. Pool must be a list of strings.
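Illustrative calls for the helper above; the outputs are random by design.

import string

token = random_string()                              # 10 chars, digits + lowercase
pin = random_string(length=6, pool=string.digits)    # digits only
assert len(pin) == 6 and pin.isdigit()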
def golfclap(rest): "Clap for something" clapv = random.choice(phrases.clapvl) adv = random.choice(phrases.advl) adj = random.choice(phrases.adjl) if rest: clapee = rest.strip() karma.Karma.store.change(clapee, 1) return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj) return "/me claps %s, %s %s." % (clapv, adv, adj)
Clap for something
def currentRegion(self): """ Returns the current region based on the current cursor position. :return <XDropZoneWidget> """ pos = QtGui.QCursor.pos() pos = self.mapFromGlobal(pos) for region in self.regions(): if region.testHovered(pos): return region return None
Returns the current region based on the current cursor position. :return <XDropZoneWidget>
def busco_plot (self, lin): """ Make the HighCharts HTML for the BUSCO plot for a particular lineage """ data = {} for s_name in self.busco_data: if self.busco_data[s_name].get('lineage_dataset') == lin: data[s_name] = self.busco_data[s_name] plot_keys = ['complete_single_copy','complete_duplicated','fragmented','missing'] plot_cols = ['#7CB5EC', '#434348', '#F7A35C', '#FF3C50'] keys = OrderedDict() for k, col in zip(plot_keys, plot_cols): keys[k] = {'name': self.busco_keys[k], 'color': col} # Config for the plot config = { 'id': 'busco_plot_{}'.format(re.sub('\W+', '_', str(lin))), 'title': 'BUSCO: Assessment Results' if lin is None else 'BUSCO Assessment Results: {}'.format(lin), 'ylab': '# BUSCOs', 'cpswitch_counts_label': 'Number of BUSCOs' } return bargraph.plot(data, keys, config)
Make the HighCharts HTML for the BUSCO plot for a particular lineage
def get_qemu_version(qemu_path): """ Gets the Qemu version. :param qemu_path: path to Qemu executable. """ if sys.platform.startswith("win"): # Qemu on Windows doesn't return anything with parameter -version # look for a version number in version.txt file in the same directory instead version_file = os.path.join(os.path.dirname(qemu_path), "version.txt") if os.path.isfile(version_file): try: with open(version_file, "rb") as file: version = file.read().decode("utf-8").strip() match = re.search("[0-9\.]+", version) if match: return version except (UnicodeDecodeError, OSError) as e: log.warn("could not read {}: {}".format(version_file, e)) return "" else: try: output = yield from subprocess_check_output(qemu_path, "-version") match = re.search("version\s+([0-9a-z\-\.]+)", output) if match: version = match.group(1) return version else: raise QemuError("Could not determine the Qemu version for {}".format(qemu_path)) except subprocess.SubprocessError as e: raise QemuError("Error while looking for the Qemu version: {}".format(e))
Gets the Qemu version. :param qemu_path: path to Qemu executable.
def parse(self): """Fully parses game summary report. :returns: boolean success indicator :rtype: bool """ r = super(GameSummRep, self).parse() try: self.parse_scoring_summary() return r and False except: return False
Fully parses game summary report. :returns: boolean success indicator :rtype: bool
def save(filename=ConfigPath): """Saves this module's changed attributes to INI configuration.""" default_values = defaults() parser = configparser.RawConfigParser() parser.optionxform = str # Force case-sensitivity on names try: save_types = basestring, int, float, tuple, list, dict, type(None) for k, v in sorted(globals().items()): if not isinstance(v, save_types) or k.startswith("_") \ or default_values.get(k, parser) == v: continue # for k, v try: parser.set("DEFAULT", k, json.dumps(v)) except Exception: pass if parser.defaults(): with open(filename, "wb") as f: f.write("# %s %s configuration written on %s.\n" % (Title, Version, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) parser.write(f) else: # Nothing to write: delete configuration file try: os.unlink(filename) except Exception: pass except Exception: logging.warn("Error writing config to %s.", filename, exc_info=True)
Saves this module's changed attributes to INI configuration.
def __complete_imports_and_aliases( self, prefix: str, name_in_module: Optional[str] = None ) -> Iterable[str]: """Return an iterable of possible completions matching the given prefix from the list of imports and aliased imports. If name_in_module is given, further attempt to refine the list to matching names in that namespace.""" imports = self.imports aliases = lmap.map( { alias: imports.entry(import_name) for alias, import_name in self.import_aliases } ) candidates = filter( Namespace.__completion_matcher(prefix), itertools.chain(aliases, imports) ) if name_in_module is not None: for _, module in candidates: for name in module.__dict__: if name.startswith(name_in_module): yield f"{prefix}/{name}" else: for candidate_name, _ in candidates: yield f"{candidate_name}/"
Return an iterable of possible completions matching the given prefix from the list of imports and aliased imports. If name_in_module is given, further attempt to refine the list to matching names in that namespace.
def _updater_wrapper(updater): """A wrapper for the user-defined handle.""" def updater_handle(key, lhs_handle, rhs_handle, _): """ ctypes function """ lhs = _ndarray_cls(NDArrayHandle(lhs_handle)) rhs = _ndarray_cls(NDArrayHandle(rhs_handle)) updater(key, lhs, rhs) return updater_handle
A wrapper for the user-defined handle.
def _locked_refresh_doc_ids(self): """Assumes that the caller has the _index_lock ! """ d = {} for s in self._shards: for k in s.doc_index.keys(): if k in d: raise KeyError('doc "{i}" found in multiple repos'.format(i=k)) d[k] = s self._doc2shard_map = d
Assumes that the caller has the _index_lock !
def display(self, ret, indent, out, rows_key=None, labels_key=None): '''Display table(s).''' rows = [] labels = None if isinstance(ret, dict): if not rows_key or (rows_key and rows_key in list(ret.keys())): # either not looking for a specific key # either looking and found in the current root for key in sorted(ret): if rows_key and key != rows_key: continue # if searching specifics, ignore anything else val = ret[key] if not rows_key: out.append( self.ustring( indent, self.DARK_GRAY, # pylint: disable=no-member key, suffix=':' ) ) out.append( self.ustring( indent, self.DARK_GRAY, # pylint: disable=no-member '----------' ) ) if isinstance(val, (list, tuple)): rows = val if labels_key: # at the same depth labels = ret.get(labels_key) # if any out.extend(self.display_rows(rows, labels, indent)) else: self.display(val, indent + 4, out, rows_key=rows_key, labels_key=labels_key) elif rows_key: # dig deeper for key in sorted(ret): val = ret[key] self.display(val, indent, out, rows_key=rows_key, labels_key=labels_key) # same indent elif isinstance(ret, (list, tuple)): if not rows_key: rows = ret out.extend(self.display_rows(rows, labels, indent)) return out
Display table(s).
def _validate_danglers(self):
    """
    Checks for rows that are not referenced in the tables that should be linked

    stops <> stop_times using stop_I
    stop_times <> trips <> days, using trip_I
    trips <> routes, using route_I
    :return:
    """
    for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):
        dangler_count = self.gtfs.execute_custom_query(query).fetchone()[0]
        if dangler_count > 0:
            if self.verbose:
                print(str(dangler_count) + " " + warning)
            self.warnings_container.add_warning(warning, self.location, count=dangler_count)
Checks for rows that are not referenced in the tables that should be linked stops <> stop_times using stop_I stop_times <> trips <> days, using trip_I trips <> routes, using route_I :return:
def get_network(model=None, std=0.005, disable_reinforce=False, random_glimpse=False): """ Get baseline model. Parameters: model - model path Returns: network """ network = NeuralClassifier(input_dim=28 * 28) network.stack_layer(FirstGlimpseLayer(std=std, disable_reinforce=disable_reinforce, random_glimpse=random_glimpse)) if model and os.path.exists(model): network.load_params(model) return network
Get baseline model. Parameters: model - model path Returns: network
def remove_escapes(self):
    """Removes backslash escape characters from the string

    :return: The string with the backslash escapes removed
    """
    chars = []
    i = 0
    while i < len(self.string):
        char = self.string[i]
        if char == "\\":
            i += 1
        else:
            chars.append(char)
            i += 1
    return "".join(chars)
Removes backslash escape characters from the string :return: The string with the backslash escapes removed
def insert(self, context): """ Add Vagrant box to the calling user. :param resort.engine.execution.Context context: Current execution context. """ self.write([ "box", "add", "--name", context.resolve(self.__name), self.__path(context) ])
Add Vagrant box to the calling user. :param resort.engine.execution.Context context: Current execution context.
def tokenize(text, custom_dict=None): """ Tokenize given Thai text string Input ===== text: str, Thai text string custom_dict: str (or list), path to customized dictionary file It allows the function not to tokenize given dictionary wrongly. The file should contain custom words separated by line. Alternatively, you can provide list of custom words too. Output ====== tokens: list, list of tokenized words Example ======= >> deepcut.tokenize('ตัดคำได้ดีมาก') >> ['ตัดคำ','ได้','ดี','มาก'] """ global TOKENIZER if not TOKENIZER: TOKENIZER = DeepcutTokenizer() return TOKENIZER.tokenize(text, custom_dict=custom_dict)
Tokenize given Thai text string Input ===== text: str, Thai text string custom_dict: str (or list), path to customized dictionary file It allows the function not to tokenize given dictionary wrongly. The file should contain custom words separated by line. Alternatively, you can provide list of custom words too. Output ====== tokens: list, list of tokenized words Example ======= >> deepcut.tokenize('ตัดคำได้ดีมาก') >> ['ตัดคำ','ได้','ดี','มาก']
def _divide(divisor, remainder, quotient, remainders, base, precision=None):
    """
    Given a divisor and a remainder, continue dividing until the desired
    precision is reached.

    :param int divisor: the divisor
    :param int remainder: the remainder
    :param int base: the base
    :param precision: maximum number of fractional digits to compute
    :type precision: int or NoneType
    :returns: the remainder
    :rtype: int

    ``quotient`` and ``remainders`` are set by side effects

    Complexity: O(precision) if precision is not None else O(divisor)
    """
    # pylint: disable=too-many-arguments
    indices = itertools.count() if precision is None else range(precision)
    for _ in indices:
        if remainder == 0 or remainder in remainders:
            break
        remainders.append(remainder)
        (quot, rem) = divmod(remainder, divisor)
        quotient.append(quot)
        if quot > 0:
            remainder = rem * base
        else:
            remainder = remainder * base
    return remainder
Given a divisor and a remainder, continue dividing until the desired precision is reached. :param int divisor: the divisor :param int remainder: the remainder :param int base: the base :param precision: maximum number of fractional digits to compute :type precision: int or NoneType :returns: the remainder :rtype: int ``quotient`` and ``remainders`` are set by side effects Complexity: O(precision) if precision is not None else O(divisor)
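A worked sketch of the long-division helper above, computing the fractional expansion of 1/7 in base 10. Passing the initial remainder pre-multiplied by the base is my assumption about the caller's convention; quotient and remainders are filled in place.

quotient, remainders = [], []
# 1/7 in base 10, six fractional digits; the starting remainder is 1 scaled by the base.
_divide(divisor=7, remainder=1 * 10, quotient=quotient,
        remainders=remainders, base=10, precision=6)
# quotient   -> [1, 4, 2, 8, 5, 7], i.e. 0.142857...
# remainders -> [10, 30, 20, 60, 40, 50]; a repeated remainder signals a repeating expansion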
def request_access_token(self, code, redirect_uri=None): ''' Return access token as a JsonDict: {"access_token":"your-access-token","expires":12345678,"uid":1234}, expires is represented using standard unix-epoch-time ''' redirect = redirect_uri or self._redirect_uri resp_text = _http('POST', 'https://graph.qq.com/oauth2.0/token', client_id=self._client_id, client_secret=self._client_secret, redirect_uri=redirect, code=code, grant_type='authorization_code') return self._parse_access_token(resp_text)
Return access token as a JsonDict: {"access_token":"your-access-token","expires":12345678,"uid":1234}, expires is represented using standard unix-epoch-time
def synphot(self, wlen, flam): """`wlen` and `flam` give a tabulated model spectrum in wavelength and f_λ units. We interpolate linearly over both the model and the bandpass since they're both discretely sampled. Note that quadratic interpolation is both much slower and can blow up fatally in some cases. The latter issue might have to do with really large X values that aren't zero-centered, maybe? I used to use the quadrature integrator, but Romberg doesn't issue complaints the way quadrature did. I should probably acquire some idea about what's going on under the hood. """ from scipy.interpolate import interp1d from scipy.integrate import romberg d = self._ensure_data() mflam = interp1d(wlen, flam, kind='linear', bounds_error=False, fill_value=0) mresp = interp1d(d.wlen, d.resp, kind='linear', bounds_error=False, fill_value=0) bmin = d.wlen.min() bmax = d.wlen.max() numer = romberg(lambda x: mresp(x) * mflam(x), bmin, bmax, divmax=20) denom = romberg(lambda x: mresp(x), bmin, bmax, divmax=20) return numer / denom
`wlen` and `flam` give a tabulated model spectrum in wavelength and f_λ units. We interpolate linearly over both the model and the bandpass since they're both discretely sampled. Note that quadratic interpolation is both much slower and can blow up fatally in some cases. The latter issue might have to do with really large X values that aren't zero-centered, maybe? I used to use the quadrature integrator, but Romberg doesn't issue complaints the way quadrature did. I should probably acquire some idea about what's going on under the hood.
def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None, logger=None, full_module_name=None, only_update=False, metadir=None, include_numpy=False, include_dirs=None, cy_kwargs=None, gdb=False, cplus=None, **kwargs): """ Convenience function If cwd is specified, pyxpath and dst are taken to be relative If only_update is set to `True` the modification time is checked and compilation is only run if the source is newer than the destination Parameters ---------- pyxpath: path string path to Cython source file objpath: path string (optional) path to object file to generate interm_c_dir: path string (optional) directory to put generated C file. cwd: path string (optional) working directory and root of relative paths logger: logging.Logger (optional) passed onto `simple_cythonize` and `src2obj` full_module_name: string (optional) passed onto `simple_cythonize` only_update: bool (optional) passed onto `simple_cythonize` and `src2obj` metadir: path string (optional) passed onto src2obj include_numpy: bool (optional) Add numpy include directory to include_dirs. default: False include_dirs: iterable of path strings (optional) Passed onto src2obj and via cy_kwargs['include_path'] to simple_cythonize. cy_kwargs: dict (optional) keyword arguments passed onto `simple_cythonize` gdb: bool (optional) convenience: cy_kwargs['gdb_debug'] is set True if gdb=True, default: False cplus: bool (optional) Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus` **kwargs: dict keyword arguments passed onto src2obj Returns ------- Absolute path of generated object file. """ assert pyxpath.endswith('.pyx') cwd = cwd or '.' objpath = objpath or '.' interm_c_dir = interm_c_dir or os.path.dirname(objpath) abs_objpath = get_abspath(objpath, cwd=cwd) if os.path.isdir(abs_objpath): pyx_fname = os.path.basename(pyxpath) name, ext = os.path.splitext(pyx_fname) objpath = os.path.join(objpath, name+objext) cy_kwargs = cy_kwargs or {} cy_kwargs['output_dir'] = cwd if cplus is None: cplus = pyx_is_cplus(pyxpath) cy_kwargs['cplus'] = cplus if gdb: cy_kwargs['gdb_debug'] = True if include_dirs: cy_kwargs['include_path'] = include_dirs interm_c_file = simple_cythonize( pyxpath, destdir=interm_c_dir, cwd=cwd, logger=logger, full_module_name=full_module_name, only_update=only_update, **cy_kwargs) include_dirs = include_dirs or [] if include_numpy: import numpy numpy_inc_dir = numpy.get_include() if numpy_inc_dir not in include_dirs: include_dirs.append(numpy_inc_dir) flags = kwargs.pop('flags', []) needed_flags = ('-fwrapv', '-pthread') if not cplus: needed_flags += ('-Wstrict-prototypes',) # not really needed.. for flag in needed_flags: if flag not in flags: flags.append(flag) options = kwargs.pop('options', []) if kwargs.pop('strict_aliasing', False): raise CompilationError("Cython req. strict aliasing to be disabled.") if 'pic' not in options: options.append('pic') if 'warn' not in options: options.append('warn') # Let's be explicit about standard if cplus: std = kwargs.pop('std', 'c++98') else: std = kwargs.pop('std', 'c99') return src2obj( interm_c_file, objpath=objpath, cwd=cwd, only_update=only_update, metadir=metadir, include_dirs=include_dirs, flags=flags, std=std, options=options, logger=logger, inc_py=True, strict_aliasing=False, **kwargs)
Convenience function If cwd is specified, pyxpath and dst are taken to be relative If only_update is set to `True` the modification time is checked and compilation is only run if the source is newer than the destination Parameters ---------- pyxpath: path string path to Cython source file objpath: path string (optional) path to object file to generate interm_c_dir: path string (optional) directory to put generated C file. cwd: path string (optional) working directory and root of relative paths logger: logging.Logger (optional) passed onto `simple_cythonize` and `src2obj` full_module_name: string (optional) passed onto `simple_cythonize` only_update: bool (optional) passed onto `simple_cythonize` and `src2obj` metadir: path string (optional) passed onto src2obj include_numpy: bool (optional) Add numpy include directory to include_dirs. default: False include_dirs: iterable of path strings (optional) Passed onto src2obj and via cy_kwargs['include_path'] to simple_cythonize. cy_kwargs: dict (optional) keyword arguments passed onto `simple_cythonize` gdb: bool (optional) convenience: cy_kwargs['gdb_debug'] is set True if gdb=True, default: False cplus: bool (optional) Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus` **kwargs: dict keyword arguments passed onto src2obj Returns ------- Absolute path of generated object file.
def format_all(format_string, env):
    """
    Format the input string using each possible combination of lists in the
    provided environment. Yields each formatted string.
    """
    prepared_env = parse_pattern(format_string, env,
                                 lambda x, y: [FormatWrapper(x, z) for z in y])
    # Generate each possible combination, format the string with it and yield
    # the resulting string:
    for field_values in product(*prepared_env.itervalues()):
        format_env = dict(izip(prepared_env.iterkeys(), field_values))
        yield format_string.format(**format_env)
Format the input string using each possible combination of lists in the provided environment. Yields each formatted string.
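An illustrative call for the expander above; parse_pattern and FormatWrapper live elsewhere in this module, so the exact output shape below is an assumption about the intended expansion over the cartesian product.

env = {"host": ["web1", "web2"], "ext": ["log", "err"]}
for path in format_all("/var/log/{host}.{ext}", env):
    print(path)
# Expected: one string per combination, e.g.
# /var/log/web1.log, /var/log/web1.err, /var/log/web2.log, /var/log/web2.err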
def get_holiday_label(self, day): """Return the label of the holiday, if the date is a holiday""" day = cleaned_date(day) return {day: label for day, label in self.holidays(day.year) }.get(day)
Return the label of the holiday, if the date is a holiday
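A hedged usage sketch for the lookup above; it assumes a workalendar-style calendar class (France here is hypothetical in this context) whose holidays(year) yields (date, label) pairs, and the label shown is illustrative.

from datetime import date

cal = France()   # hypothetical concrete calendar exposing holidays(year)
print(cal.get_holiday_label(date(2024, 12, 25)))   # e.g. "Christmas Day"
print(cal.get_holiday_label(date(2024, 12, 2)))    # None: not a holiday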
def genrows(cursor: Cursor, arraysize: int = 1000) \ -> Generator[List[Any], None, None]: """ Generate all rows from a cursor. Args: cursor: the cursor arraysize: split fetches into chunks of this many records Yields: each row """ # http://code.activestate.com/recipes/137270-use-generators-for-fetching-large-db-record-sets/ # noqa while True: results = cursor.fetchmany(arraysize) if not results: break for result in results: yield result
Generate all rows from a cursor. Args: cursor: the cursor arraysize: split fetches into chunks of this many records Yields: each row
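A self-contained sketch streaming rows from a sqlite3 cursor through the generator above; the table and values are made up.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE t (x INTEGER)")
cur.executemany("INSERT INTO t VALUES (?)", [(i,) for i in range(5000)])
cur.execute("SELECT x FROM t")
# Rows are fetched 500 at a time instead of all at once.
total = sum(row[0] for row in genrows(cur, arraysize=500))
assert total == sum(range(5000))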
def create_dataclass_loader(cls, registry, field_getters): """create a loader for a dataclass type""" fields = cls.__dataclass_fields__ item_loaders = map(registry, map(attrgetter('type'), fields.values())) getters = map(field_getters.__getitem__, fields) loaders = list(starmap(compose, zip(item_loaders, getters))) def dloader(obj): return cls(*(g(obj) for g in loaders)) return dloader
create a loader for a dataclass type
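A hedged sketch of the factory above. It assumes registry maps a field's annotated type to a loader callable, field_getters maps field names to getters on the raw object, and compose(f, g)(x) == f(g(x)); all three are inferences from how the factory wires things together, not confirmed by the source.

from dataclasses import dataclass
from operator import itemgetter

@dataclass
class Point:
    x: int
    y: int

registry = {int: int}.__getitem__                      # type -> loader (int() coerces strings)
field_getters = {"x": itemgetter("x"), "y": itemgetter("y")}
load_point = create_dataclass_loader(Point, registry, field_getters)
assert load_point({"x": "1", "y": "2"}) == Point(1, 2)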
def set_log_format(self, log_type, log_format): '''Configures log format Arguments: log_type (:obj:`str`): log type (error, debug or stream) log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s | Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S") ''' if not (log_type == 'error' or log_type == 'stream' or log_type == 'debug'): self.log.debug('Log type must be error, stream, or debug') else: self.default_formatter = logging.Formatter(log_format) if log_type == 'error': self.error_handler.setFormatter(self.default_formatter) elif log_type == 'debug': self.debug_handler.setFormatter(self.default_formatter) elif log_type == 'stream': self.stream_handler.setFormatter(self.default_formatter)
Configures log format Arguments: log_type (:obj:`str`): log type (error, debug or stream) log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s | Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S")
def get_type_len(self): """Retrieve the type and length for a data record.""" # Check types and set type/len self.get_sql() return self.type, self.len, self.len_decimal
Retrieve the type and length for a data record.