Columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
382,900
def process_rpc(self, rpc):
    p = "/nc:rpc/" + self.qname(rpc)
    tmpl = self.xsl_template(p)
    inp = rpc.search_one("input")
    if inp is not None:
        ct = self.xsl_calltemplate("rpc-input", tmpl)
        self.xsl_withparam("nsid", rpc.i_module.i_modulename + ":", ct)
        self.process_children(inp, p, 2)
    outp = rpc.search_one("output")
    if outp is not None:
        self.process_children(outp, "/nc:rpc-reply", 1)
Process input and output parts of `rpc`.
382,901
def file_link(self, instance):
    sfile = instance.file_upload
    if not sfile:
        return mark_safe("")
    else:
        # HTML template reconstructed; the original format string was lost in extraction
        return mark_safe('<a href="%s">%s</a> (<a href="%s">preview</a>)'
                         % (sfile.get_absolute_url(), sfile.basename(), sfile.get_preview_url()))
Renders the link to the student upload file.
382,902
def _validate_ctypes(self, from_obj, to_obj):
    if from_obj:
        from_ctype = ContentType.objects.get_for_model(from_obj)
        # assertion messages reconstructed; originals were lost in extraction
        assert from_ctype.natural_key() == self.from_content_type.natural_key(), (
            "Relationship '%s': invalid from content type %s" % (self.name, from_ctype))
    if to_obj:
        to_ctype = ContentType.objects.get_for_model(to_obj)
        assert to_ctype.natural_key() == self.to_content_type.natural_key(), (
            "Relationship '%s': invalid to content type %s" % (self.name, to_ctype))
Asserts that the content types for the given object are valid for this relationship. If validation fails, ``AssertionError`` will be raised.
382,903
def add_media(dest, media):
    if django.VERSION >= (2, 2):
        dest._css_lists += media._css_lists
        dest._js_lists += media._js_lists
    elif django.VERSION >= (2, 0):
        combined = dest + media
        dest._css = combined._css
        dest._js = combined._js
    else:
        dest.add_css(media._css)
        dest.add_js(media._js)
Optimized version of django.forms.Media.__add__() that doesn't create new objects.
382,904
def range(cls, collection, attribute, left, right, closed, index_id, skip=None, limit=None):
    # dict keys reconstructed from the parameter names; literals were lost in extraction
    kwargs = {
        "index": index_id,
        "attribute": attribute,
        "left": left,
        "right": right,
        "closed": closed,
        "skip": skip,
        "limit": limit,
    }
    return cls._construct_query(name="range", collection=collection, multiple=True, **kwargs)
This will find all documents within a given range. In order to execute a range query, a skip-list index on the queried attribute must be present.

:param collection Collection instance
:param attribute The attribute path to check
:param left The lower bound
:param right The upper bound
:param closed If true, use interval including left and right, otherwise exclude right, but include left
:param index_id ID of the index which should be used for the query
:param skip The number of documents to skip in the query
:param limit The maximal amount of documents to return. The skip is applied before the limit restriction.
:returns Document list
382,905
def get_op_version(name):
    # CLI command and XML tag names reconstructed; literals were lost in extraction
    cmd = 'volume get {0} cluster.op-version'.format(name)
    root = _gluster_xml(cmd)
    if not _gluster_ok(root):
        return False, root.find('opErrstr').text
    result = {}
    for op_version in _iter(root, 'volGetopts'):
        for item in op_version:
            if item.tag == 'Value':
                result = item.text
            elif item.tag == 'Opt':
                for child in item:
                    if child.tag == 'Value':
                        result = child.text
    return result
.. versionadded:: 2019.2.0

Returns the glusterfs volume op-version

name
    Name of the glusterfs volume

CLI Example:

.. code-block:: bash

    salt '*' glusterfs.get_op_version <volume>
382,906
def count_mapped_reads(self, file_name, paired_end):
    if file_name.endswith("bam"):
        return self.samtools_view(file_name, param="-c -F4")
    if file_name.endswith("sam"):
        return self.samtools_view(file_name, param="-c -F4 -S")
    return -1
Mapped_reads are not in fastq format, so this one doesn't need to accommodate fastq, and therefore doesn't require a paired-end parameter because it only uses samtools view. Therefore, it's ok that it has a default parameter, since this is discarded.

:param str file_name: File for which to count mapped reads.
:param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending on the data in the bamfile. We leave the option here just for consistency, since all the other counting functions require the parameter. This makes it easier to swap counting functions during pipeline development.
:return int: Either return code from samtools view command, or -1 to indicate an error state.
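For context on the flags used above, a minimal standalone sketch (assumes samtools is on PATH): -c makes samtools view print only a count, and -F 4 excludes reads whose 0x4 (unmapped) flag is set.

import subprocess

def count_mapped(file_name):
    # -c: output only the count; -F 4: skip reads flagged as unmapped
    args = ["samtools", "view", "-c", "-F", "4"]
    if file_name.endswith("sam"):
        args.append("-S")  # legacy flag telling older samtools the input is SAM
    return int(subprocess.check_output(args + [file_name]).strip())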
382,907
def reshape(tt_array, shape, eps=1e-14, rl=1, rr=1):
    # numpy reshape/flatten calls use Fortran ordering throughout; the
    # order= literals were lost in extraction and are restored as "F"
    tt1 = _cp.deepcopy(tt_array)
    sz = _cp.deepcopy(shape)
    ismatrix = False
    if isinstance(tt1, _matrix.matrix):
        d1 = tt1.tt.d
        d2 = sz.shape[0]
        ismatrix = True
        restn2_n = sz[:, 0]
        restn2_m = sz[:, 1]
        sz_n = _cp.copy(sz[:, 0])
        sz_m = _cp.copy(sz[:, 1])
        n1_n = tt1.n
        n1_m = tt1.m
        sz = _np.prod(sz, axis=1)
        tt1 = tt1.tt
    else:
        d1 = tt1.d
        d2 = len(sz)
    # Absorb the tail ranks rl, rr into the first/last mode sizes
    sz[0] = sz[0] * rl
    sz[d2 - 1] = sz[d2 - 1] * rr
    tt1.n[0] = tt1.n[0] * tt1.r[0]
    tt1.n[d1 - 1] = tt1.n[d1 - 1] * tt1.r[d1]
    if ismatrix:
        restn2_n[0] = restn2_n[0] * rl
        restn2_m[d2 - 1] = restn2_m[d2 - 1] * rr
        n1_n[0] = n1_n[0] * tt1.r[0]
        n1_m[d1 - 1] = n1_m[d1 - 1] * tt1.r[d1]
    tt1.r[0] = 1
    tt1.r[d1] = 1
    n1 = tt1.n
    assert _np.prod(n1) == _np.prod(sz), 'Reshape: incorrect sizes'
    needQRs = False
    if d2 > d1:
        needQRs = True
    if d2 <= d1:
        i2 = 0
        n2 = _cp.deepcopy(sz)
        for i1 in range(d1):
            if n2[i2] == 1:
                i2 = i2 + 1
                if i2 > d2:
                    break
            if n2[i2] % n1[i1] == 0:
                n2[i2] = n2[i2] // n1[i1]
            else:
                needQRs = True
                break
    r1 = tt1.r
    tt1 = tt1.to_list(tt1)
    if needQRs:  # We have to split some cores -> perform QRs
        for i in range(d1 - 1, 0, -1):
            cr = tt1[i]
            cr = _np.reshape(cr, (r1[i], n1[i] * r1[i + 1]), order="F")
            [cr, rv] = _np.linalg.qr(cr.T)
            cr0 = tt1[i - 1]
            cr0 = _np.reshape(cr0, (r1[i - 1] * n1[i - 1], r1[i]), order="F")
            cr0 = _np.dot(cr0, rv.T)
            r1[i] = cr.shape[1]
            cr0 = _np.reshape(cr0, (r1[i - 1], n1[i - 1], r1[i]), order="F")
            cr = _np.reshape(cr.T, (r1[i], n1[i], r1[i + 1]), order="F")
            tt1[i] = cr
            tt1[i - 1] = cr0
    r2 = _np.ones(d2 + 1, dtype=_np.int32)
    i1 = 0  # working index in tt1
    i2 = 0  # working index in tt2
    core2 = _np.zeros((0))
    curcr2 = 1
    restn2 = sz
    n2 = _np.ones(d2, dtype=_np.int32)
    if ismatrix:
        n2_n = _np.ones(d2, dtype=_np.int32)
        n2_m = _np.ones(d2, dtype=_np.int32)
    while i1 < d1:
        curcr1 = tt1[i1]
        if _gcd(restn2[i2], n1[i1]) == n1[i1]:
            # The whole core1 fits into core2; merge it
            if (i1 < d1 - 1) and (needQRs):
                curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order="F")
                [curcr1, rv] = _np.linalg.qr(curcr1)
                curcr12 = tt1[i1 + 1]
                curcr12 = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1] * r1[i1 + 2]), order="F")
                curcr12 = _np.dot(rv, curcr12)
                r1[i1 + 1] = curcr12.shape[0]
                tt1[i1 + 1] = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1], r1[i1 + 2]), order="F")
            curcr1 = _np.reshape(curcr1, (r1[i1], n1[i1] * r1[i1 + 1]), order="F")
            curcr2 = _np.dot(curcr2, curcr1)
            if ismatrix:  # permute if we are working with a tt matrix
                curcr2 = _np.reshape(curcr2, (r2[i2], n2_n[i2], n2_m[i2], n1_n[i1], n1_m[i1], r1[i1 + 1]), order="F")
                curcr2 = _np.transpose(curcr2, [0, 1, 3, 2, 4, 5])
                n2_n[i2] = n2_n[i2] * n1_n[i1]
                n2_m[i2] = n2_m[i2] * n1_m[i1]
                restn2_n[i2] = restn2_n[i2] // n1_n[i1]
                restn2_m[i2] = restn2_m[i2] // n1_m[i1]
            r2[i2 + 1] = r1[i1 + 1]
            n2[i2] = n2[i2] * n1[i1]
            restn2[i2] = restn2[i2] // n1[i1]
            curcr2 = _np.reshape(curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order="F")
            i1 = i1 + 1  # current core1 is over
        else:
            if (_gcd(restn2[i2], n1[i1]) != 1) or (restn2[i2] == 1):
                # Nontrivial common divisor: split the core via SVD
                n12 = _gcd(restn2[i2], n1[i1])
                if ismatrix:  # permute before the truncation
                    n12_n = _gcd(restn2_n[i2], n1_n[i1])
                    n12_m = _gcd(restn2_m[i2], n1_m[i1])
                    curcr1 = _np.reshape(curcr1, (r1[i1], n12_n, n1_n[i1] // n12_n, n12_m, n1_m[i1] // n12_m, r1[i1 + 1]), order="F")
                    curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5])
                    n2_n[i2] = n2_n[i2] * n12_n
                    n2_m[i2] = n2_m[i2] * n12_m
                    restn2_n[i2] = restn2_n[i2] // n12_n
                    restn2_m[i2] = restn2_m[i2] // n12_m
                    n1_n[i1] = n1_n[i1] // n12_n
                    n1_m[i1] = n1_m[i1] // n12_m
                curcr1 = _np.reshape(curcr1, (r1[i1] * n12, (n1[i1] // n12) * r1[i1 + 1]), order="F")
                [u, s, v] = _np.linalg.svd(curcr1, full_matrices=False)
                r = _my_chop2(s, eps * _np.linalg.norm(s) / (d2 - 1) ** 0.5)
                u = u[:, :r]
                v = v.T
                v = v[:, :r] * s[:r]
                u = _np.reshape(u, (r1[i1], n12 * r), order="F")
                curcr2 = _np.dot(curcr2, u)
                r2[i2 + 1] = r
                n2[i2] = n2[i2] * n12
                restn2[i2] = restn2[i2] // n12
                curcr2 = _np.reshape(curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order="F")
                r1[i1] = r
                n1[i1] = n1[i1] // n12
                # keep v in tt1 for the next operations
                curcr1 = _np.reshape(v.T, (r1[i1], n1[i1], r1[i1 + 1]), order="F")
                tt1[i1] = curcr1
            else:
                # No common divisor: merge cores of tt1 until one appears
                i1new = i1 + 1
                curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order="F")
                while (_gcd(restn2[i2], n1[i1]) == 1) and (i1new < d1):
                    cr1new = tt1[i1new]
                    cr1new = _np.reshape(cr1new, (r1[i1new], n1[i1new] * r1[i1new + 1]), order="F")
                    curcr1 = _np.dot(curcr1, cr1new)
                    if ismatrix:  # permute and update shapes
                        curcr1 = _np.reshape(curcr1, (r1[i1], n1_n[i1], n1_m[i1], n1_n[i1new], n1_m[i1new], r1[i1new + 1]), order="F")
                        curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5])
                        n1_n[i1] = n1_n[i1] * n1_n[i1new]
                        n1_m[i1] = n1_m[i1] * n1_m[i1new]
                    n1[i1] = n1[i1] * n1[i1new]
                    curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1new + 1]), order="F")
                    i1new = i1new + 1
                # Inject the merged core, remove the consumed ones
                n1 = _np.concatenate((n1[:i1], n1[i1new:]))
                r1 = _np.concatenate((r1[:i1], r1[i1new:]))
                tt1[i1] = _np.reshape(curcr1, (r1[i1], n1[i1], r1[i1new]), order="F")
                tt1 = tt1[:i1] + tt1[i1new:]
                d1 = len(n1)
        if (restn2[i2] == 1) and ((i1 >= d1) or ((i1 < d1) and (n1[i1] != 1))):
            # The current core of tt2 is finished
            curcr2 = curcr2.flatten(order="F")
            core2 = _np.concatenate((core2, curcr2))
            i2 = i2 + 1
            curcr2 = 1
    # If we have been asked for trailing singletons - just add them
    while (i2 < d2):
        core2 = _np.concatenate((core2, _np.ones(1)))
        r2[i2] = 1
        i2 = i2 + 1
    tt2 = ones(2, 1)  # dummy tensor, to be overwritten
    tt2.d = d2
    tt2.n = n2
    tt2.r = r2
    tt2.core = core2
    tt2.ps = _np.int32(_np.cumsum(_np.concatenate((_np.ones(1), r2[:-1] * n2 * r2[1:]))))
    tt2.n[0] = tt2.n[0] // rl
    tt2.n[d2 - 1] = tt2.n[d2 - 1] // rr
    tt2.r[0] = rl
    tt2.r[d2] = rr
    if ismatrix:
        ttt = eye(1, 1)  # dummy tt matrix, to be overwritten
        ttt.n = sz_n
        ttt.m = sz_m
        ttt.tt = tt2
        return ttt
    else:
        return tt2
Reshape of the TT-vector

[TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another with mode sizes SZ, accuracy 1e-14

[TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another with mode sizes SZ and accuracy EPS

[TT1]=TT_RESHAPE(TT,SZ,EPS,RL) reshapes TT-vector/matrix into another with mode size SZ and left tail rank RL

[TT1]=TT_RESHAPE(TT,SZ,EPS,RL,RR) reshapes TT-vector/matrix into another with mode size SZ and tail ranks RL*RR

Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ. If the input is a TT-matrix, SZ must have the sizes for both modes, so it is a matrix of sizes d2-by-2. If the input is a TT-vector, SZ may be either a column or a row vector.
382,908
def get_instance(self, payload):
    return TaskInstance(self._version, payload,
                        assistant_sid=self._solution['assistant_sid'], )
Build an instance of TaskInstance :param dict payload: Payload response from the API :returns: twilio.rest.autopilot.v1.assistant.task.TaskInstance :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
382,909
def init_app_context():
    try:
        from invenio.base.factory import create_app
        app = create_app()
        app.test_request_context().push()
        app.preprocess_request()
    except ImportError:
        pass
Initialize app context for Invenio 2.x.
382,910
def detect_encoding(filename, limit_byte_check=-1):
    try:
        with open(filename, 'rb') as input_file:
            encoding = _detect_encoding(input_file.readline)
        # Check for correctness of encoding
        with open_with_encoding(filename, encoding) as input_file:
            input_file.read(limit_byte_check)
        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        # fallback encoding reconstructed; the literal was lost in extraction
        return 'latin-1'
Return file encoding.
382,911
async def tuple(self, elem=None, elem_type=None, params=None):
    # attribute name inferred from the call below; the literal was lost in extraction
    if hasattr(elem_type, 'blob_serialize'):
        container = elem_type() if elem is None else elem
        return await container.blob_serialize(self, elem=elem, elem_type=elem_type, params=params)
    if self.writing:
        return await self.dump_tuple(elem, elem_type, params)
    else:
        return await self.load_tuple(elem_type, params=params, elem=elem)
Loads/dumps tuple :return:
382,912
def _end(ins): global FLAG_end_emitted output = _16bit_oper(ins.quad[1]) output.append() output.append() if FLAG_end_emitted: return output + [ % END_LABEL] FLAG_end_emitted = True output.append( % END_LABEL) if OPTIONS.headerless.value: return output + [] output.append() output.append( % CALL_BACK) output.append() output.append() output.append() output.append() output.append() output.append() output.append() output.append() output.append( % CALL_BACK) output.append() return output
Outputs the ending sequence
382,913
def _walk_directory(root_directory):
    paths = [os.path.join(root, name)
             for root, dirs, files in os.walk(root_directory)
             for name in files]
    paths.sort()
    return paths
Generates the sorted paths of all files that are descendants of `root_directory`.
382,914
def latex_to_img(tex):
    with tempfile.TemporaryDirectory() as tmpdirname:
        # file names restored from the surviving format strings below
        with open(tmpdirname + r"\tex.tex", "w") as f:
            f.write(tex)
        os.system(r"latex {0}\tex.tex -halt-on-error -interaction=batchmode -disable-installer -aux-directory={0} "
                  r"-output-directory={0}".format(tmpdirname))
        os.system(r"dvipng -T tight -z 9 --truecolor -o {0}\tex.png {0}\tex.dvi".format(tmpdirname))
        image = pygame.image.load(tmpdirname + r"\tex.png")
        return image
Return a pygame image from a latex template.
382,915
def _get_model_instance(model_cls, data):
    if not isinstance(data, (model_cls, dict)):
        # error message reconstructed; the literal was lost in extraction
        raise TypeError(
            '{0} is not an instance of {1} or dict'.format(data, model_cls))
    return model_cls(**data) if isinstance(data, dict) else data
Convert dict into object of class of passed model. :param class model_cls: :param object data: :rtype DomainModel:
382,916
def host(self, value=None):
    if value is not None:
        return URL._mutate(self, host=value)
    return self._tuple.host
Return the host :param string value: new host string
382,917
def visit_Dict(self, node):
    self.generic_visit(node)
    if node.keys:
        for key, value in zip(node.keys, node.values):
            value_type = self.result[value]
            self.combine(node, key,
                         unary_op=partial(self.builder.DictType, of_val=value_type))
    else:
        self.result[node] = self.builder.NamedType(
            "pythonic::types::empty_dict")
Define dict type from all elements type (or empty_dict type).
382,918
def one_hot(cls, ij, sz):
    if isinstance(sz, int):
        sz = (sz, sz)
    if isinstance(ij, int):
        ij = (ij, ij)
    m = np.zeros(sz)
    m[ij[0], ij[1]] = 1.0
    return Matrix(m)
ij: position
sz: size of matrix
382,919
def vsubg(v1, v2, ndim):
    v1 = stypes.toDoubleVector(v1)
    v2 = stypes.toDoubleVector(v2)
    vout = stypes.emptyDoubleVector(ndim)
    ndim = ctypes.c_int(ndim)
    libspice.vsubg_c(v1, v2, ndim, vout)
    return stypes.cVectorToPython(vout)
Compute the difference between two double precision vectors of arbitrary dimension.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsubg_c.html

:param v1: First vector (minuend).
:type v1: Array of floats
:param v2: Second vector (subtrahend).
:type v2: Array of floats
:param ndim: Dimension of v1, v2, and vout.
:type ndim: int
:return: Difference vector, v1 - v2.
:rtype: Array of floats
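A usage sketch for the wrapper above: subtracting the second 4-vector from the first, element-wise.

# assumes the vsubg wrapper defined above is in scope
diff = vsubg([4.0, 4.0, 4.0, 4.0], [1.0, 2.0, 3.0, 4.0], 4)
# diff -> [3.0, 2.0, 1.0, 0.0]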
382,920
def send_raw_transaction(self, hextx, **kwargs): return self._call(JSONRPCMethods.SEND_RAW_TRANSACTION.value, [hextx, ], **kwargs)
Broadcasts a transaction over the NEO network and returns the result. :param hextx: hexadecimal string that has been serialized :type hextx: str :return: result of the transaction :rtype: bool
382,921
def sanitize_dict(input_dict):
    plain_dict = dict()
    for key in input_dict.keys():
        value = input_dict[key]
        # treat anything with a .keys() method as a nested mapping
        if hasattr(value, 'keys'):
            plain_dict[key] = sanitize_dict(value)
        else:
            plain_dict[key] = value
    return plain_dict
r""" Given a nested dictionary, ensures that all nested dicts are normal Python dicts. This is necessary for pickling, or just converting an 'auto-vivifying' dict to something that acts normal.
382,922
def _split_regex(regex):
    # character literals restored from the behavior described in the docstring
    if regex[0] == '^':
        regex = regex[1:]
    if regex[-1] == '$':
        regex = regex[0:-1]
    results = []
    line = ''
    for c in regex:
        if c == '(':
            results.append(line)
            line = ''
        elif c == ')':
            line = ''
        else:
            line = line + c
    if len(line) > 0:
        results.append(line)
    return results
Return an array of the URL split at each regex match like (?P<id>[\d]+) Call with a regex of '^/foo/(?P<id>[\d]+)/bar/$' and you will receive ['/foo/', '/bar/']
382,923
def dynacRepresentation(self):
    details = [
        self.voltage.val,
        self.phase.val,
        self.harmonicNum.val,
        self.apertureRadius.val,
    ]
    # Dynac card name reconstructed from the field list (voltage, phase,
    # harmonic, aperture radius match the BUNCHER card); the literal was lost
    return ['BUNCHER', [details]]
Return the Pynac representation of this Set4DAperture instance.
382,924
def padding_oracle_encrypt(oracle, plaintext, block_size=128, pool=None):
    plaintext = bytearray(plaintext)
    block_len = block_size // 8
    # apply PKCS#7 padding
    padding_len = block_len - (len(plaintext) % block_len)
    plaintext.extend([padding_len] * padding_len)
    ciphertext = bytearray()
    chunk = bytearray(os.urandom(block_len))
    ciphertext[0:0] = chunk
    for plain_start in range(len(plaintext) - block_len, -1, -block_len):
        plain = plaintext[plain_start:plain_start + block_len]
        # chained assignment: the new block is prepended to the ciphertext
        # and also becomes the chaining block for the preceding iteration
        chunk = ciphertext[0:0] = encrypt_block(oracle, block_len, chunk, plain, pool)
    return bytes(ciphertext)
Encrypt plaintext using an oracle function that returns ``True`` if the provided ciphertext is correctly PKCS#7 padded after decryption. The cipher needs to operate in CBC mode.

Args:
    oracle(callable): The oracle function. Will be called repeatedly with a chunk of ciphertext.
    plaintext(bytes): The plaintext data to encrypt.
    block_size(int): The cipher's block size in bits.
    pool(multiprocessing.Pool): A multiprocessing pool to use to parallelize the encryption. This pool is used to call the oracle function. Fairly heavy due to the required inter-process state synchronization. If ``None`` (the default), no multiprocessing will be used.

Returns:
    bytes: The encrypted data.

Raises:
    RuntimeError: Raised if the oracle behaves unpredictably.
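A hedged sketch of how such an oracle is typically wired up; target_server_accepts_padding is a hypothetical stand-in for whatever endpoint leaks padding validity (e.g., an HTTP service whose error response distinguishes bad padding from other failures).

def oracle(ciphertext):
    # hypothetical: returns True iff the target decrypts `ciphertext`
    # to something with valid PKCS#7 padding
    return target_server_accepts_padding(bytes(ciphertext))

forged = padding_oracle_encrypt(oracle, b"chosen plaintext", block_size=128)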
382,925
def plot_mag(fignum, datablock, s, num, units, norm): global globals, graphmenu Ints = [] for plotrec in datablock: Ints.append(plotrec[3]) Ints.sort() plt.figure(num=fignum) T, M, Tv, recnum = [], [], [], 0 Mex, Tex, Vdif = [], [], [] recbak = [] for rec in datablock: if rec[5] == : if units == "T": T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) if recnum > 0: Tv.append(rec[0] * 1e3) elif units == "U": T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) elif units == "K": T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) elif "T" in units and "K" in units: if rec[0] < 1.: T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) else: T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) else: T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) if norm: M.append(old_div(rec[3], Ints[-1])) else: M.append(rec[3]) if recnum > 0 and len(rec) > 0 and len(recbak) > 0: v = [] if recbak[0] != rec[0]: V0 = pmag.dir2cart([recbak[1], recbak[2], recbak[3]]) V1 = pmag.dir2cart([rec[1], rec[2], rec[3]]) for el in range(3): v.append(abs(V1[el] - V0[el])) vdir = pmag.cart2dir(v) Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(old_div(vdir[2], Ints[-1])) recbak = [] for el in rec: recbak.append(el) delta = .005 * M[0] if num == 1: if recnum % 2 == 0: plt.text(T[-1] + delta, M[-1], ( + str(recnum)), fontsize=9) recnum += 1 else: if rec[0] < 200: Tex.append(rec[0] * 1e3) if rec[0] >= 200: Tex.append(rec[0] - 273) Mex.append(old_div(rec[3], Ints[-1])) recnum += 1 if globals != 0: globals.MTlist = T globals.MTlisty = M if len(Mex) > 0 and len(Tex) > 0: plt.scatter(Tex, Mex, marker=, color=) if len(Vdif) > 0: Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(0) if Tv: Tv.append(Tv[-1]) plt.plot(T, M) plt.plot(T, M, ) if len(Tv) == len(Vdif) and norm: plt.plot(Tv, Vdif, ) if units == "T": plt.xlabel("Step (mT)") elif units == "K": plt.xlabel("Step (C)") elif units == "J": plt.xlabel("Step (J)") else: plt.xlabel("Step [mT,C]") if norm == 1: plt.ylabel("Fractional Magnetization") if norm == 0: plt.ylabel("Magnetization") plt.axvline(0, color=) plt.axhline(0, color=) tstring = s plt.title(tstring) plt.draw()
plots magnetization against (de)magnetizing temperature or field

Parameters
----------
fignum : matplotlib figure number for plotting
datablock : nested list of [step, 0, 0, magnetization, 1, quality]
s : string for title
num : matplotlib figure number, can set to 1
units : [T, K, U] for tesla, kelvin or arbitrary
norm : [True, False] if True, normalize

Effects
-------
plots figure
382,926
def load_json_fixture(fixture_path: str) -> Dict[str, Any]:
    with open(fixture_path) as fixture_file:
        file_fixtures = json.load(fixture_file)
    return file_fixtures
Loads a fixture file, caching the most recent files it loaded.
382,927
def execute_sql(
    self,
    sql,
    params=None,
    param_types=None,
    query_mode=None,
    partition=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
):
    if self._read_request_count > 0:
        if not self._multi_use:
            raise ValueError("Cannot re-use single-use snapshot.")
        if self._transaction_id is None:
            raise ValueError("Transaction ID pending.")
    if params is not None:
        if param_types is None:
            # message reconstructed; the literals were lost in extraction
            raise ValueError("Specify 'param_types' when passing 'params'.")
        params_pb = Struct(
            fields={key: _make_value_pb(value) for key, value in params.items()}
        )
    else:
        params_pb = None
    database = self._session._database
    metadata = _metadata_with_prefix(database.name)
    transaction = self._make_txn_selector()
    api = database.spanner_api
    restart = functools.partial(
        api.execute_streaming_sql,
        self._session.name,
        sql,
        transaction=transaction,
        params=params_pb,
        param_types=param_types,
        query_mode=query_mode,
        partition_token=partition,
        seqno=self._execute_sql_count,
        metadata=metadata,
        retry=retry,
        timeout=timeout,
    )
    iterator = _restart_on_unavailable(restart)
    self._read_request_count += 1
    self._execute_sql_count += 1
    if self._multi_use:
        return StreamedResultSet(iterator, source=self)
    else:
        return StreamedResultSet(iterator)
Perform an ``ExecuteStreamingSql`` API request.

:type sql: str
:param sql: SQL query statement

:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match the names used in ``sql``.

:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed.

:type query_mode: :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1

:type partition: bytes
:param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`.

:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.

:raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots.
382,928
def unredirect_stdout(self):
    if hasattr(self, 'hijacked_stdout') and hasattr(self, 'hijacked_stderr'):
        sys.stdout = self.hijacked_stdout
        sys.stderr = self.hijacked_stderr
Redirect stdout and stderr back to screen.
382,929
def _push_new_state(self):
    try:
        base = self._states[-1]
    except IndexError:
        graph = DirectedGraph()
        graph.add(None)
        state = State(mapping={}, graph=graph)
    else:
        state = State(
            mapping=base.mapping.copy(),
            graph=base.graph.copy(),
        )
    self._states.append(state)
Push a new state into history. This new state will be used to hold resolution results of the next coming round.
382,930
def available_metrics(self):
    req = self.request(self.mist_client.uri + "/clouds/" + self.cloud.id +
                       "/machines/" + self.id + "/metrics")
    metrics = req.get().json()
    return metrics
List all available metrics that you can add to this machine :returns: A list of dicts, each of which is a metric that you can add to a monitored machine
382,931
def average(iterator):
    count = 0
    total = 0
    for num in iterator:
        count += 1
        total += num
    return float(total) / count
Iterative mean.
382,932
def to_df(self, variables=None, format='wide', sparse=True,
          sampling_rate=None, include_sparse=True, include_dense=True,
          **kwargs):
    # guard reconstructed from the surviving message fragment
    if not include_sparse and not include_dense:
        raise ValueError("You can't exclude both dense and sparse "
                         "variables! That leaves nothing!")
    if variables is None:
        variables = list(self.variables.keys())
    if not include_sparse:
        variables = [v for v in variables
                     if isinstance(self.variables[v], DenseRunVariable)]
    if not include_dense:
        variables = [v for v in variables
                     if not isinstance(self.variables[v], DenseRunVariable)]
    if not variables:
        return None
    _vars = [self.variables[v] for v in variables]
    if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
        variables = _vars
    else:
        sampling_rate = sampling_rate or self.sampling_rate
        variables = list(self.resample(sampling_rate, variables,
                                       force_dense=True,
                                       in_place=False).values())
    return super(BIDSRunVariableCollection, self).to_df(variables, format, **kwargs)
Merge columns into a single pandas DataFrame.

Args:
    variables (list): Optional list of variable names to retain; if None, all variables are written out.
    format (str): Whether to return a DataFrame in 'wide' or 'long' format. In 'wide' format, each row is defined by a unique onset/duration, and each variable is in a separate column. In 'long' format, each row is a unique combination of onset, duration, and variable name, and a single 'amplitude' column provides the value.
    sparse (bool): If True, variables will be kept in a sparse format provided they are all internally represented as such. If False, a dense matrix (i.e., uniform sampling rate for all events) will be exported. Will be ignored if at least one variable is dense.
    sampling_rate (float): If a dense matrix is written out, the sampling rate (in Hz) to use for downsampling. Defaults to the value currently set in the instance.
    kwargs: Optional keyword arguments to pass onto each Variable's to_df() call (e.g., condition, entities, and timing).
    include_sparse (bool): Whether or not to include sparse Variables.
    include_dense (bool): Whether or not to include dense Variables.

Returns: A pandas DataFrame.
382,933
def input_object(prompt_text, cast=None, default=None, prompt_ext=': ',
                 castarg=[], castkwarg={}):
    while True:
        stdout.write(prompt_text)
        value = stdout.raw_input(prompt_ext)
        # empty input returns the default
        if value == '':
            return default
        try:
            if cast != None:
                value = cast(value, *castarg, **castkwarg)
        except ValueError as details:
            if cast in NICE_INPUT_ERRORS:
                stderr.write(ERROR_MESSAGE % (NICE_INPUT_ERRORS[cast] % details))
            else:
                stderr.write(ERROR_MESSAGE % (DEFAULT_INPUT_ERRORS % str(details)))
            continue
        return value
Gets input from the command line and validates it.

prompt_text
    A string. Used to prompt the user. Do not include a trailing space.
prompt_ext
    Added on to the prompt at the end. At the moment this must not include any control stuff because it is sent directly to raw_input.
cast
    This can be any callable object (class, function, type, etc). It simply calls the cast with the given arguments and returns the result. If a ValueError is raised, it will output an error message and prompt the user again. Because some builtin python objects don't do casting in the way that we might like, you can easily write a wrapper function that looks at the input and returns the appropriate object or exception. Look in the cast submodule for examples. If cast is None, then it will do nothing (and you will have a string).
default
    function returns this value if the user types nothing in. This can be used to cancel the input, so to speak.
castarg, castkwarg
    list and dictionary. Extra arguments passed on to the cast.
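A usage sketch, assuming the module's stdout/stderr console wrappers are in place:

age = input_object("How old are you", cast=int, default=None)
# pressing Enter returns the default (None); typing "abc" hits the
# ValueError path, prints an error message, and re-prompts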
382,934
def commit_index(self, message):
    tree_id = self.write_tree()
    # git subcommands reconstructed from the described behavior
    args = ['commit-tree', tree_id, '-p', self.ref_head]
    commit = self.command_exec(args, message)[0].decode().strip()
    self.command_exec(['update-ref', self.ref_head, commit])
    return commit
Commit the current index. :param message: str :return: str the generated commit sha
382,935
def _oval_string(self, p1, p2, p3, p4):
    def bezier(p, q, r):
        f = "%f %f %f %f %f %f c\n"
        return f % (p.x, p.y, q.x, q.y, r.x, r.y)

    kappa = 0.55228474983
    ml = p1 + (p4 - p1) * 0.5
    mo = p1 + (p2 - p1) * 0.5
    mr = p2 + (p3 - p2) * 0.5
    mu = p4 + (p3 - p4) * 0.5
    ol1 = ml + (p1 - ml) * kappa
    ol2 = mo + (p1 - mo) * kappa
    or1 = mo + (p2 - mo) * kappa
    or2 = mr + (p2 - mr) * kappa
    ur1 = mr + (p3 - mr) * kappa
    ur2 = mu + (p3 - mu) * kappa
    ul1 = mu + (p4 - mu) * kappa
    ul2 = ml + (p4 - ml) * kappa
    ap = "%f %f m\n" % (ml.x, ml.y)
    ap += bezier(ol1, ol2, mo)
    ap += bezier(or1, or2, mr)
    ap += bezier(ur1, ur2, mu)
    ap += bezier(ul1, ul2, ml)
    return ap
Return /AP string defining an oval within a 4-polygon provided as points
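The constant used above is the standard cubic-Bezier quarter-circle approximation factor, kappa = 4*(sqrt(2)-1)/3; a quick check:

import math

kappa = 4 * (math.sqrt(2) - 1) / 3
assert abs(kappa - 0.55228474983) < 1e-11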
382,936
def validiate_webhook_signature(self, webhook, signature):
    digester = hmac.new(self.session.oauth2credential.client_secret,
                        webhook,
                        hashlib.sha256)
    return (signature == digester.hexdigest())
Validates a webhook signature from a webhook body + client secret.

Parameters
    webhook (string)
        The request body of the webhook.
    signature (string)
        The webhook signature specified in X-Uber-Signature header.
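One caveat: a plain == comparison of hex digests can leak timing information. A constant-time variant of the same check (the function name here is mine):

import hashlib
import hmac

def validate_webhook_signature_ct(client_secret, webhook_body, signature):
    digest = hmac.new(client_secret, webhook_body, hashlib.sha256).hexdigest()
    # compare_digest runs in time independent of where the strings differ
    return hmac.compare_digest(signature, digest)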
382,937
def prettyPrintPacket(pkt): s = .format(pkt[4], pkt[7], int((pkt[6] << 8) + pkt[5])) if len(s) > 10: params = pkt[8:-2] s += .format(params) return s
not done
382,938
def Get(self, key):
    for alert in self.alerts:
        if alert.id == key:
            return alert
        elif alert.name == key:
            return alert
Get alert by providing name, ID, or other unique key. If key is not unique and finds multiple matches only the first will be returned
382,939
def _render(self):
    self._last_text = self.text
    self._surface = self.font.render(self.text, True, self.color, self.bg_color)
    rect = self._surface.get_rect()
    self.size = rect.size
Render the text. Avoid calling this function too many times: rendering text and blitting it is slow.
382,940
def upload(request): title = request.GET.get("title", "%sAn error occurred during the upload, Please try again.httpshttp%s://%s%s/' % (protocol, request.get_host(), reverse("django_youtube.views.upload_return")) return render_to_response( "django_youtube/upload.html", {"form": form, "post_url": data["post_url"], "next_url": next_url}, context_instance=RequestContext(request) )
Displays an upload form. Creates an upload URL and token from the YouTube API and uses them on the form.
382,941
def namedb_get_name_preorder(db, preorder_hash, current_block):
    select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
    args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)
    cur = db.cursor()
    preorder_rows = namedb_query_execute(cur, select_query, args)
    preorder_row = preorder_rows.fetchone()
    if preorder_row is None:
        return None
    preorder_rec = {}
    preorder_rec.update(preorder_row)
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(current_block)
    select_query = "SELECT name_records.preorder_hash " + \
                   "FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE name_records.preorder_hash = ? AND " + \
                   unexpired_query + ";"
    args = (preorder_hash,) + unexpired_args
    cur = db.cursor()
    nm_rows = namedb_query_execute(cur, select_query, args)
    nm_row = nm_rows.fetchone()
    if nm_row is not None:
        return None
    return preorder_rec
Get a (singular) name preorder record outstanding at the given block, given the preorder hash. NOTE: returns expired preorders. Return the preorder record on success. Return None if not found.
382,942
def get_votes(self):
    candidate_elections = CandidateElection.objects.filter(election=self)
    votes = None
    for ce in candidate_elections:
        # seed the queryset on the first pass; None | queryset would raise
        if votes is None:
            votes = ce.votes.all()
        else:
            votes = votes | ce.votes.all()
    return votes
Get all votes for this election.
382,943
def save(self):
    # HTTP verbs reconstructed; the literals were lost in extraction
    if self.id:
        method = 'put'
        resource = self.RESOURCE.format(
            account_id=self.account.id,
            tailored_audience_id=self.tailored_audience_id,
            id=self.id)
    else:
        method = 'post'
        resource = self.RESOURCE_COLLECTION.format(
            account_id=self.account.id,
            tailored_audience_id=self.tailored_audience_id)
    response = Request(
        self.account.client, method, resource, params=self.to_params()).perform()
    return self.from_response(response.body['data'])
Saves or updates the current tailored audience permission.
382,944
def codemirror_html(self, config_name, varname, element_id):
    parameters = json.dumps(self.get_codemirror_parameters(config_name),
                            sort_keys=True)
    return settings.CODEMIRROR_FIELD_INIT_JS.format(
        varname=varname,
        inputid=element_id,
        settings=parameters,
    )
Render HTML for a CodeMirror instance.

Since a CodeMirror instance has to be attached to an HTML element, this method requires an HTML element identifier with or without the ``#`` prefix; it depends on the template in ``settings.CODEMIRROR_FIELD_INIT_JS`` (the default one requires no ``#`` prefix).

Arguments:
    config_name (string): A registered config name.
    varname (string): A Javascript variable name.
    element_id (string): An HTML element identifier (without leading ``#``) to attach to a CodeMirror instance.

Returns:
    string: HTML to instantiate CodeMirror for a field input.
382,945
def _filter_child_model_fields(cls, fields):
    indexes_to_remove = set([])
    for index1, field1 in enumerate(fields):
        for index2, field2 in enumerate(fields):
            if index1 < index2 and index1 not in indexes_to_remove and \
                    index2 not in indexes_to_remove:
                if issubclass(field1.related_model, field2.related_model):
                    indexes_to_remove.add(index1)
                if issubclass(field2.related_model, field1.related_model):
                    indexes_to_remove.add(index2)
    fields = [field for index, field in enumerate(fields)
              if index not in indexes_to_remove]
    return fields
Keep only related model fields. Example: Inherited models: A -> B -> C B has one-to-many relationship to BMany. after inspection BMany would have links to B and C. Keep only B. Parent model A could not be used (It would not be in fields) :param list fields: model fields. :return list fields: filtered fields.
382,946
def make_selector(value):
    if is_callable(value):
        return value
    if is_string(value):
        return a_(value)
    # placeholder restored in the message; the literal was lost in extraction
    raise ValueError("Unable to create callable selector from {0!r}".format(value))
Create a selector callable from the supplied value.

Args:
    value: If a callable, then returned unchanged. If a string is used then create an attribute selector. If an integer is used then create a key selector.

Returns:
    A callable selector based on the supplied value.

Raises:
    ValueError: If a selector cannot be created from the value.
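A usage sketch, assuming asq-style helpers where a_(name) builds an attribute getter:

name_of = make_selector("name")        # roughly: lambda obj: obj.name
identity = make_selector(lambda x: x)  # callables pass through unchanged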
382,947
def _log(self, monitors, iteration, label='', suffix=''):
    label = label or self.__class__.__name__
    # format strings reconstructed; the literals were lost in extraction
    fields = ('{} = {:.6f}'.format(k, v) for k, v in monitors.items())
    util.log('{} {} {}{}'.format(label, iteration, ' '.join(fields), suffix))
Log the state of the optimizer on the console.

Parameters
----------
monitors : OrderedDict
    A dictionary of monitor names mapped to values. These names and values are what is being logged.
iteration : int
    Optimization iteration that we are logging.
label : str, optional
    A label for the name of the optimizer creating the log line. Defaults to the name of the current class.
suffix : str, optional
    A suffix to add to the end of the log line, if any.
382,948
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
    with load_file(filename, encoding=encoding) as data:
        self.load_text(data, tokenizer)
Load in a text file from which to generate a word frequency list.

Args:
    filename (str): The filepath to the text file to be loaded
    encoding (str): The encoding of the text file
    tokenizer (function): The function to use to tokenize a string
382,949
def frames(self, key=None, orig_order=False):
    if key is not None:
        key = self._normalize_key(key)
        if len(self._frames[key]) == 0:
            raise KeyError("Key not found: " + repr(key))
        return self._frames[key]
    frames = []
    for frameid in self._frames.keys():
        for frame in self._frames[frameid]:
            frames.append(frame)
    if orig_order:
        key = (lambda frame:
               (0, frame.frameno) if frame.frameno is not None else (1,))
    else:
        key = self.frame_order.key
    frames.sort(key=key)
    return frames
Returns a list of frames in this tag. If KEY is None, returns all frames in the tag; otherwise returns all frames whose frameid matches KEY. If ORIG_ORDER is True, then the frames are returned in their original order. Otherwise the frames are sorted in canonical order according to the frame_order field of this tag.
382,950
def get_fmt_v4(data_type, size, channel_type=v4c.CHANNEL_TYPE_VALUE):
    if data_type in v4c.NON_SCALAR_TYPES:
        size = size // 8
        if data_type == v4c.DATA_TYPE_BYTEARRAY:
            if channel_type == v4c.CHANNEL_TYPE_VALUE:
                fmt = f"({size},)u1"
            else:
                if size == 4:
                    fmt = "<u4"
                elif size == 8:
                    fmt = "<u8"
        elif data_type in v4c.STRING_TYPES:
            if channel_type == v4c.CHANNEL_TYPE_VALUE:
                fmt = f"S{size}"
            else:
                if size == 4:
                    fmt = "<u4"
                elif size == 8:
                    fmt = "<u8"
        elif data_type == v4c.DATA_TYPE_CANOPEN_DATE:
            fmt = "V7"
        elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
            fmt = "V6"
    else:
        if size <= 8:
            size = 1
        elif size <= 16:
            size = 2
        elif size <= 32:
            size = 4
        elif size <= 64:
            size = 8
        else:
            size = size // 8
        if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL:
            fmt = f"<u{size}"
        elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA:
            fmt = f">u{size}"
        elif data_type == v4c.DATA_TYPE_SIGNED_INTEL:
            fmt = f"<i{size}"
        elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA:
            fmt = f">i{size}"
        elif data_type == v4c.DATA_TYPE_REAL_INTEL:
            fmt = f"<f{size}"
        elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA:
            fmt = f">f{size}"
    return fmt
convert mdf version 4 channel data type to numpy dtype format string

Parameters
----------
data_type : int
    mdf channel data type
size : int
    data bit size
channel_type : int
    mdf channel type

Returns
-------
fmt : str
    numpy compatible data type format string
382,951
def crypt(word, salt=None, rounds=_ROUNDS_DEFAULT):
    if salt is None or isinstance(salt, _Method):
        salt = mksalt(salt, rounds)
    algo, rounds, salt = extract_components_from_salt(salt)
    if algo == 5:
        hashfunc = hashlib.sha256
    elif algo == 6:
        hashfunc = hashlib.sha512
    else:
        # message reconstructed; the literal was lost in extraction
        raise ValueError('Unsupported algorithm: {}'.format(algo))
    return sha2_crypt(word, salt, hashfunc, rounds)
Return a string representing the one-way hash of a password, with a salt prepended. If ``salt`` is not specified or is ``None``, the strongest available method will be selected and a salt generated. Otherwise, ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as returned by ``crypt.mksalt()``.
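A usage sketch for the reimplementation above; METHOD_SHA512 is assumed to be one of the module's crypt.METHOD_* constants:

salt = mksalt(METHOD_SHA512, 10000)   # explicit method and round count
hashed = crypt("hunter2", salt)
assert hashed == crypt("hunter2", salt)  # same salt -> same hash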
382,952
def match(self, environ):
    targets, urlargs = self._match_path(environ)
    if not targets:
        raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
    method = environ['REQUEST_METHOD'].upper()
    if method in targets:
        return targets[method], urlargs
    # HEAD falls back to GET; 'ANY' matches every verb
    if method == 'HEAD' and 'GET' in targets:
        return targets['GET'], urlargs
    if 'ANY' in targets:
        return targets['ANY'], urlargs
    allowed = [verb for verb in targets if verb != 'ANY']
    if 'GET' in allowed and 'HEAD' not in allowed:
        allowed.append('HEAD')
    raise HTTPError(405, "Method not allowed.",
                    header=[('Allow', ",".join(allowed))])
Return a (target, url_args) tuple or raise HTTPError(404/405).
382,953
def install_python_module(name):
    # hide() groups reconstructed; the literals were lost in extraction
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        run('pip install %s' % name)
installs a python module using pip
382,954
def _get_plugin_stats(self, name): the_dict = {} keys = self.redis_conn.keys(.format(n=name)) for key in keys: elements = key.split(":") main = elements[2] end = elements[3] if main == or main == : if main not in the_dict: the_dict[main] = {} the_dict[main][end] = self._get_key_value(key, end == ) elif main == : if not in the_dict: the_dict[] = {} true_tail = elements[4] if end not in the_dict[]: the_dict[][end] = [] the_dict[][end].append(true_tail) else: if not in the_dict: the_dict[] = {} if main not in the_dict[]: the_dict[][main] = {} the_dict[][main][end] = self._get_key_value(key, end == ) return the_dict
Used for getting stats for Plugin based stuff, like Kafka Monitor and Redis Monitor @param name: the main class stats name @return: A formatted dict of stats
382,955
def issue(self, CorpNum, MgtKey, Memo=None, UserID=None):
    if MgtKey == None or MgtKey == "":
        raise PopbillException(-99999999, "The document management key (MgtKey) was not provided.")
    postData = ""
    req = {}
    # condition fixed from `or` to `and`; the original always evaluated True
    if Memo != None and Memo != "":
        req["memo"] = Memo
    postData = self._stringtify(req)
    # endpoint prefix reconstructed (Cashbill service); the literal was lost
    return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
Issue a cash receipt.

args
    CorpNum : Popbill member's business registration number
    MgtKey : document management key of the original cash receipt
    Memo : issuance memo
    UserID : Popbill member user ID
return
    Processing result, consisting of code and message.
raise
    PopbillException
382,956
def account_weight(self, account):
    account = self._process_value(account, 'account')
    payload = {"account": account}
    resp = self.call('account_weight', payload)
    return int(resp['weight'])
Returns the voting weight for **account**

:param account: Account to get voting weight for
:type account: str

:raises: :py:exc:`nano.rpc.RPCException`

>>> rpc.account_weight(
...     account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000"
... )
10000
382,957
def _add_log_handler(handler, level=None, fmt=None, datefmt=None, propagate=None):
    if not fmt:
        fmt = US_LOG_FMT
    if not datefmt:
        datefmt = US_LOG_DATE_FMT
    handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    if level is not None:
        handler.setLevel(level)
    logger = logging.getLogger()
    logger.addHandler(handler)
    if propagate is not None:
        logger.propagate = propagate
Add a logging handler to Orca.

Parameters
----------
handler : logging.Handler subclass
level : int, optional
    An optional logging level that will apply only to this stream handler.
fmt : str, optional
    An optional format string that will be used for the log messages.
datefmt : str, optional
    An optional format string for formatting dates in the log messages.
propagate : bool, optional
    Whether the Orca logger should propagate. If None the propagation will not be modified, otherwise it will be set to this value.
382,958
def extract_values(query):
    if isinstance(query, subqueries.UpdateQuery):
        row = query.values
        return extract_values_inner(row, query)
    if isinstance(query, subqueries.InsertQuery):
        ret = []
        for row in query.objs:
            ret.append(extract_values_inner(row, query))
        return ret
    raise NotSupportedError
Extract values from insert or update query. Supports bulk_create
382,959
def graham(meshes, xs, ys, zs, expose_horizon=False):
    distance_factor = 1.0
    visibilities, weights, horizon = only_horizon(meshes, xs, ys, zs)
    return visibilities, None, None
convex_graham
382,960
def make_unix_filename(fname):
    bad_filenames = [".", ".."]
    if fname in bad_filenames:
        raise DXError("Invalid filename {}".format(fname))
    # per the docstring: replace all slashes with %2F
    return fname.replace('/', '%2F')
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
:returns: a valid unix filename
:rtype: string
:raises: DXError if the filename is invalid on a Unix system

The problem being solved here is that *fname* is a python string that may contain characters that are invalid for a file name. We replace all the slashes with %2F. Another issue is that the user may choose an invalid name. Since we focus on Unix systems, the only possibilities are "." and "..".
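A quick check of the behavior described above:

assert make_unix_filename("zzz/yyy/xxx") == "zzz%2Fyyy%2Fxxx"
assert make_unix_filename("plain.txt") == "plain.txt"
# make_unix_filename(".") raises DXError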
382,961
def strip_illumina_suffix(self):
    if self.id.endswith('/1') or self.id.endswith('/2'):
        self.id = self.id[:-2]
Removes any trailing /1 or /2 off the end of the name
382,962
def _compile_tag_re(self):
    device_tag_list = []
    for regex_str, tags in iteritems(self._device_tag_re):
        try:
            device_tag_list.append([re.compile(regex_str, IGNORE_CASE),
                                    [t.strip() for t in tags.split(',')]])
        except TypeError:
            # warning message reconstructed; the literal was lost in extraction
            self.log.warning('{} is not a valid regular expression and will be ignored'.format(regex_str))
    self._device_tag_re = device_tag_list
Compile regex strings from device_tag_re option and return list of compiled regex/tag pairs
382,963
def _recv(self): from . import mavutil start_time = time.time() while time.time() < start_time + self.timeout: m = self.mav.recv_match(condition=, type=, blocking=False, timeout=0) if m is not None and m.count != 0: break self.mav.mav.serial_control_send(self.port, mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE | mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND, 0, 0, 0, [0]*70) m = self.mav.recv_match(condition=, type=, blocking=True, timeout=0.01) if m is not None and m.count != 0: break if m is not None: if self._debug > 2: print(m) data = m.data[:m.count] self.buf += .join(str(chr(x)) for x in data)
read some bytes into self.buf
382,964
def getkeyword(self, keyword):
    # empty string = table keyword (as opposed to a column keyword)
    if isinstance(keyword, str):
        return self._getkeyword('', keyword, -1)
    else:
        return self._getkeyword('', '', keyword)
Get the value of a table keyword. The value of a keyword can be a: - scalar which is returned as a normal python scalar. - an array which is returned as a numpy array. - a reference to a table which is returned as a string containing its name prefixed by 'Table :'. It can be opened using the normal table constructor which will remove the prefix. - a struct which is returned as a dict. A struct is fully nestable, thus each field in the struct can have one of the values described here. Similar to method :func:`fieldnames` a keyword name can be given consisting of multiple parts separated by dots. This represents nested structs, thus gives the value of a field in a struct (in a struct, etc.). Instead of a keyword name an index can be given which returns the value of the i-th keyword.
382,965
def object_download(self, bucket, key, start_offset=0, byte_count=None):
    args = {'alt': 'media'}
    headers = {}
    if start_offset > 0 or byte_count is not None:
        # HTTP Range header reconstructed; the literals were lost in extraction
        header = 'bytes=%d-' % start_offset
        if byte_count is not None:
            header += '%d' % byte_count
        headers['Range'] = header
    url = Api._DOWNLOAD_ENDPOINT + (Api._OBJECT_PATH % (bucket, Api._escape_key(key)))
    return google.datalab.utils.Http.request(url, args=args, headers=headers,
                                             credentials=self._credentials,
                                             raw_response=True)
Reads the contents of an object as text.

Args:
    bucket: the name of the bucket containing the object.
    key: the key of the object to be read.
    start_offset: the start offset of bytes to read.
    byte_count: the number of bytes to read. If None, it reads to the end.

Returns:
    The text content within the object.

Raises:
    Exception if the object could not be read from.
382,966
def locus_of_gene_id(self, gene_id): return self.db.query_locus( filter_column="gene_id", filter_value=gene_id, feature="gene")
Given a gene ID returns Locus with: chromosome, start, stop, strand
382,967
def mangle(self, name, x):
    h = abs(hash(name))
    # format string reconstructed; the literal was lost in extraction
    return '%s-%s' % (h, x)
Mangle the name by hashing the I{name} and appending I{x}. @return: the mangled name.
382,968
def Pipe(self, *sequence, **kwargs):
    state = kwargs.pop("refs", {})
    return self.Seq(*sequence, **kwargs)(None, **state)
`Pipe` runs any `phi.dsl.Expression`. It's highly inspired by Elixir's [|> (pipe)](https://hexdocs.pm/elixir/Kernel.html#%7C%3E/2) operator.

**Arguments**

* ***sequence**: any variable amount of expressions. All expressions inside of `sequence` will be composed together using `phi.dsl.Expression.Seq`.
* ****kwargs**: `Pipe` forwards all `kwargs` to `phi.builder.Builder.Seq`, visit its documentation for more info.

The expression

    Pipe(*sequence, **kwargs)

is equivalent to

    Seq(*sequence, **kwargs)(None)

Normally the first argument of `Pipe` is a value, that is reinterpreted as a `phi.dsl.Expression.Val`; therefore, the input `None` is discarded.

**Examples**

    from phi import P

    def add1(x):
        return x + 1

    def mul3(x):
        return x * 3

    x = P.Pipe(
        1,     # input
        add1,  # 1 + 1 == 2
        mul3   # 2 * 3 == 6
    )

    assert x == 6

The previous using [lambdas](https://cgarciae.github.io/phi/lambdas.m.html) to create the functions

    from phi import P

    x = P.Pipe(
        1,      # input
        P + 1,  # 1 + 1 == 2
        P * 3   # 2 * 3 == 6
    )

    assert x == 6

**Also see**

* `phi.builder.Builder.Seq`
* [dsl](https://cgarciae.github.io/phi/dsl.m.html)
* [Compile](https://cgarciae.github.io/phi/dsl.m.html#phi.dsl.Compile)
* [lambdas](https://cgarciae.github.io/phi/lambdas.m.html)
382,969
def drawQuad(page, quad, color=None, fill=None, dashes=None, width=1,
             roundCap=False, morph=None, overlay=True):
    img = page.newShape()
    Q = img.drawQuad(Quad(quad))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               roundCap=roundCap, morph=morph)
    img.commit(overlay)
    return Q
Draw a quadrilateral.
382,970
def login_oauth2_user(valid, oauth):
    if valid:
        oauth.user.login_via_oauth2 = True
        _request_ctx_stack.top.user = oauth.user
        identity_changed.send(current_app._get_current_object(),
                              identity=Identity(oauth.user.id))
    return valid, oauth
Log in a user after having been verified.
382,971
def fragments_fromstring(html, no_leading_text=False, base_url=None,
                         parser=None, **kw):
    if parser is None:
        parser = html_parser
    # The middle of this function was lost in extraction; the wrapping and
    # body-extraction steps below are restored from lxml.html's source
    if isinstance(html, bytes):
        if not _looks_like_full_html_bytes(html):
            html = ('<html><body>'.encode('ascii') + html +
                    '</body></html>'.encode('ascii'))
    else:
        if not _looks_like_full_html_unicode(html):
            html = '<html><body>%s</body></html>' % html
    doc = document_fromstring(html, parser=parser, base_url=base_url, **kw)
    bodies = [e for e in doc if _nons(e.tag) == 'body']
    assert len(bodies) == 1, ("too many bodies: %r in %r" % (bodies, html))
    body = bodies[0]
    elements = []
    if no_leading_text and body.text and body.text.strip():
        raise etree.ParserError(
            "There is leading text: %r" % body.text)
    if body.text and body.text.strip():
        elements.append(body.text)
    elements.extend(body)
    return elements
Parses several HTML elements, returning a list of elements. The first item in the list may be a string (though leading whitespace is removed). If no_leading_text is true, then it will be an error if there is leading text, and it will always be a list of only elements. base_url will set the document's base_url attribute (and the tree's docinfo.URL)
382,972
def save(markov, fname, args):
    if isinstance(markov.storage, JsonStorage):
        if fname is None:
            markov.save(sys.stdout)
        else:
            # extension literal reconstructed; bz2 needs text mode for JSON
            if fname.endswith('.bz2'):
                open_ = bz2.open
            else:
                open_ = open
            if args.progress:
                print('Saving...')
            with open_(fname, 'wt') as fp:
                markov.save(fp)
    else:
        markov.save()
Save a generator.

Parameters
----------
markov : `markovchain.Markov`
    Generator to save.
fname : `str`
    Output file path.
args : `argparse.Namespace`
    Command arguments.
382,973
def make_non_negative_axis(axis, rank):
    axis = tf.convert_to_tensor(value=axis, name="axis")
    rank = tf.convert_to_tensor(value=rank, name="rank")
    axis_ = tf.get_static_value(axis)
    rank_ = tf.get_static_value(rank)
    if axis_ is not None and rank_ is not None:
        is_scalar = axis_.ndim == 0
        if is_scalar:
            axis_ = [axis_]
        positive_axis = []
        for a_ in axis_:
            if a_ < 0:
                positive_axis.append(rank_ + a_)
            else:
                positive_axis.append(a_)
        if is_scalar:
            positive_axis = positive_axis[0]
        return tf.convert_to_tensor(value=positive_axis, dtype=axis.dtype)
    return tf.where(axis < 0, rank + axis, axis)
Make (possibly negatively indexed) `axis` argument non-negative.
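A usage sketch under TF2 eager execution, assuming the function above is in scope:

import tensorflow as tf

ax = make_non_negative_axis(axis=-1, rank=4)
print(int(ax))  # 3: axis -1 of a rank-4 tensor is the last of four dimensions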
382,974
def random_string(**kwargs):
    # keyword names reconstructed from the docstring; literals were lost
    n = kwargs.get('n', 10)
    pool = kwargs.get('pool') or string.digits + string.ascii_lowercase
    return ''.join(random.SystemRandom().choice(pool) for _ in range(n))
By default generates a random string of 10 chars composed of digits and ascii lowercase letters. String length and pool can be overridden by using kwargs. Pool must be a list of strings.
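A usage sketch; the keyword names follow the code above:

token = random_string()                       # 10 chars, digits + lowercase
pin = random_string(n=6, pool="0123456789")   # 6-digit numeric PIN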
382,975
def golfclap(rest):
    "Clap for something"
    clapv = random.choice(phrases.clapvl)
    adv = random.choice(phrases.advl)
    adj = random.choice(phrases.adjl)
    if rest:
        clapee = rest.strip()
        karma.Karma.store.change(clapee, 1)
        return "/me claps %s for %s, %s %s." % (clapv, rest, adv, adj)
    return "/me claps %s, %s %s." % (clapv, adv, adj)
Clap for something
382,976
def currentRegion(self):
    pos = QtGui.QCursor.pos()
    pos = self.mapFromGlobal(pos)
    for region in self.regions():
        if region.testHovered(pos):
            return region
    return None
Returns the current region based on the current cursor position. :return <XDropZoneWidget>
382,977
def busco_plot (self, lin): data = {} for s_name in self.busco_data: if self.busco_data[s_name].get() == lin: data[s_name] = self.busco_data[s_name] plot_keys = [,,,] plot_cols = [, , , ] keys = OrderedDict() for k, col in zip(plot_keys, plot_cols): keys[k] = {: self.busco_keys[k], : col} config = { : .format(re.sub(, , str(lin))), : if lin is None else .format(lin), : , : } return bargraph.plot(data, keys, config)
Make the HighCharts HTML for the BUSCO plot for a particular lineage
382,978
def get_qemu_version(qemu_path):
    if sys.platform.startswith("win"):
        version_file = os.path.join(os.path.dirname(qemu_path), "version.txt")
        if os.path.isfile(version_file):
            try:
                with open(version_file, "rb") as file:
                    version = file.read().decode("utf-8").strip()
                    match = re.search("[0-9\.]+", version)
                    if match:
                        return version
            except (UnicodeDecodeError, OSError) as e:
                log.warn("could not read {}: {}".format(version_file, e))
        return ""
    else:
        try:
            output = yield from subprocess_check_output(qemu_path, "-version")
            match = re.search("version\s+([0-9a-z\-\.]+)", output)
            if match:
                version = match.group(1)
                return version
            else:
                raise QemuError("Could not determine the Qemu version for {}".format(qemu_path))
        except subprocess.SubprocessError as e:
            raise QemuError("Error while looking for the Qemu version: {}".format(e))
Gets the Qemu version. :param qemu_path: path to Qemu executable.
382,979
def parse(self):
    r = super(GameSummRep, self).parse()
    try:
        self.parse_scoring_summary()
        return r and False
    except:
        return False
Fully parses game summary report. :returns: boolean success indicator :rtype: bool
382,980
def save(filename=ConfigPath): default_values = defaults() parser = configparser.RawConfigParser() parser.optionxform = str try: save_types = basestring, int, float, tuple, list, dict, type(None) for k, v in sorted(globals().items()): if not isinstance(v, save_types) or k.startswith("_") \ or default_values.get(k, parser) == v: continue try: parser.set("DEFAULT", k, json.dumps(v)) except Exception: pass if parser.defaults(): with open(filename, "wb") as f: f.write(" datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) parser.write(f) else: try: os.unlink(filename) except Exception: pass except Exception: logging.warn("Error writing config to %s.", filename, exc_info=True)
Saves this module's changed attributes to INI configuration.
382,981
def __complete_imports_and_aliases(
    self, prefix: str, name_in_module: Optional[str] = None
) -> Iterable[str]:
    imports = self.imports
    aliases = lmap.map(
        {
            alias: imports.entry(import_name)
            for alias, import_name in self.import_aliases
        }
    )
    candidates = filter(
        Namespace.__completion_matcher(prefix), itertools.chain(aliases, imports)
    )
    if name_in_module is not None:
        for _, module in candidates:
            for name in module.__dict__:
                if name.startswith(name_in_module):
                    yield f"{prefix}/{name}"
    else:
        for candidate_name, _ in candidates:
            yield f"{candidate_name}/"
Return an iterable of possible completions matching the given prefix from the list of imports and aliased imports. If name_in_module is given, further attempt to refine the list to matching names in that namespace.
382,982
def _updater_wrapper(updater):
    def updater_handle(key, lhs_handle, rhs_handle, _):
        lhs = _ndarray_cls(NDArrayHandle(lhs_handle))
        rhs = _ndarray_cls(NDArrayHandle(rhs_handle))
        updater(key, lhs, rhs)
    return updater_handle
A wrapper for the user-defined handle.
382,983
def _locked_refresh_doc_ids(self):
    d = {}
    for s in self._shards:
        for k in s.doc_index.keys():
            if k in d:
                # error message reconstructed; the literal was lost in extraction
                raise KeyError('doc "{i}" found in multiple shards'.format(i=k))
            d[k] = s
    self._doc2shard_map = d
Assumes that the caller has the _index_lock !
382,984
def display(self, ret, indent, out, rows_key=None, labels_key=None): rows = [] labels = None if isinstance(ret, dict): if not rows_key or (rows_key and rows_key in list(ret.keys())): for key in sorted(ret): if rows_key and key != rows_key: continue val = ret[key] if not rows_key: out.append( self.ustring( indent, self.DARK_GRAY, key, suffix= ) ) out.append( self.ustring( indent, self.DARK_GRAY, ) ) if isinstance(val, (list, tuple)): rows = val if labels_key: labels = ret.get(labels_key) out.extend(self.display_rows(rows, labels, indent)) else: self.display(val, indent + 4, out, rows_key=rows_key, labels_key=labels_key) elif rows_key: for key in sorted(ret): val = ret[key] self.display(val, indent, out, rows_key=rows_key, labels_key=labels_key) elif isinstance(ret, (list, tuple)): if not rows_key: rows = ret out.extend(self.display_rows(rows, labels, indent)) return out
Display table(s).
382,985
def _validate_danglers(self):
    for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):
        dangler_count = self.gtfs.execute_custom_query(query).fetchone()[0]
        if dangler_count > 0:
            if self.verbose:
                print(str(dangler_count) + " " + warning)
            self.warnings_container.add_warning(warning, self.location, count=dangler_count)
Checks for rows that are not referenced in the tables that should be linked

stops <> stop_times using stop_I
stop_times <> trips <> days, using trip_I
trips <> routes, using route_I

:return: None
382,986
def get_network(model=None, std=0.005, disable_reinforce=False, random_glimpse=False):
    network = NeuralClassifier(input_dim=28 * 28)
    network.stack_layer(FirstGlimpseLayer(std=std,
                                          disable_reinforce=disable_reinforce,
                                          random_glimpse=random_glimpse))
    if model and os.path.exists(model):
        network.load_params(model)
    return network
Get baseline model. Parameters: model - model path Returns: network
382,987
def remove_escapes(self):
    chars = []
    i = 0
    while i < len(self.string):
        char = self.string[i]
        if char == "\\":
            # drop the backslash; the following character is kept on the next pass
            i += 1
        else:
            chars.append(char)
            i += 1
    return "".join(chars)
Removes backslash escape characters from the string, keeping the characters they escape. :return: the string without escape backslashes
382,988
def insert(self, context):
    self.write([
        "box",
        "add",
        "--name",
        context.resolve(self.__name),
        self.__path(context)
    ])
Add Vagrant box to the calling user. :param resort.engine.execution.Context context: Current execution context.
382,989
def tokenize(text, custom_dict=None):
    global TOKENIZER
    if not TOKENIZER:
        TOKENIZER = DeepcutTokenizer()
    return TOKENIZER.tokenize(text, custom_dict=custom_dict)
Tokenize given Thai text string

Input
=====
text: str, Thai text string
custom_dict: str (or list), path to customized dictionary file
    It allows the function not to tokenize given dictionary wrongly.
    The file should contain custom words separated by line.
    Alternatively, you can provide list of custom words too.

Output
======
tokens: list, list of tokenized words

Example
=======
>> deepcut.tokenize('ตัดคำได้ดีมาก')
>> ['ตัดคำ','ได้','ดี','มาก']
382,990
def _divide(divisor, remainder, quotient, remainders, base, precision=None):
    indices = itertools.count() if precision is None else range(precision)
    for _ in indices:
        if remainder == 0 or remainder in remainders:
            break
        remainders.append(remainder)
        (quot, rem) = divmod(remainder, divisor)
        quotient.append(quot)
        if quot > 0:
            remainder = rem * base
        else:
            remainder = remainder * base
    return remainder
Given a divisor and dividend, continue dividing until precision is reached.

:param int divisor: the divisor
:param int remainder: the remainder
:param int base: the base
:param precision: maximum number of fractional digits to compute
:type precision: int or NoneType
:returns: the remainder
:rtype: int

``quotient`` and ``remainders`` are set by side effects

Complexity: O(precision) if precision is not None else O(divisor)
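A worked driver for the helper, assuming `_divide` is in scope and that the caller pre-multiplies the initial remainder by the base, matching how the loop scales remainders between digits. This expands 1/7 in base 10:

quotient, remainders = [], []
leftover = _divide(divisor=7, remainder=1 * 10, quotient=quotient,
                   remainders=remainders, base=10, precision=12)
print(quotient)   # [1, 4, 2, 8, 5, 7] -- the repeating digits of 0.142857...
print(leftover)   # 10 -- the first remainder seen twice, signalling repetition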
382,991
def request_access_token(self, code, redirect_uri=None):
    redirect = redirect_uri or self._redirect_uri
    # Endpoint URL and grant type assumed from the standard OAuth2
    # authorization-code exchange this client implements.
    resp_text = _http('POST', 'https://api.weibo.com/oauth2/access_token',
                      client_id=self._client_id,
                      client_secret=self._client_secret,
                      redirect_uri=redirect, code=code,
                      grant_type='authorization_code')
    return self._parse_access_token(resp_text)
Return the access token as a JsonDict: {"access_token":"your-access-token","expires":12345678,"uid":1234}; `expires` is represented using standard unix epoch time
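A hedged sketch of the exchange (the client class name and all credentials below are placeholders):

client = APIClient(app_key='<app-key>', app_secret='<app-secret>',
                   redirect_uri='https://example.com/callback')
# `code` arrives as a query parameter after the user authorizes the app.
token = client.request_access_token(code='<authorization-code>')
print(token['access_token'], token['expires'])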
382,992
def synphot(self, wlen, flam):
    from scipy.interpolate import interp1d
    from scipy.integrate import romberg

    d = self._ensure_data()

    # Linear interpolants for the model spectrum and the bandpass response.
    mflam = interp1d(wlen, flam, kind='linear', bounds_error=False, fill_value=0)
    mresp = interp1d(d.wlen, d.resp, kind='linear', bounds_error=False, fill_value=0)

    bmin = d.wlen.min()
    bmax = d.wlen.max()

    # Response-weighted mean of the model flux over the bandpass.
    numer = romberg(lambda x: mresp(x) * mflam(x), bmin, bmax, divmax=20)
    denom = romberg(lambda x: mresp(x), bmin, bmax, divmax=20)
    return numer / denom
`wlen` and `flam` give a tabulated model spectrum in wavelength and f_λ units. We interpolate linearly over both the model and the bandpass since they're both discretely sampled.

Note that quadratic interpolation is both much slower and can blow up fatally in some cases. The latter issue might have to do with really large X values that aren't zero-centered, maybe?

I used to use the quadrature integrator, but Romberg doesn't issue complaints the way quadrature did. I should probably acquire some idea about what's going on under the hood.
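The quantity computed is the response-weighted mean flux over the bandpass. A self-contained sketch of the same integral with made-up arrays (a flat spectrum through a boxcar response):

import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import romberg

band_wlen = np.linspace(5000., 6000., 50)      # bandpass wavelengths
band_resp = np.ones_like(band_wlen)            # boxcar response
spec_wlen = np.linspace(4000., 7000., 300)
spec_flam = 2e-15 * np.ones_like(spec_wlen)    # flat f_lambda spectrum

mflam = interp1d(spec_wlen, spec_flam, kind='linear', bounds_error=False, fill_value=0)
mresp = interp1d(band_wlen, band_resp, kind='linear', bounds_error=False, fill_value=0)

numer = romberg(lambda x: mresp(x) * mflam(x), band_wlen.min(), band_wlen.max(), divmax=20)
denom = romberg(lambda x: mresp(x), band_wlen.min(), band_wlen.max(), divmax=20)
print(numer / denom)   # ~2e-15 for a flat spectrum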
382,993
import os

def pyx2obj(pyxpath, objpath=None, interm_c_dir=None, cwd=None,
            logger=None, full_module_name=None, only_update=False,
            metadir=None, include_numpy=False, include_dirs=None,
            cy_kwargs=None, gdb=False, cplus=None, **kwargs):
    assert pyxpath.endswith('.pyx')
    cwd = cwd or '.'
    objpath = objpath or '.'
    interm_c_dir = interm_c_dir or os.path.dirname(objpath)

    abs_objpath = get_abspath(objpath, cwd=cwd)

    if os.path.isdir(abs_objpath):
        pyx_fname = os.path.basename(pyxpath)
        name, ext = os.path.splitext(pyx_fname)
        objpath = os.path.join(objpath, name + objext)

    cy_kwargs = cy_kwargs or {}
    cy_kwargs['output_dir'] = cwd
    if cplus is None:
        cplus = pyx_is_cplus(pyxpath)
    cy_kwargs['cplus'] = cplus
    if gdb:
        cy_kwargs['gdb_debug'] = True
    if include_dirs:
        cy_kwargs['include_path'] = include_dirs

    interm_c_file = simple_cythonize(
        pyxpath, destdir=interm_c_dir,
        cwd=cwd, logger=logger,
        full_module_name=full_module_name,
        only_update=only_update, **cy_kwargs)

    include_dirs = include_dirs or []
    if include_numpy:
        import numpy
        numpy_inc_dir = numpy.get_include()
        if numpy_inc_dir not in include_dirs:
            include_dirs.append(numpy_inc_dir)

    flags = kwargs.pop('flags', [])
    needed_flags = ('-fwrapv', '-pthread')  # flag values assumed
    if not cplus:
        needed_flags += ('-Wstrict-prototypes',)
    for flag in needed_flags:
        if flag not in flags:
            flags.append(flag)

    options = kwargs.pop('options', [])

    if kwargs.pop('strict_aliasing', False):
        raise CompilationError("Cython req. strict aliasing to be disabled.")

    if 'pic' not in options:
        options.append('pic')
    if 'warn' not in options:
        options.append('warn')

    # `std` defaults assumed: c++98 for C++ sources, c99 for C sources.
    if cplus:
        std = kwargs.pop('std', 'c++98')
    else:
        std = kwargs.pop('std', 'c99')

    return src2obj(
        interm_c_file,
        objpath=objpath,
        cwd=cwd,
        only_update=only_update,
        metadir=metadir,
        include_dirs=include_dirs,
        flags=flags,
        std=std,
        options=options,
        logger=logger,
        inc_py=True,
        strict_aliasing=False,
        **kwargs)
Convenience function

If cwd is specified, pyxpath and dst are taken to be relative.
If only_update is set to `True` the modification time is checked and
compilation is only run if the source is newer than the destination.

Parameters
----------
pyxpath: path string
    path to Cython source file
objpath: path string (optional)
    path to object file to generate
interm_c_dir: path string (optional)
    directory to put generated C file.
cwd: path string (optional)
    working directory and root of relative paths
logger: logging.Logger (optional)
    passed onto `simple_cythonize` and `src2obj`
full_module_name: string (optional)
    passed onto `simple_cythonize`
only_update: bool (optional)
    passed onto `simple_cythonize` and `src2obj`
metadir: path string (optional)
    passed onto src2obj
include_numpy: bool (optional)
    Add numpy include directory to include_dirs. default: False
include_dirs: iterable of path strings (optional)
    Passed onto src2obj and via cy_kwargs['include_path'] to simple_cythonize.
cy_kwargs: dict (optional)
    keyword arguments passed onto `simple_cythonize`
gdb: bool (optional)
    convenience: cy_kwargs['gdb_debug'] is set True if gdb=True, default: False
cplus: bool (optional)
    Indicate whether C++ is used. default: auto-detect using `pyx_is_cplus`
**kwargs: dict
    keyword arguments passed onto src2obj

Returns
-------
Absolute path of generated object file.
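A hedged usage sketch (the source file name is a placeholder):

import logging

# Cythonize mymod.pyx and compile the generated C to an object file,
# adding NumPy headers and skipping work when the source is unchanged.
obj = pyx2obj('mymod.pyx', include_numpy=True, only_update=True,
              logger=logging.getLogger(__name__))
print(obj)   # absolute path of the generated object file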
382,994
from itertools import izip, product

def format_all(format_string, env):
    prepared_env = parse_pattern(format_string, env,
                                 lambda x, y: [FormatWrapper(x, z) for z in y])
    # Each env value is a list; yield one string per combination (Python 2).
    for field_values in product(*prepared_env.itervalues()):
        format_env = dict(izip(prepared_env.iterkeys(), field_values))
        yield format_string.format(**format_env)
Format the input string using each possible combination of lists in the provided environment. Yields the formatted strings.
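The effect, in spirit: a standalone Python 3 sketch that skips parse_pattern and FormatWrapper and assumes every environment value is already a list.

from itertools import product

env = {'host': ['web1', 'web2'], 'port': ['80', '443']}
keys = list(env)
for combo in product(*(env[k] for k in keys)):
    print('{host}:{port}'.format(**dict(zip(keys, combo))))
# web1:80, web1:443, web2:80, web2:443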
382,995
def get_holiday_label(self, day):
    day = cleaned_date(day)
    return {day: label
            for day, label in self.holidays(day.year)}.get(day)
Return the label of the holiday, if the date is a holiday
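A hedged usage sketch, assuming a calendar class (here called France) that provides holidays() and this method:

from datetime import date

cal = France()
print(cal.get_holiday_label(date(2020, 1, 1)))   # e.g. 'New year'
print(cal.get_holiday_label(date(2020, 1, 2)))   # None on a non-holiday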
382,996
from typing import Any, Generator, List

def genrows(cursor: Cursor, arraysize: int = 1000) \
        -> Generator[List[Any], None, None]:
    while True:
        results = cursor.fetchmany(arraysize)
        if not results:
            break
        for result in results:
            yield result
Generate all rows from a cursor.

Args:
    cursor: the cursor
    arraysize: split fetches into chunks of this many records

Yields:
    each row
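A usage sketch with sqlite3, whose cursors provide the fetchmany() the generator relies on (assuming genrows is in scope):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (x INTEGER)')
conn.executemany('INSERT INTO t VALUES (?)', [(i,) for i in range(5)])
cur = conn.execute('SELECT x FROM t')
for row in genrows(cur, arraysize=2):   # rows arrive in chunks of 2
    print(row)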
382,997
from itertools import starmap
from operator import attrgetter

def create_dataclass_loader(cls, registry, field_getters):
    fields = cls.__dataclass_fields__
    # Look up a loader for each field's declared type via the registry.
    item_loaders = map(registry, map(attrgetter('type'), fields.values()))
    getters = map(field_getters.__getitem__, fields)
    loaders = list(starmap(compose, zip(item_loaders, getters)))

    def dloader(obj):
        return cls(*(g(obj) for g in loaders))

    return dloader
Create a loader for a dataclass type.
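A hedged end-to-end sketch; the registry, compose helper, and field getters below are all made up for illustration and assume the loader and this snippet share one module, so the minimal compose here is the one the loader resolves:

from dataclasses import dataclass
from operator import itemgetter

@dataclass
class Point:
    x: int
    y: int

def compose(f, g):            # minimal compose as assumed by the loader
    return lambda v: f(g(v))

registry = lambda typ: typ    # per-type loader: here the type itself acts as a cast
field_getters = {'x': itemgetter('x'), 'y': itemgetter('y')}

load_point = create_dataclass_loader(Point, registry, field_getters)
print(load_point({'x': '1', 'y': '2'}))   # Point(x=1, y=2)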
382,998
import logging

def set_log_format(self, log_type, log_format):
    if not (log_type == 'error' or log_type == 'debug' or log_type == 'stream'):
        self.log.debug('Unknown log type: %s', log_type)  # message assumed
    else:
        self.default_formatter = logging.Formatter(log_format)
        if log_type == 'error':
            self.error_handler.setFormatter(self.default_formatter)
        elif log_type == 'debug':
            self.debug_handler.setFormatter(self.default_formatter)
        elif log_type == 'stream':
            self.stream_handler.setFormatter(self.default_formatter)
Configures log format

Arguments:
    log_type (:obj:`str`): log type (error, debug or stream)
    log_format (:obj:`str`): log format
        (ex: "Log: %(message)s | Log level: %(levelname)s | Date: %(asctime)s")
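A usage sketch, assuming an instance (here log_manager) of the class that owns this method:

log_manager.set_log_format(
    'stream',
    'Log: %(message)s | Log level: %(levelname)s | Date: %(asctime)s')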
382,999
def get_type_len(self):
    self.get_sql()
    return self.type, self.len, self.len_decimal
Retrieve the type and length for a data record.