Dataset preview header: an unnamed int64 index column (rows 0-389k), a 'code' column (strings, 26-79.6k characters), and a 'docstring' column (strings, 1-46.9k characters).
7,300
def run(self, data, rewrap=False, prefetch=0):
    if rewrap:
        data = [data]
    for _filter in self._filters:
        _filter.feed(data)
        data = _filter
    else:
        iterable = self._prefetch_callable(data, prefetch) if prefetch else data
        for out_data in iterable:
            yield out_data
Wires the pipeline and returns a lazy object of the transformed data. :param data: must be an iterable, where a full document must be returned for each loop :param rewrap: (optional) is a bool that indicates the need to rewrap data in cases where iterating over it produces undesired results, for instance ``dict`` instances. :param prefetch: (optional) is an int defining the number of items to be prefetched once the pipeline starts yielding data. The default prefetching mechanism is based on threads, so be careful with CPU-bound processing pipelines.
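A minimal usage sketch for the docstring above, assuming a pipeline object whose filters were already added; the pipeline, documents, and process names are illustrative, not from the source:

    # Iterate lazily over the transformed documents, prefetching 10 at a time.
    for out_doc in pipeline.run(documents, prefetch=10):
        process(out_doc)

    # A bare dict would be iterated key-by-key, so rewrap it first.
    for out_doc in pipeline.run({"id": 1, "body": "text"}, rewrap=True):
        process(out_doc)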
7,301
def get_edge(self, edge_id):
    try:
        edge_object = self.edges[edge_id]
    except KeyError:
        raise NonexistentEdgeError(edge_id)
    return edge_object
Returns the edge object identified by "edge_id".
7,302
def copy(self):
    vec1 = np.copy(self.scoef1._vec)
    vec2 = np.copy(self.scoef2._vec)
    return VectorCoefs(vec1, vec2, self.nmax, self.mmax)
Make a deep copy of this object. Example:: >>> c2 = c.copy()
7,303
def DiamAns(cmd, **fields):
    upfields, name = getCmdParams(cmd, False, **fields)
    p = DiamG(**upfields)
    p.name = name
    return p
Craft Diameter answer commands
7,304
def downstream(self, step_name):
    return list(self.steps[dep] for dep in self.dag.downstream(step_name))
Returns the direct dependencies of the given step
7,305
def git_get_title_and_message(begin, end):
    titles = git_get_log_titles(begin, end)
    title = "Pull request for " + end
    if len(titles) == 1:
        title = titles[0]
    pr_template = find_pull_request_template()
    if pr_template:
        message = get_pr_template_message(pr_template)
    else:
        if len(titles) == 1:
            message = git_get_commit_body(end)
        else:
            message = "\n".join(titles)
    return (len(titles), title, message)
Get title and message summary for patches between 2 commits. :param begin: first commit to look at :param end: last commit to look at :return: number of commits, title, message
7,306
def _intersect(start1, end1, start2, end2):
    start = max(start1, start2)
    end = min(end1, end2)
    if start > end:
        return None, None
    return start, end
Returns the intersection of two intervals. Returns (None,None) if the intersection is empty. :param int start1: The start date of the first interval. :param int end1: The end date of the first interval. :param int start2: The start date of the second interval. :param int end2: The end date of the second interval. :rtype: tuple[int|None,int|None]
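A short worked example of the interval intersection above (the integer endpoints are illustrative):

    print(_intersect(1, 10, 5, 20))   # (5, 10) -- overlapping intervals
    print(_intersect(1, 4, 5, 20))    # (None, None) -- disjoint intervals
    print(_intersect(3, 7, 7, 9))     # (7, 7) -- touching endpoints still intersect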
7,307
def assemble_to_object(self, in_filename, verbose=False):
    file_base_name = os.path.splitext(os.path.basename(in_filename))[0]
    # the '.o', '-c' and '-o' literals below were lost in extraction and are reconstructed
    out_filename, already_exists = self._get_intermediate_file(
        file_base_name + '.o', binary=True, fp=False)
    if already_exists:
        pass
    compiler, compiler_args = self._machine.get_compiler()
    compiler_args.append('-c')  # compile/assemble only, do not link
    cmd = [compiler] + [in_filename] + compiler_args + ['-o', out_filename]
    if verbose:
        print('Executing:', ' '.join(cmd))  # message text assumed; original literal lost
    try:
        subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        print("Assembly failed:", e, file=sys.stderr)
        sys.exit(1)
    return out_filename
Assemble *in_filename* assembly into an object file. Returns the file name of the generated object file.
7,308
def deconstruct(self):
    name, path, args, kwargs = super(HStoreField, self).deconstruct()
    if self.uniqueness is not None:
        kwargs['uniqueness'] = self.uniqueness
    if self.required is not None:
        kwargs['required'] = self.required
    return name, path, args, kwargs
Gets the values to pass to :see:__init__ when re-creating this object.
7,309
def save(self, *args, **kwargs):
    self.slug = slugify(self.name)
    self.uid = 'cycle:{}'.format(self.slug)  # format string reconstructed from the docstring: uid is "cycle:{year}"
    super(ElectionCycle, self).save(*args, **kwargs)
**uid**: :code:`cycle:{year}`
7,310
def _state_delete(self):
    try:
        os.remove(self._state_file)
    except OSError as err:
        if err.errno not in (errno.EPERM, errno.ENOENT):
            raise
    try:
        os.rmdir(self._state_dir)
    except OSError as err:
        if err.errno not in (errno.ENOTEMPTY, errno.ENOENT):
            raise
Try to delete the state.yml file and the folder .blockade
7,311
def _check_response_status(self, response):
    if response.status_code != requests.codes.ok:
        self._logger.error("%s %s", response.status_code, response.text)
        raise exceptions.RequestNonSuccessException(
            "Url {0} had status_code {1}".format(
                response.url, response.status_code))
Checks the specified HTTP response from the requests package and raises an exception if a non-200 HTTP code was returned by the server.
7,312
def corr_flat_und(a1, a2):
    n = len(a1)
    if len(a2) != n:
        raise BCTParamError("Cannot calculate flattened correlation on "
                            "matrices of different size")
    triu_ix = np.where(np.triu(np.ones((n, n)), 1))
    return np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1]
Returns the correlation coefficient between two flattened adjacency matrices. Only the upper triangular part is used to avoid double counting undirected matrices. Similarity metric for weighted matrices. Parameters ---------- A1 : NxN np.ndarray undirected matrix 1 A2 : NxN np.ndarray undirected matrix 2 Returns ------- r : float Correlation coefficient describing edgewise similarity of a1 and a2
7,313
def time_range(self, start, end):
    self._set_query(self.time_query,
                    time_start=self._format_time(start),
                    time_end=self._format_time(end))
    return self
Add a request for a time range to the query. This modifies the query in-place, but returns `self` so that multiple queries can be chained together on one line. This replaces any existing temporal queries that have been set. Parameters ---------- start : datetime.datetime The start of the requested time range end : datetime.datetime The end of the requested time range Returns ------- self : DataQuery Returns self for chaining calls
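A sketch of the chaining behaviour described above, assuming a DataQuery instance is available (variable names are illustrative):

    from datetime import datetime

    query = DataQuery().time_range(datetime(2024, 1, 1), datetime(2024, 1, 2))
    # Because the method returns self, further calls can be chained;
    # a second time_range replaces the earlier temporal query.
    query = query.time_range(datetime(2024, 2, 1), datetime(2024, 2, 2))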
7,314
def _construct_derivatives(self, coefs, **kwargs):
    return [self.basis_functions.derivatives_factory(coef, **kwargs)
            for coef in coefs]
Return a list of derivatives given a list of coefficients.
7,315
def single_html(epub_file_path, html_out=sys.stdout, mathjax_version=None, numchapters=None, includes=None): epub = cnxepub.EPUB.from_file(epub_file_path) if len(epub) != 1: raise Exception() package = epub[0] binder = cnxepub.adapt_package(package) partcount.update({}.fromkeys(parts, 0)) partcount[] += 1 html = cnxepub.SingleHTMLFormatter(binder, includes=includes) logger.debug(.format(cnxepub.model_to_tree(binder))) if numchapters is not None: apply_numchapters(html.get_node_type, binder, numchapters) logger.debug(.format( cnxepub.model_to_tree(binder))) if mathjax_version: etree.SubElement( html.head, , src=MATHJAX_URL.format(mathjax_version=mathjax_version)) print(str(html), file=html_out) if hasattr(html_out, ): html_out.close()
Generate complete book HTML.
7,316
def writeToCheckpoint(self, checkpointDir):
    proto = self.getSchema().new_message()
    self.write(proto)
    checkpointPath = self._getModelCheckpointFilePath(checkpointDir)
    if os.path.exists(checkpointDir):
        if not os.path.isdir(checkpointDir):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete (not a directory)")
                            % checkpointDir)
        if not os.path.isfile(checkpointPath):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete"
                             " (%s missing or not a file)") %
                            (checkpointDir, checkpointPath))
        shutil.rmtree(checkpointDir)
    self.__makeDirectoryFromAbsolutePath(checkpointDir)
    # file mode reconstructed: capnproto messages are written as binary
    with open(checkpointPath, 'wb') as f:
        proto.write(f)
Serializes model using capnproto and writes data to ``checkpointDir``
7,317
def notify(self):
    if self.__method is not None:
        self.__method(self.__peer)
        return True
    return False
Calls the notification method :return: True if the notification method has been called
7,318
def node_vectors(node_id):
    exp = Experiment(session)
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed", parameter_type="bool",
                               default=False)
    for x in [direction, failed]:
        if type(x) == Response:
            return x
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")
    try:
        vectors = node.vectors(direction=direction, failed=failed)
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/vectors GET server error",
            status=403,
            participant=node.participant,
        )
    return success_response(vectors=[v.__json__() for v in vectors])
Get the vectors of a node. You must specify the node id in the url. You can pass direction (incoming/outgoing/all) and failed (True/False/all).
7,319
def resolve_type(arg): arg_type = type(arg) if arg_type == list: assert isinstance(arg, list) sample = arg[:min(4, len(arg))] tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return ListType(tentative_type) elif arg_type == set: assert isinstance(arg, set) sample = [] iterator = iter(arg) for i in range(0, min(4, len(arg))): sample.append(next(iterator)) tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return SetType(tentative_type) elif arg_type == FakeIterator: assert isinstance(arg, FakeIterator) sample = [] iterator = iter(arg) for i in range(0, min(4, len(arg))): sample.append(next(iterator)) tentative_type = TentativeType() for sample_item in sample: tentative_type.add(resolve_type(sample_item)) return IteratorType(tentative_type) elif arg_type == tuple: assert isinstance(arg, tuple) sample = list(arg[:min(10, len(arg))]) return TupleType([resolve_type(sample_item) for sample_item in sample]) elif arg_type == dict: assert isinstance(arg, dict) key_tt = TentativeType() val_tt = TentativeType() for i, (k, v) in enumerate(iteritems(arg)): if i > 4: break key_tt.add(resolve_type(k)) val_tt.add(resolve_type(v)) return DictType(key_tt, val_tt) else: return type(arg)
Resolve object to one of our internal collection types or generic built-in type. Args: arg: object to resolve
7,320
def put(self, name, value):
    pm = ndarray_to_mxarray(self._libmx, value)
    self._libeng.engPutVariable(self._ep, name, pm)
    self._libmx.mxDestroyArray(pm)
Put a variable to MATLAB workspace.
7,321
def notify(self, resource):
    observers = self._observeLayer.notify(resource)
    logger.debug("Notify")
    for transaction in observers:
        with transaction:
            transaction.response = None
            transaction = self._requestLayer.receive_request(transaction)
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
            transaction = self._messageLayer.send_response(transaction)
            if transaction.response is not None:
                if transaction.response.type == defines.Types["CON"]:
                    self._start_retransmission(transaction, transaction.response)
                self.send_datagram(transaction.response)
Notifies the observers of a certain resource. :param resource: the resource
7,322
def getFailureMessage(failure):
    # the assignment targets were lost in extraction; 'exc' and 'msg' are
    # reconstructed from the %(exc)s / %(msg)s placeholders used below
    exc = str(failure.type)
    msg = failure.getErrorMessage()
    if len(failure.frames) == 0:
        return "failure %(exc)s: %(msg)s" % locals()
    (func, filename, line, some, other) = failure.frames[-1]
    filename = scrubFilename(filename)
    return "failure %(exc)s at %(filename)s:%(line)s: %(func)s(): %(msg)s" \
        % locals()
Return a short message based on L{twisted.python.failure.Failure}. Tries to find where the exception was triggered.
7,323
def to_html(data): base_html_template = Template() code = to_json(data, indent=4) if PYGMENTS_INSTALLED: c = Context({ : highlight(code, JSONLexer(), HtmlFormatter()), : HtmlFormatter().get_style_defs() }) html = base_html_template.render(c) else: c = Context({: code}) html = base_html_template.render(c) return html
Serializes a python object as HTML This method uses the to_json method to turn the given data object into formatted JSON that is displayed in an HTML page. If pygments in installed, syntax highlighting will also be applied to the JSON.
7,324
def get_config_value(self, name, defaultValue):
    self.send_get_config_value(name, defaultValue)
    return self.recv_get_config_value()
Parameters: - name - defaultValue
7,325
def random(self, cascadeFetch=False):
    matchedKeys = list(self.getPrimaryKeys())
    obj = None
    while matchedKeys and not obj:
        key = matchedKeys.pop(random.randint(0, len(matchedKeys) - 1))
        obj = self.get(key, cascadeFetch=cascadeFetch)
    return obj
Random - Returns a random record in current filterset. @param cascadeFetch <bool> Default False. If True, all Foreign objects associated with this model will be fetched immediately. If False, foreign objects will be fetched on-access. @return - Instance of Model object, or None if no items match current filters
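A hedged usage sketch of the method above, assuming a model class exposing this filterset API (the model and filter fields are illustrative):

    # Pick one random record from the current filterset, fetching foreign objects eagerly.
    obj = MyModel.objects.filter(status='active').random(cascadeFetch=True)
    if obj is None:
        print("no records match the current filters")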
7,326
def get_mac_addr(mac_addr):
    mac_addr = bytearray(mac_addr)
    # separator and format string reconstructed from the docstring ("11:22:33:aa:bb:cc")
    mac = b':'.join([('%02x' % o).encode() for o in mac_addr])
    return mac
converts bytes to mac addr format :mac_addr: ctypes.structure :return: str mac addr in format 11:22:33:aa:bb:cc
7,327
def partof(self, ns1, id1, ns2, id2):
    rel_fun = lambda node, graph: self.partof_objects(node)
    return self.directly_or_indirectly_related(ns1, id1, ns2, id2,
                                               self.partof_closure, rel_fun)
Return True if one entity is "partof" another. Parameters ---------- ns1 : str Namespace code for an entity. id1 : str URI for an entity. ns2 : str Namespace code for an entity. id2 : str URI for an entity. Returns ------- bool True if t1 has a "partof" relationship with t2, either directly or through a series of intermediates; False otherwise.
7,328
def bck_from_spt(spt):
    spt = np.asfarray(spt)
    return np.piecewise(spt,
                        [spt < 30,
                         spt < 19,
                         spt <= 14,
                         spt < 10,
                         (spt < 2) | (spt >= 30)],
                        [lambda s: 3.41 - 0.21 * (s - 20),
                         lambda s: 3.42 - 0.075 * (s - 14),
                         lambda s: 3.42 + 0.075 * (s - 14),
                         lambda s: 2.43 + 0.0895 * s,
                         np.nan])
Calculate a bolometric correction constant for a J band magnitude based on a spectral type, using the fits of Wilking+ (1999AJ....117..469W), Dahn+ (2002AJ....124.1170D), and Nakajima+ (2004ApJ...607..499N). spt - Numerical spectral type. M0=0, M9=9, L0=10, ... Returns: the correction `bck` such that `m_bol = k_abs + bck`, or NaN if `spt` is out of range. Valid values of `spt` are between 2 and 30.
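A small sketch of calling the fit above on numerical spectral types; the chosen values are illustrative:

    import numpy as np

    spts = np.array([5.0, 15.0, 25.0, 35.0])   # M5, L5, T5, and one out-of-range type
    bck = bck_from_spt(spts)
    print(bck)   # out-of-range entries (spt < 2 or spt >= 30) come back as NaN
    # per the docstring: m_bol = k_abs + bck, where k_abs is the absolute J-band magnitude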
7,329
def liftover_to_genome(pass_pos, gtf): fixed_pos = [] for pos in pass_pos: if pos["chrom"] not in gtf: continue db_pos = gtf[pos["chrom"]][0] mut = _parse_mut(pos["sv"]) print([db_pos, pos]) if db_pos[3] == "+": pos[] = db_pos[1] + pos["pre_pos"] + 1 else: pos[] = db_pos[2] - (pos["pre_pos"] - 1) pos[] = db_pos[0] pos[] = list(mut[0]) fixed_pos.append(pos) _print_header(fixed_pos) for pos in fixed_pos: print_vcf(pos)
Liftover from precursor to genome
7,330
def get_config(self):
    # dict keys reconstructed from the attribute names described in the docstring
    config = {
        'make_distribution_fn': _serialize_function(self._make_distribution_fn),
        'convert_to_tensor_fn': _serialize(self._convert_to_tensor_fn),
    }
    base_config = super(DistributionLambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Returns the config of this layer. This Layer's `make_distribution_fn` is serialized via a library built on Python pickle. This serialization of Python functions is provided for convenience, but: 1. The use of this format for long-term storage of models is discouraged. In particular, it may not be possible to deserialize in a different version of Python. 2. While serialization is generally supported for lambdas, local functions, and static methods (and closures over these constructs), complex functions may fail to serialize. 3. `Tensor` objects (and functions referencing `Tensor` objects) can only be serialized when the tensor value is statically known. (Such Tensors are serialized as numpy arrays.) Instead of relying on `DistributionLambda.get_config`, consider subclassing `DistributionLambda` and directly implementing Keras serialization via `get_config` / `from_config`. NOTE: At the moment, `DistributionLambda` can only be serialized if the `convert_to_tensor_fn` is a serializable Keras object (i.e., implements `get_config`) or one of the standard values: - `Distribution.sample` (or `"sample"`) - `Distribution.mean` (or `"mean"`) - `Distribution.mode` (or `"mode"`) - `Distribution.stddev` (or `"stddev"`) - `Distribution.variance` (or `"variance"`)
7,331
def maybeparens(lparen, item, rparen):
    return item | lparen.suppress() + item + rparen.suppress()
Wrap an item in optional parentheses, only applying them if necessary.
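The combinator above reads like pyparsing; a minimal sketch under that assumption:

    from pyparsing import Literal, Word, nums

    lparen, rparen = Literal("("), Literal(")")
    number = Word(nums)
    expr = maybeparens(lparen, number, rparen)

    print(expr.parseString("42"))     # ['42']  -- bare item matches directly
    print(expr.parseString("(42)"))   # ['42']  -- parentheses are consumed but suppressed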
7,332
async def _get_security_token(self) -> None: _LOGGER.debug() if self._credentials is None: return async with self._security_token_lock: if self._security_token is None: login_resp = await self._request( , LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get(, 1)) if return_code != 0: if return_code == 203: _LOGGER.debug() self._credentials = None raise MyQError(login_resp[]) self._security_token = login_resp[]
Request a security token.
7,333
def process_node(self, node, parent, end, open_list, open_value=True):
    ng = self.calc_cost(parent, node)
    if not node.opened or ng < node.g:
        node.g = ng
        node.h = node.h or self.apply_heuristic(node, end) * self.weight
        node.f = node.g + node.h
        node.parent = parent
        if not node.opened:
            heapq.heappush(open_list, node)
            node.opened = open_value
        else:
            open_list.remove(node)
            heapq.heappush(open_list, node)
Check whether the given node is part of the path by calculating its cost, and add it to or reposition it in the open list accordingly. :param node: the node we would like to test (the neighbor in A* or the jump-node in JumpPointSearch) :param parent: the parent node (the current node being expanded) :param end: the end point, used to calculate the cost of the path :param open_list: the list that keeps track of our current path :param open_value: needed if we want to set the open flag to something other than True (used for bi-directional algorithms)
7,334
def update_bounds(self, bounds): self.bounds = np.array(bounds, dtype=) vertices, directions = self._gen_bounds(self.bounds) self._verts_vbo.set_data(vertices) self._directions_vbo.set_data(directions) self.widget.update()
Update the bounds inplace
7,335
def goto_step(self, inst: InstanceNode) -> InstanceNode: return inst.look_up(**self.parse_keys(inst.schema_node))
Return member instance of `inst` addressed by the receiver. Args: inst: Current instance.
7,336
def is_valid(self, qstr=None):
    if qstr is None:
        qstr = self.currentText()
    return is_module_or_package(to_text_string(qstr))
Return True if string is valid
7,337
def compose(request, recipient=None, form_class=ComposeForm, template_name=, success_url=None, recipient_filter=None): if request.method == "POST": sender = request.user form = form_class(request.POST, recipient_filter=recipient_filter) if form.is_valid(): form.save(sender=request.user) messages.info(request, _(u"Message successfully sent.")) if success_url is None: success_url = reverse() if in request.GET: success_url = request.GET[] return HttpResponseRedirect(success_url) else: form = form_class() if recipient is not None: recipients = [u for u in User.objects.filter(**{ % get_username_field(): [r.strip() for r in recipient.split()]})] form.fields[].initial = recipients return render(request, template_name, { : form, })
Displays and handles the ``form_class`` form to compose new messages. Required Arguments: None Optional Arguments: ``recipient``: username of a `django.contrib.auth` User, who should receive the message, optionally multiple usernames could be separated by a '+' ``form_class``: the form-class to use ``template_name``: the template to use ``success_url``: where to redirect after successful submission
7,338
def inference(self, kern_r, kern_c, Xr, Xc, Zr, Zc, likelihood, Y, qU_mean ,qU_var_r, qU_var_c, indexD, output_dim): N, D, Mr, Mc, Qr, Qc = Y.shape[0], output_dim,Zr.shape[0], Zc.shape[0], Zr.shape[1], Zc.shape[1] uncertain_inputs_r = isinstance(Xr, VariationalPosterior) uncertain_inputs_c = isinstance(Xc, VariationalPosterior) uncertain_outputs = isinstance(Y, VariationalPosterior) grad_dict = self._init_grad_dict(N,D,Mr,Mc) beta = 1./likelihood.variance if len(beta)==1: beta = np.zeros(D)+beta psi0_r, psi1_r, psi2_r = self.gatherPsiStat(kern_r, Xr, Zr, uncertain_inputs_r) psi0_c, psi1_c, psi2_c = self.gatherPsiStat(kern_c, Xc, Zc, uncertain_inputs_c) Kuu_r = kern_r.K(Zr).copy() diag.add(Kuu_r, self.const_jitter) Lr = jitchol(Kuu_r) Kuu_c = kern_c.K(Zc).copy() diag.add(Kuu_c, self.const_jitter) Lc = jitchol(Kuu_c) mu, Sr, Sc = qU_mean, qU_var_r, qU_var_c LSr = jitchol(Sr) LSc = jitchol(Sc) LcInvMLrInvT = dtrtrs(Lc,dtrtrs(Lr,mu.T)[0].T)[0] LcInvLSc = dtrtrs(Lc, LSc)[0] LrInvLSr = dtrtrs(Lr, LSr)[0] LcInvScLcInvT = tdot(LcInvLSc) LrInvSrLrInvT = tdot(LrInvLSr) tr_LrInvSrLrInvT = np.square(LrInvLSr).sum() tr_LcInvScLcInvT = np.square(LcInvLSc).sum() mid_res = { : psi0_r, : psi1_r, : psi2_r, : psi0_c, : psi1_c, : psi2_c, :Lr, :Lc, : LcInvMLrInvT, : LcInvScLcInvT, : LrInvSrLrInvT, } logL = 0. for d in range(D): logL += self.inference_d(d, beta, Y, indexD, grad_dict, mid_res, uncertain_inputs_r, uncertain_inputs_c, Mr, Mc) logL += -Mc * (np.log(np.diag(Lr)).sum()-np.log(np.diag(LSr)).sum()) -Mr * (np.log(np.diag(Lc)).sum()-np.log(np.diag(LSc)).sum()) \ - np.square(LcInvMLrInvT).sum()/2. - tr_LrInvSrLrInvT * tr_LcInvScLcInvT/2. + Mr*Mc/2. tmp = tdot(LcInvMLrInvT)/2. + tr_LrInvSrLrInvT/2. * LcInvScLcInvT - Mr/2.*np.eye(Mc) dL_dKuu_c = backsub_both_sides(Lc, tmp, ) dL_dKuu_c += dL_dKuu_c.T dL_dKuu_c *= 0.5 tmp = tdot(LcInvMLrInvT.T)/2. + tr_LcInvScLcInvT/2. * LrInvSrLrInvT - Mc/2.*np.eye(Mr) dL_dKuu_r = backsub_both_sides(Lr, tmp, ) dL_dKuu_r += dL_dKuu_r.T dL_dKuu_r *= 0.5 tmp = - LcInvMLrInvT dL_dqU_mean = dtrtrs(Lc, dtrtrs(Lr, tmp.T, trans=1)[0].T, trans=1)[0] LScInv = dtrtri(LSc) tmp = -tr_LrInvSrLrInvT/2.*np.eye(Mc) dL_dqU_var_c = backsub_both_sides(Lc, tmp, ) + tdot(LScInv.T) * Mr/2. LSrInv = dtrtri(LSr) tmp = -tr_LcInvScLcInvT/2.*np.eye(Mr) dL_dqU_var_r = backsub_both_sides(Lr, tmp, ) + tdot(LSrInv.T) * Mc/2. post = PosteriorMultioutput(LcInvMLrInvT=LcInvMLrInvT, LcInvScLcInvT=LcInvScLcInvT, LrInvSrLrInvT=LrInvSrLrInvT, Lr=Lr, Lc=Lc, kern_r=kern_r, Xr=Xr, Zr=Zr) grad_dict[] += dL_dqU_mean grad_dict[] += dL_dqU_var_c grad_dict[] += dL_dqU_var_r grad_dict[] += dL_dKuu_c grad_dict[] += dL_dKuu_r if not uncertain_inputs_c: grad_dict[] = grad_dict[] grad_dict[] = grad_dict[] if not uncertain_inputs_r: grad_dict[] = grad_dict[] grad_dict[] = grad_dict[] return post, logL, grad_dict
The SVI-VarDTC inference
7,339
def json_template(data, template_name, template_context):
    html = render_to_string(template_name, template_context)
    data = data or {}
    data['html'] = html  # key name assumed; the original string literal was lost in extraction
    return HttpResponse(json_encode(data), content_type='application/json')  # content type assumed
Old style, use JSONTemplateResponse instead of this.
7,340
def _blast(bvname2vals, name_map):
    if len(name_map) == 0:
        return dict()
    return fn.merge(*(dict(zip(names, bvname2vals[bvname]))
                      for bvname, names in name_map))
Helper function to expand (blast) str -> int map into str -> bool map. This is used to send word level inputs to aiger.
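A small sketch of the helper above; fn is assumed to be the funcy module, and the bit-level names are illustrative:

    import funcy as fn   # assumed binding for the `fn` used in _blast

    bvname2vals = {'x': [True, False, True]}
    name_map = [('x', ['x[0]', 'x[1]', 'x[2]'])]

    print(_blast(bvname2vals, name_map))
    # {'x[0]': True, 'x[1]': False, 'x[2]': True}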
7,341
def search_accounts(self, **kwargs):
    # HTTP verb and endpoint reconstructed from the docstring
    # (GET /api/v1/accounts/search)
    response = self.__requester.request(
        'GET', 'accounts/search',
        _kwargs=combine_kwargs(**kwargs)
    )
    return response.json()
Return a list of up to 5 matching account domains. Partial matches on name and domain are supported. :calls: `GET /api/v1/accounts/search \ <https://canvas.instructure.com/doc/api/account_domain_lookups.html#method.account_domain_lookups.search>`_ :rtype: dict
7,342
def from_definition(self, table: Table, version: int):
    self.table(table)
    self.add_columns(*table.columns.get_with_version(version))
    return self
Add all columns from the table added in the specified version
7,343
def _estimate_gas(self, initializer: bytes, salt_nonce: int, payment_token: str,
                  payment_receiver: str) -> int:
    gas: int = self.proxy_factory_contract.functions.createProxyWithNonce(
        self.master_copy_address, initializer, salt_nonce).estimateGas()
    gas += 55000
    return gas
Gas estimation done using web3 and calling the node Payment cannot be estimated, as no ether is in the address. So we add some gas later. :param initializer: Data initializer to send to GnosisSafe setup method :param salt_nonce: Nonce that will be used to generate the salt to calculate the address of the new proxy contract. :return: Total gas estimation
7,344
def items_iter(self, limit):
    pages = (page.get() for page in self._pages())
    items = itertools.chain.from_iterable(
        (p[self.ITEM_KEY] for p in pages)
    )
    if limit is not None:
        items = itertools.islice(items, limit)
    return items
Get an iterator of the 'items' in each page. Instead of a feature collection from each page, the iterator yields the features. :param int limit: The number of 'items' to limit to. :return: iter of items in page
7,345
def preprocess_data(self, div=1, downsample=0, sum_norm=None, include_genes=None, exclude_genes=None, include_cells=None, exclude_cells=None, norm=, min_expression=1, thresh=0.01, filter_genes=True): try: D= self.adata_raw.X self.adata = self.adata_raw.copy() except AttributeError: print() cell_names = np.array(list(self.adata_raw.obs_names)) idx_cells = np.arange(D.shape[0]) if(include_cells is not None): include_cells = np.array(list(include_cells)) idx2 = np.where(np.in1d(cell_names, include_cells))[0] idx_cells = np.array(list(set(idx2) & set(idx_cells))) if(exclude_cells is not None): exclude_cells = np.array(list(exclude_cells)) idx4 = np.where(np.in1d(cell_names, exclude_cells, invert=True))[0] idx_cells = np.array(list(set(idx4) & set(idx_cells))) if downsample > 0: numcells = int(D.shape[0] / downsample) rand_ind = np.random.choice(np.arange(D.shape[0]), size=numcells, replace=False) idx_cells = np.array(list(set(rand_ind) & set(idx_cells))) else: numcells = D.shape[0] mask_cells = np.zeros(D.shape[0], dtype=) mask_cells[idx_cells] = True self.adata = self.adata_raw[mask_cells,:].copy() D = self.adata.X if isinstance(D,np.ndarray): D=sp.csr_matrix(D,dtype=) else: D=D.astype() D.sort_indices() if(D.getformat() == ): D=D.tocsr(); if (sum_norm == and norm != ): s = D.sum(1).A.flatten() sum_norm = np.median(s) D = D.multiply(1 / s[:,None] * sum_norm).tocsr() elif (sum_norm == and norm != ): s = D.sum(0).A.flatten() sum_norm = np.median(s) s[s==0]=1 D = D.multiply(1 / s[None,:] * sum_norm).tocsr() elif sum_norm is not None and norm != : D = D.multiply(1 / D.sum(1).A.flatten()[:, None] * sum_norm).tocsr() self.adata.X = D if norm is None: D.data[:] = (D.data / div) elif(norm.lower() == ): D.data[:] = np.log2(D.data / div + 1) elif(norm.lower() == ): D.data[:] = np.sqrt(D.data/div) + np.sqrt(D.data/div+1) elif norm.lower() == : ni = D.sum(1).A.flatten() pj = (D.sum(0) / D.sum()).A.flatten() col = D.indices row=[] for i in range(D.shape[0]): row.append(i*np.ones(D.indptr[i+1]-D.indptr[i])) row = np.concatenate(row).astype() mu = sp.coo_matrix((ni[row]*pj[col], (row,col))).tocsr() mu2 = mu.copy() mu2.data[:]=mu2.data**2 mu2 = mu2.multiply(1/ni[:,None]) mu.data[:] = (D.data - mu.data) / np.sqrt(mu.data - mu2.data) self.adata.X = mu if sum_norm is None: sum_norm = np.median(ni) D = D.multiply(1 / ni[:,None] * sum_norm).tocsr() D.data[:] = np.log2(D.data / div + 1) else: D.data[:] = (D.data / div) idx = np.where(D.data <= min_expression)[0] D.data[idx] = 0 gene_names = np.array(list(self.adata.var_names)) idx_genes = np.arange(D.shape[1]) if(include_genes is not None): include_genes = np.array(list(include_genes)) idx = np.where(np.in1d(gene_names, include_genes))[0] idx_genes = np.array(list(set(idx) & set(idx_genes))) if(exclude_genes is not None): exclude_genes = np.array(list(exclude_genes)) idx3 = np.where(np.in1d(gene_names, exclude_genes, invert=True))[0] idx_genes = np.array(list(set(idx3) & set(idx_genes))) if(filter_genes): a, ct = np.unique(D.indices, return_counts=True) c = np.zeros(D.shape[1]) c[a] = ct keep = np.where(np.logical_and(c / D.shape[0] > thresh, c / D.shape[0] <= 1 - thresh))[0] idx_genes = np.array(list(set(keep) & set(idx_genes))) mask_genes = np.zeros(D.shape[1], dtype=) mask_genes[idx_genes] = True self.adata.X = self.adata.X.multiply(mask_genes[None, :]).tocsr() self.adata.X.eliminate_zeros() self.adata.var[]=mask_genes if norm == : self.adata.layers[] = D.multiply(mask_genes[None, :]).tocsr() self.adata.layers[].eliminate_zeros() else: self.adata.layers[] = 
self.adata.X
Log-normalizes and filters the expression data. Parameters ---------- div : float, optional, default 1 The factor by which the gene expression will be divided prior to log normalization. downsample : float, optional, default 0 The factor by which to randomly downsample the data. If 0, the data will not be downsampled. sum_norm : str or float, optional, default None If a float, the total number of transcripts in each cell will be normalized to this value prior to normalization and filtering. Otherwise, nothing happens. If 'cell_median', each cell is normalized to have the median total read count per cell. If 'gene_median', each gene is normalized to have the median total read count per gene. norm : str, optional, default 'log' If 'log', log-normalizes the expression data. If 'ftt', applies the Freeman-Tukey variance-stabilization transformation. If 'multinomial', applies the Pearson-residual transformation (this is experimental and should only be used for raw, un-normalized UMI datasets). If None, the data is not normalized. include_genes : array-like of string, optional, default None A vector of gene names or indices that specifies the genes to keep. All other genes will be filtered out. Gene names are case- sensitive. exclude_genes : array-like of string, optional, default None A vector of gene names or indices that specifies the genes to exclude. These genes will be filtered out. Gene names are case- sensitive. include_cells : array-like of string, optional, default None A vector of cell names that specifies the cells to keep. All other cells will be filtered out. Cell names are case-sensitive. exclude_cells : array-like of string, optional, default None A vector of cell names that specifies the cells to exclude. Thses cells will be filtered out. Cell names are case-sensitive. min_expression : float, optional, default 1 The threshold above which a gene is considered expressed. Gene expression values less than 'min_expression' are set to zero. thresh : float, optional, default 0.2 Keep genes expressed in greater than 'thresh'*100 % of cells and less than (1-'thresh')*100 % of cells, where a gene is considered expressed if its expression value exceeds 'min_expression'. filter_genes : bool, optional, default True Setting this to False turns off filtering operations aside from removing genes with zero expression across all cells. Genes passed in exclude_genes or not passed in include_genes will still be filtered.
7,346
def imresize(img, size, interpolate="bilinear", channel_first=False): img = _imresize_before(img, size, channel_first, interpolate, list(interpolations_map.keys())) expand_flag = False if len(img.shape) == 3 and img.shape[-1] == 1: img = img.reshape(img.shape[0], img.shape[1]) expand_flag = True resample = interpolations_map[interpolate] if img.dtype == np.uint8: resized = pil_resize_from_ndarray(img, size, resample) else: dtype = img.dtype img_float32 = np.asarray(img, np.float32) if len(img.shape) == 3: resized = np.stack([pil_resize_from_ndarray(img_float32[..., i], size, resample) for i in range(img.shape[-1])], axis=2) else: resized = pil_resize_from_ndarray(img_float32, size, resample) resized = np.asarray(resized, dtype) if expand_flag: resized = resized[..., np.newaxis] return _imresize_after(resized, channel_first)
Resize image by pil module. Args: img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) for RGB or (height, width) for gray-scale by default. size (tuple of int): (width, height). channel_first (bool): This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width). Default value is False, which means the img shape is (height, width, channels). interpolate (str): must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"] Returns: numpy.ndarray whose shape is ('size'[1], 'size'[0], channel) or (size[1], size[0])
7,347
def build_tf_to_pytorch_map(model, config): tf_to_pt_map = {} if hasattr(model, ): tf_to_pt_map.update({ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight, "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias}) for i, (out_l, proj_l, tie_proj) in enumerate(zip( model.crit.out_layers, model.crit.out_projs, config.tie_projs)): layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i if config.tie_weight: tf_to_pt_map.update({ layer_str + : out_l.bias}) else: raise NotImplementedError return tf_to_pt_map
A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
7,348
def createphysicalnetwork(type, create_processor = partial(default_processor, excluding=(, )), reorder_dict = default_iterate_dict): def walker(walk, write, timestamp, parameters_dict): for key, parameters in reorder_dict(parameters_dict): try: value = walk(key) except KeyError: pass else: id_ = parameters[] new_network = create_new(PhysicalNetwork, value, id_) new_network.type = type create_processor(new_network, walk, write, parameters=parameters) write(key, new_network) new_networkmap = PhysicalNetworkMap.create_instance(id_) new_networkmap.network = new_network.create_weakreference() write(new_networkmap.getkey(), new_networkmap) try: physet = walk(PhysicalNetworkSet.default_key()) except KeyError: pass else: physet.set.dataset().add(new_network.create_weakreference()) write(physet.getkey(), physet) return walker
:param type: physical network type :param create_processor: create_processor(physicalnetwork, walk, write, \*, parameters)
7,349
def energy_density(self, strain, convert_GPa_to_eV=True):
    e_density = np.sum(self.calculate_stress(strain) * strain) / self.order
    if convert_GPa_to_eV:
        e_density *= self.GPa_to_eV_A3
    return e_density
Calculates the elastic energy density due to a strain
7,350
def _folder_item_instrument(self, analysis_brain, item): item[] = if not analysis_brain.getInstrumentEntryOfResults: item[] = _() item[][] = \ .format(t(_())) return is_editable = self.is_analysis_edition_allowed(analysis_brain) self.show_methodinstr_columns = True instrument = self.get_instrument(analysis_brain) if is_editable: voc = self.get_instruments_vocabulary(analysis_brain) if voc: item[] = instrument.UID() if instrument else item[][] = voc item[].append() return if instrument: instrument_title = instrument and instrument.Title() or instrument_link = get_link(instrument.absolute_url(), instrument_title) item[] = instrument_title item[][] = instrument_link return
Fills the analysis' instrument to the item passed in. :param analysis_brain: Brain that represents an analysis :param item: analysis' dictionary counterpart that represents a row
7,351
def formfield(self, form_class=None, choices_form_class=None, **kwargs): defaults = { : not self.blank, : capfirst(self.verbose_name), : self.help_text, } if self.has_default(): if callable(self.default): defaults[] = self.default defaults[] = True else: defaults[] = self.get_default() include_blank = (self.blank or not (self.has_default() or in kwargs)) choices = [BLANK_CHOICE_DASH, ] if include_blank else [] choices.extend([ ( x.name, getattr(x, , x.name) or x.name, getattr(x, , None) or None ) for x in self.choices_class.constants() ]) defaults[] = choices defaults[] = self.to_python if self.null: defaults[] = None del kwargs[k] defaults.update(kwargs) form_class = choices_form_class or ChoicesFormField return form_class(**defaults)
Returns a django.forms.Field instance for this database Field.
7,352
def extendedboldqc(auth, label, scan_ids=None, project=None, aid=None): .........AB1234C if not aid: aid = accession(auth, label, project) path = params = { : , : .join(extendedboldqc.columns.keys()) } if project: params[] = project params[] = aid _,result = _get(auth, path, , autobox=True, params=params) for result in result[][]: if scan_ids == None or result[] in scan_ids: data = dict() for k,v in iter(extendedboldqc.columns.items()): data[v] = result[k] yield data
Get ExtendedBOLDQC data as a sequence of dictionaries. Example: >>> import yaxil >>> import json >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> for eqc in yaxil.extendedboldqc2(auth, 'AB1234C') ... print(json.dumps(eqc, indent=2)) :param auth: XNAT authentication object :type auth: :mod:`yaxil.XnatAuth` :param label: XNAT MR Session label :type label: str :param scan_ids: Scan numbers to return :type scan_ids: list :param project: XNAT MR Session project :type project: str :param aid: XNAT Accession ID :type aid: str :returns: Generator of scan data dictionaries :rtype: :mod:`dict`
7,353
def autosave_all(self):
    for index in range(self.stack.get_stack_count()):
        self.autosave(index)
Autosave all opened files.
7,354
def comment_form(context, object):
    user = context.get("user")
    form_class = context.get("form", CommentForm)
    form = form_class(obj=object, user=user)
    return form
Usage: {% comment_form obj as comment_form %} Will read the `user` var out of the context to know whether the form should be for an auth'd user or not.
7,355
def render_image(**kwargs):
    # markup literals reconstructed from the template given in the docstring:
    # <img alt="{alt_text}" title="{title}" src="{url}">
    html = ''
    url = kwargs.get('url', None)
    if url:
        html = '<img'
        alt_text = kwargs.get('alt_text', None)
        if alt_text:
            html += ' alt="{}"'.format(alt_text)
        title = kwargs.get('title', None)
        if title:
            html += ' title="{}"'.format(title)
        html += ' src="{}">'.format(url)
    return html
Unstrict template block for rendering an image: <img alt="{alt_text}" title="{title}" src="{url}">
7,356
def path_order(x, y):
    if x == y:
        return 0
    xg = get_grist(x)
    yg = get_grist(y)
    if yg and not xg:
        return -1
    elif xg and not yg:
        return 1
    else:
        if not xg:
            x = feature.expand_subfeatures([x])
            y = feature.expand_subfeatures([y])
        if x < y:
            return -1
        elif x > y:
            return 1
        else:
            return 0
Helper for as_path, below. Orders properties with the implicit ones first, and within the two sections in alphabetical order of feature name.
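Because path_order is a classic two-argument comparator, it would typically be handed to sorted() through functools.cmp_to_key; a sketch with illustrative property strings:

    import functools

    properties = ['<toolset>gcc', 'debug', '<variant>release', 'static']
    ordered = sorted(properties, key=functools.cmp_to_key(path_order))
    # implicit (ungristed) properties such as 'debug' sort before gristed ones like '<toolset>gcc'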
7,357
def _make_cloud_datastore_context(app_id, external_app_ids=()): from . import model if not datastore_pbs._CLOUD_DATASTORE_ENABLED: raise datastore_errors.BadArgumentError( datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE) import googledatastore try: from google.appengine.datastore import cloud_datastore_v1_remote_stub except ImportError: from google3.apphosting.datastore import cloud_datastore_v1_remote_stub current_app_id = os.environ.get(, None) if current_app_id and current_app_id != app_id: raise ValueError( % (app_id, current_app_id)) os.environ[] = app_id id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids)) project_id = id_resolver.resolve_project_id(app_id) endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id) datastore = googledatastore.Datastore( project_endpoint=endpoint, credentials=googledatastore.helper.get_credentials_from_env()) conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1, _id_resolver=id_resolver) try: stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore) apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1, stub) except: pass try: apiproxy_stub_map.apiproxy.RegisterStub(, _ThrowingStub()) except: pass try: apiproxy_stub_map.apiproxy.RegisterStub(, _ThrowingStub()) except: pass return make_context(conn=conn)
Creates a new context to connect to a remote Cloud Datastore instance. This should only be used outside of Google App Engine. Args: app_id: The application id to connect to. This differs from the project id as it may have an additional prefix, e.g. "s~" or "e~". external_app_ids: A list of apps that may be referenced by data in your application. For example, if you are connected to s~my-app and store keys for s~my-other-app, you should include s~my-other-app in the external_apps list. Returns: An ndb.Context that can connect to a Remote Cloud Datastore. You can use this context by passing it to ndb.set_context.
7,358
def cyber_observable_check(original_function):
    def new_function(*args, **kwargs):
        if not has_cyber_observable_data(args[0]):
            return
        func = original_function(*args, **kwargs)
        if isinstance(func, Iterable):
            for x in original_function(*args, **kwargs):
                yield x
    new_function.__name__ = original_function.__name__
    return new_function
Decorator for functions that require cyber observable data.
7,359
def get_warmer(self, doc_types=None, indices=None, name=None, querystring_args=None): name = name or if not querystring_args: querystring_args = {} doc_types_str = if doc_types: doc_types_str = + .join(doc_types) path = .format(.join(indices), doc_types_str, name) return self._send_request(method=, path=path, params=querystring_args)
Retrieve warmer :param doc_types: list of document types :param warmer: anything with ``serialize`` method or a dictionary :param name: warmer name. If not provided, all warmers will be returned :param querystring_args: additional arguments passed as GET params to ES
7,360
def _next_pattern(self): current_state = self.state_stack[-1] position = self._position for pattern in self.patterns: if current_state not in pattern.states: continue m = pattern.regex.match(self.source, position) if not m: continue position = m.end() token = None if pattern.next_state: self.state_stack.append(pattern.next_state) if pattern.action: callback = getattr(self, pattern.action, None) if callback is None: raise RuntimeError( "No method defined for pattern action %s!" % pattern.action) if "token" in m.groups(): value = m.group("token") else: value = m.group(0) token = callback(string=value, match=m, pattern=pattern) self._position = position return token self._error("Don't know how to match next. Did you forget quotes?", start=self._position, end=self._position + 1)
Parses the next pattern by matching each in turn.
7,361
def _start_loop(self, websocket, event_handler): log.debug() while True: try: yield from asyncio.wait_for( self._wait_for_message(websocket, event_handler), timeout=self.options[] ) except asyncio.TimeoutError: yield from websocket.pong() log.debug("Sending heartbeat...") continue
We will listen for websockets events, sending a heartbeat/pong everytime we react a TimeoutError. If we don't the webserver would close the idle connection, forcing us to reconnect.
7,362
def to_file(self, filename, format=, header=None, errors=False, **kwargs): if format is : if errors is True and self.errors is None: raise ValueError( ) if self.omega is None: omega = 0. else: omega = self.omega with open(filename, mode=) as file: if header is not None: file.write(header + ) file.write(.format( self.r0, self.gm, omega, self.lmax)) for l in range(self.lmax+1): for m in range(l+1): if errors is True: file.write( .format(l, m, self.coeffs[0, l, m], self.coeffs[1, l, m], self.errors[0, l, m], self.errors[1, l, m])) else: file.write( .format(l, m, self.coeffs[0, l, m], self.coeffs[1, l, m])) elif format is : _np.save(filename, self.coeffs, **kwargs) else: raise NotImplementedError( .format(repr(format)))
Save spherical harmonic coefficients to a file. Usage ----- x.to_file(filename, [format='shtools', header, errors]) x.to_file(filename, [format='npy', **kwargs]) Parameters ---------- filename : str Name of the output file. format : str, optional, default = 'shtools' 'shtools' or 'npy'. See method from_file() for more information. header : str, optional, default = None A header string written to an 'shtools'-formatted file directly before the spherical harmonic coefficients. errors : bool, optional, default = False If True, save the errors in the file (for 'shtools' formatted files only). **kwargs : keyword argument list, optional for format = 'npy' Keyword arguments of numpy.save(). Description ----------- If format='shtools', the coefficients and meta-data will be written to an ascii formatted file. The first line is an optional user provided header line, and the following line provides the attributes r0, gm, omega, and lmax. The spherical harmonic coefficients are then listed, with increasing degree and order, with the format l, m, coeffs[0, l, m], coeffs[1, l, m] where l and m are the spherical harmonic degree and order, respectively. If the errors are to be saved, the format of each line will be l, m, coeffs[0, l, m], coeffs[1, l, m], error[0, l, m], error[1, l, m] If format='npy', the spherical harmonic coefficients (but not the meta-data nor errors) will be saved to a binary numpy 'npy' file using numpy.save().
7,363
def clear(self):
    self.root = None
    for leaf in self.leaves:
        leaf.p, leaf.sib, leaf.side = (None, ) * 3
Clears the Merkle Tree by releasing the Merkle root and each leaf's references, the rest should be garbage collected. This may be useful for situations where you want to take an existing tree, make changes to the leaves, but leave it uncalculated for some time, without node references that are no longer correct still hanging around. Usually it is better just to make a new tree.
7,364
def uniqueify_all(init_reqs, *other_reqs):
    union = set(init_reqs)
    for reqs in other_reqs:
        union.update(reqs)
    return list(union)
Find the union of all the given requirements.
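A tiny illustration of the helper above (the result is a set union, so ordering is not preserved):

    reqs = uniqueify_all(['requests', 'six'], ['six', 'attrs'], ['requests'])
    print(sorted(reqs))   # ['attrs', 'requests', 'six']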
7,365
def msgDict(d,matching=None,sep1="=",sep2="\n",sort=True,cantEndWith=None): msg="" if "record" in str(type(d)): keys=d.dtype.names else: keys=d.keys() if sort: keys=sorted(keys) for key in keys: if key[0]=="_": continue if matching: if not key in matching: continue if cantEndWith and key[-len(cantEndWith)]==cantEndWith: continue if in str(type(d[key])): s="%.02f"%d[key] else: s=str(d[key]) if "object" in s: s= msg+=key+sep1+s+sep2 return msg.strip()
convert a dictionary to a pretty formatted string.
7,366
def create_scoped_session(self, options=None):
    if options is None:
        options = {}
    # the 'scopefunc' key is confirmed by the docstring; 'query_cls' is reconstructed
    scopefunc = options.pop('scopefunc', _app_ctx_stack.__ident_func__)
    options.setdefault('query_cls', self.Query)
    return orm.scoped_session(
        self.create_session(options), scopefunc=scopefunc
    )
Create a :class:`~sqlalchemy.orm.scoping.scoped_session` on the factory from :meth:`create_session`. An extra key ``'scopefunc'`` can be set on the ``options`` dict to specify a custom scope function. If it's not provided, Flask's app context stack identity is used. This will ensure that sessions are created and removed with the request/response cycle, and should be fine in most cases. :param options: dict of keyword arguments passed to session class in ``create_session``
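A hedged sketch of passing a custom scope function, assuming a Flask-SQLAlchemy style ``db`` object; the greenlet scope is just an example choice:

    from greenlet import getcurrent

    session = db.create_scoped_session(options={'scopefunc': getcurrent})

Without the option, the app-context identity function is used, which ties session lifetime to the request/response cycle as the docstring notes.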
7,367
def select(self, key, where=None, start=None, stop=None, columns=None,
           iterator=False, chunksize=None, auto_close=False, **kwargs):
    group = self.get_node(key)
    if group is None:
        # error message reconstructed; the original string literal was lost
        raise KeyError('No object named {key} in the file'.format(key=key))

    where = _ensure_term(where, scope_level=1)
    s = self._create_storer(group)
    s.infer_axes()

    # function to call on each iteration
    def func(_start, _stop, _where):
        return s.read(start=_start, stop=_stop, where=_where,
                      columns=columns)

    it = TableIterator(self, s, func, where=where, nrows=s.nrows,
                       start=start, stop=stop, iterator=iterator,
                       chunksize=chunksize, auto_close=auto_close)
    return it.get_result()
Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object
7,368
def save_to_local(self, callback_etat=print): callback_etat("Aquisition...", 0, 3) d = self.dumps() s = json.dumps(d, indent=4, cls=formats.JsonEncoder) callback_etat("Chiffrement...", 1, 3) s = security.protege_data(s, True) callback_etat("Enregistrement...", 2, 3) try: with open(self.LOCAL_DB_PATH, ) as f: f.write(s) except (FileNotFoundError): logging.exception(self.__class__.__name__) raise StructureError("Chemin de sauvegarde introuvable !")
Saved current in memory base to local file. It's a backup, not a convenient way to update datas :param callback_etat: state callback, taking str,int,int as args
7,369
def addchild(self, startip, endip, name, description):
    add_child_ip_scope(self.auth, self.url, startip, endip, name,
                       description, self.id)
Method takes input of str startip, str endip, name, and description and adds a child scope. The startip and endip MUST be in the IP address range of the parent scope. :param startip: str of ipv4 address of the first address in the child scope :param endip: str of ipv4 address of the last address in the child scope :param name: name of the owner of the child scope :param description: description of the child scope :return:
7,370
def generic_find_fk_constraint_names(table, columns, referenced, insp):
    names = set()
    # dict keys reconstructed from the SQLAlchemy inspector's foreign-key format
    for fk in insp.get_foreign_keys(table):
        if fk['referred_table'] == referenced and \
                set(fk['constrained_columns']) == columns:
            names.add(fk['name'])
    return names
Utility to find foreign-key constraint names in alembic migrations
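A sketch of how the utility above might be called inside an Alembic migration, assuming a SQLAlchemy inspector; the table and column names are illustrative:

    from sqlalchemy import inspect

    insp = inspect(op.get_bind())   # `op` is the Alembic operations proxy available in migrations
    names = generic_find_fk_constraint_names('child', {'parent_id'}, 'parent', insp)
    for name in names:
        op.drop_constraint(name, 'child', type_='foreignkey')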
7,371
def parse(url):
    result = urlparse.urlparse(nstr(url))
    path = result.scheme + '://' + result.netloc  # '://' literal reconstructed
    if result.path:
        path += result.path
    query = {}
    if result.query:
        url_query = urlparse.parse_qs(result.query)
        for key, value in url_query.items():
            if type(value) == list and len(value) == 1:
                value = value[0]
            query[key] = value
    return path, query, result.fragment
Parses out the information for this url, returning its components expanded out to Python objects. :param url | <str> :return (<str> path, <dict> query, <str> fragment)
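A quick illustration of the returned components (assuming the scheme/netloc join reconstructed in the code above):

    path, query, fragment = parse('https://example.com/search?q=python&page=2#results')
    # path     == 'https://example.com/search'
    # query    == {'q': 'python', 'page': '2'}   -- single-element lists are unwrapped
    # fragment == 'results'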
7,372
def augment_audio_with_sox(path, sample_rate, tempo, gain):
    with NamedTemporaryFile(suffix=".wav") as augmented_file:
        augmented_filename = augmented_file.name
        sox_augment_params = ["tempo", "{:.3f}".format(tempo),
                              "gain", "{:.3f}".format(gain)]
        sox_params = "sox \"{}\" -r {} -c 1 -b 16 {} {} >/dev/null 2>&1".format(
            path, sample_rate, augmented_filename, " ".join(sox_augment_params))
        os.system(sox_params)
        y = load_audio(augmented_filename)
        return y
Changes tempo and gain of the recording with sox and loads it.
7,373
def get_title():
    MAX_LEN = 256
    buffer_ = create_unicode_buffer(MAX_LEN)
    kernel32.GetConsoleTitleW(buffer_, MAX_LEN)
    log.debug('%s', buffer_.value)  # log format string assumed; original literal lost in extraction
    return buffer_.value
Returns console title string. https://docs.microsoft.com/en-us/windows/console/getconsoletitle
7,374
def check(self, order, sids): payload = "{}" raw_msg = self.client.blpop(self.channel, timeout=self.timeout) if raw_msg: _, payload = raw_msg msg = json.loads(payload.replace(""utf-8ordering {} of {}skipping unknown symbol {}'.format(sid))
Check if a message is available
7,375
def value(val, transform=None):
    if transform:
        return dict(value=val, transform=transform)
    return dict(value=val)
Convenience function to explicitly return a "value" specification for a Bokeh :class:`~bokeh.core.properties.DataSpec` property. Args: val (any) : a fixed value to specify for a ``DataSpec`` property. transform (Transform, optional) : a transform to apply (default: None) Returns: dict : ``{ "value": name }`` .. note:: String values for property specifications are by default interpreted as field names. This function is especially useful when you want to specify a fixed value with text properties. Example: .. code-block:: python # The following will take text values to render from a data source # column "text_column", but use a fixed value "12pt" for font size p.text("x", "y", text="text_column", text_font_size=value("12pt"), source=source)
7,376
def create_geom_filter(request, mapped_class, geom_attr): tolerance = float(request.params.get(, 0.0)) epsg = None if in request.params: epsg = int(request.params[]) box = request.params.get() shape = None if box is not None: box = [float(x) for x in box.split()] shape = Polygon(((box[0], box[1]), (box[0], box[3]), (box[2], box[3]), (box[2], box[1]), (box[0], box[1]))) elif in request.params and in request.params: shape = Point(float(request.params[]), float(request.params[])) elif in request.params: shape = loads(request.params[], object_hook=GeoJSON.to_instance) shape = asShape(shape) if shape is None: return None column_epsg = _get_col_epsg(mapped_class, geom_attr) geom_attr = getattr(mapped_class, geom_attr) epsg = column_epsg if epsg is None else epsg if epsg != column_epsg: geom_attr = func.ST_Transform(geom_attr, epsg) geometry = from_shape(shape, srid=epsg) return func.ST_DWITHIN(geom_attr, geometry, tolerance)
Create MapFish geometry filter based on the request params. Either a box or within or geometry filter, depending on the request params. Additional named arguments are passed to the spatial filter. Arguments: request the request. mapped_class the SQLAlchemy mapped class. geom_attr the key of the geometry property as defined in the SQLAlchemy mapper. If you use ``declarative_base`` this is the name of the geometry attribute as defined in the mapped class.
7,377
def getref():
    ans = dict(graphtable=GRAPHTABLE,
               comptable=COMPTABLE,
               thermtable=THERMTABLE,
               area=PRIMARY_AREA,
               waveset=_default_waveset_str)
    return ans
Current default values for graph and component tables, primary area, and wavelength set. .. note:: Also see :func:`setref`. Returns ------- ans : dict Mapping of parameter names to their current values.
7,378
def build_board_checkers():
    grd = Grid(8, 8, ["B", "W"])
    for c in range(4):
        grd.set_tile(0, (c*2) - 1, "B")
        grd.set_tile(1, (c*2) - 0, "B")
        grd.set_tile(6, (c*2) + 1, "W")
        grd.set_tile(7, (c*2) - 0, "W")
    print(grd)
    return grd
builds a checkers starting board

Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
7,379
def write_config(config_data: Dict[str, Path], path: Path = None):
    path = Path(path) if path else infer_config_base_dir()
    valid_names = [ce.name for ce in CONFIG_ELEMENTS]
    try:
        os.makedirs(path, exist_ok=True)
        # file mode reconstructed: the index is written as text JSON
        with (path/_CONFIG_FILENAME).open('w') as base_f:
            json.dump({k: str(v) for k, v in config_data.items()
                       if k in valid_names},
                      base_f, indent=2)
    except OSError as e:
        sys.stderr.write("Config index write to {} failed: {}\n"
                         .format(path/_CONFIG_FILENAME, e))
Save the config file. :param config_data: The index to save :param base_dir: The place to save the file. If ``None``, :py:meth:`infer_config_base_dir()` will be used Only keys that are in the config elements will be saved.
7,380
def copy_fields(layer, fields_to_copy):
    for field in fields_to_copy:
        index = layer.fields().lookupField(field)
        if index != -1:
            layer.startEditing()
            source_field = layer.fields().at(index)
            new_field = QgsField(source_field)
            new_field.setName(fields_to_copy[field])
            layer.addAttribute(new_field)
            new_index = layer.fields().lookupField(fields_to_copy[field])
            for feature in layer.getFeatures():
                attributes = feature.attributes()
                source_value = attributes[index]
                layer.changeAttributeValue(
                    feature.id(), new_index, source_value)
            layer.commitChanges()
            layer.updateFields()
Copy fields inside an attribute table. :param layer: The vector layer. :type layer: QgsVectorLayer :param fields_to_copy: Dictionary of fields to copy. :type fields_to_copy: dict
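A short usage sketch for the helper above; the layer and field names are illustrative and assume a QGIS vector layer is already loaded:

    # Duplicate the "population" attribute into a new "population_2020" field in place.
    copy_fields(layer, {'population': 'population_2020'})

Per the loop in the code, each dictionary key is an existing field name and each value is the name given to its copy.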
7,381
def main(sample_id, fastq_pair, gsize, minimum_coverage, opts):

    logger.info("Starting integrity coverage main")

    # The '-e' option skips the encoding/phred guessing step
    if "-e" in opts:
        skip_encoding = True
    else:
        skip_encoding = False

    # Information for the encoding guess
    gmin, gmax = 99, 0
    encoding = []
    phred = None

    # Information for the coverage estimate
    chars = 0
    nreads = 0

    # Information on maximum read length
    max_read_length = 0

    # Open each FastQ file with the appropriate handler for its compression
    file_objects = []
    for fastq in fastq_pair:

        logger.info("Processing file {}".format(fastq))

        logger.info("[{}] Guessing file compression".format(fastq))
        ftype = guess_file_compression(fastq)

        if ftype:
            logger.info("[{}] Found file compression: {}".format(
                fastq, ftype))
            file_objects.append(COPEN[ftype](fastq, "rt"))
        else:
            logger.info("[{}] File compression not found. Assuming an "
                        "uncompressed file".format(fastq))
            file_objects.append(open(fastq))

    logger.info("Starting FastQ file parsing")

    with open("{}_encoding".format(sample_id), "w") as enc_fh, \
            open("{}_phred".format(sample_id), "w") as phred_fh, \
            open("{}_coverage".format(sample_id), "w") as cov_fh, \
            open("{}_report".format(sample_id), "w") as cov_rep, \
            open("{}_max_len".format(sample_id), "w") as len_fh, \
            open(".report.json", "w") as json_report, \
            open(".status", "w") as status_fh, \
            open(".fail", "w") as fail_fh:

        try:
            # Both pair files are parsed as one continuous stream
            for i, line in enumerate(chain(*file_objects)):

                # Every 4th line is a quality line; use it for the
                # encoding/phred estimate unless '-e' was given
                if (i + 1) % 4 == 0 and not skip_encoding:
                    lmin, lmax = get_qual_range(line.strip())

                    if lmin < gmin or lmax > gmax:
                        gmin, gmax = min(lmin, gmin), max(lmax, gmax)
                        encoding, phred = get_encodings_in_range(gmin, gmax)
                        logger.debug(
                            "Updating estimates at line {} with range {} to"
                            " '{}' (encoding) and '{}' (phred)".format(
                                i, [lmin, lmax], encoding, phred))

                # Every 2nd line of each record is the sequence; use it for
                # the coverage estimate and the maximum read length
                if (i + 3) % 4 == 0:
                    read_len = len(line.strip())
                    chars += read_len
                    nreads += 1

                    if read_len > max_read_length:
                        logger.debug("Updating maximum read length at line "
                                     "{} to {}".format(i, read_len))
                        max_read_length = read_len

            logger.info("Finished FastQ file parsing")

            # Expected coverage from total base pairs and genome size (Mb)
            exp_coverage = round(chars / (gsize * 1e6), 2)

            if "-e" not in opts:
                json_dic = {
                    "tableRow": [{
                        "sample": sample_id,
                        "data": [
                            {"header": "Raw BP",
                             "value": chars,
                             "table": "qc",
                             "columnBar": True},
                            {"header": "Reads",
                             "value": nreads,
                             "table": "qc",
                             "columnBar": True},
                            {"header": "Coverage",
                             "value": exp_coverage,
                             "table": "qc",
                             "columnBar": True,
                             "failThreshold": minimum_coverage}
                        ]
                    }],
                    "plotData": [{
                        "sample": sample_id,
                        "data": {
                            "sparkline": chars
                        }
                    }],
                }
            else:
                json_dic = {
                    "tableRow": [{
                        "sample": sample_id,
                        "data": [
                            {"header": "Coverage",
                             "value": exp_coverage,
                             "table": "qc",
                             "columnBar": True,
                             "failThreshold": minimum_coverage}
                        ],
                    }],
                }

            # Write the encoding and phred guesses, if any were made
            if len(encoding) > 0:
                encoding = set(encoding)
                phred = set(phred)
                enc = "{}".format(",".join([x for x in encoding]))
                phred = "{}".format(",".join(str(x) for x in phred))
                logger.info("Encoding set to {}".format(enc))
                logger.info("Phred set to {}".format(phred))
                enc_fh.write(enc)
                phred_fh.write(phred)
            else:
                if not skip_encoding:
                    encoding_msg = "Could not guess encoding and phred from " \
                                   "FastQ"
                    logger.warning(encoding_msg)
                    json_dic["warnings"] = [{
                        "sample": sample_id,
                        "table": "qc",
                        "value": [encoding_msg]
                    }]
                enc_fh.write("None")
                phred_fh.write("None")

            logger.info("Estimating coverage based on a genome size of "
                        "{}".format(gsize))
            logger.info("Expected coverage is {}".format(exp_coverage))

            if exp_coverage >= minimum_coverage:
                cov_rep.write("{},{},{}\\n".format(
                    sample_id, str(exp_coverage), "PASS"))
                cov_fh.write(str(exp_coverage))
                status_fh.write("pass")
            else:
                fail_msg = "Sample with low coverage ({}), below the {} " \
                           "threshold".format(exp_coverage, minimum_coverage)
                logger.error(fail_msg)
                fail_fh.write(fail_msg)
                cov_fh.write("fail")
                status_fh.write("fail")
                cov_rep.write("{},{},{}\\n".format(
                    sample_id, str(exp_coverage), "FAIL"))
                json_dic["fail"] = [{
                    "sample": sample_id,
                    "table": "qc",
                    "value": [fail_msg]
                }]

            json_report.write(json.dumps(json_dic, separators=(",", ":")))
            len_fh.write("{}".format(max_read_length))

        except EOFError:
            # Corrupt or truncated FastQ files raise EOFError on read
            logger.error("The FastQ files could not be correctly "
                         "parsed. They may be corrupt")
            for fh in [enc_fh, phred_fh, cov_fh, cov_rep, len_fh]:
                fh.write("corrupt")
            status_fh.write("fail")
            fail_fh.write("Could not read/parse FastQ. "
                          "Possibly corrupt file")
Main executor of the integrity_coverage template.

Parameters
----------
sample_id : str
    Sample Identification string.
fastq_pair : list
    Two element list containing the paired FastQ files.
gsize : float or int
    Estimate of genome size in Mb.
minimum_coverage : float or int
    Minimum coverage required for a sample to pass the coverage check.
opts : list
    List of arbitrary options. See `Expected input`_.
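A minimal invocation sketch for the function above. The sample identifier, file names, genome size, and option list are hypothetical; it assumes the template's helpers (guess_file_compression, get_qual_range, get_encodings_in_range, COPEN, logger) are importable from the surrounding module and that the paired FastQ files exist in the working directory.

if __name__ == "__main__":
    # Hypothetical inputs: a 2.1 Mb genome, 15x minimum coverage and no
    # extra options (pass ["-e"] to skip the encoding guess).
    main(
        sample_id="sampleA",
        fastq_pair=["sampleA_1.fastq.gz", "sampleA_2.fastq.gz"],
        gsize=2.1,
        minimum_coverage=15,
        opts=[],
    )

The results are written next to the input: sample-prefixed files such as sampleA_coverage and sampleA_encoding, plus the dotfiles .status, .fail and .report.json in the current directory.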
7,382
def countries(self):
    # Lazily instantiate the CountryList the first time it is accessed
    if self._countries is None:
        self._countries = CountryList(self._version, )
    return self._countries
Access the countries

:returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList
7,383
def download(
        self, file: Union[IO[bytes], asyncio.StreamWriter, None]=None,
        raw: bool=False, rewind: bool=True,
        duration_timeout: Optional[float]=None):
    if self._session_state != SessionState.request_sent:
        raise RuntimeError()

    # Remember the current offset so the file can be rewound afterwards
    if rewind and file and hasattr(file, 'seek'):
        original_offset = file.tell()
    else:
        original_offset = None

    # Stream writers (which expose drain()) are handled by the reader
    # directly; other file objects are wrapped in a Body
    if not hasattr(file, 'drain'):
        self._response.body = file

        if not isinstance(file, Body):
            self._response.body = Body(file)

    read_future = self._stream.read_body(self._request, self._response,
                                         file=file, raw=raw)

    try:
        yield from asyncio.wait_for(read_future, timeout=duration_timeout)
    except asyncio.TimeoutError as error:
        raise DurationTimeout(
            'Did not finish reading after {} seconds.'
            .format(duration_timeout)
        ) from error

    self._session_state = SessionState.response_received

    if original_offset is not None:
        file.seek(original_offset)

    self.event_dispatcher.notify(self.Event.end_response, self._response)
    self.recycle()
Read the response content into file.

Args:
    file: A file object or asyncio stream.
    raw: Whether chunked transfer encoding should be included.
    rewind: Seek the given file back to its original offset after
        reading is finished.
    duration_timeout: Maximum time in seconds within which the entire
        file must be read.

Be sure to call :meth:`start` first.

Coroutine.
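A hedged usage sketch for the method above: `session` stands in for an instance of this class after the request has been sent, and the output path is made up for the example.

import asyncio


@asyncio.coroutine
def save_response(session, path='response.bin'):
    # Stream the response body into a regular file; abort if the whole
    # transfer takes longer than 60 seconds.
    with open(path, 'wb') as out_file:
        yield from session.download(file=out_file, duration_timeout=60)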
7,384
def check(text):
    err = "MSC104"
    msg = u"Don't fail to capitalize roman numeral abbreviations."

    pwd_regex = " (I(i*)|i*)"

    password = [
        "World War{}".format(pwd_regex),
    ]

    return blacklist(text, password, err, msg)
Check the text.
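A small usage sketch, assuming `blacklist` follows the usual proselint-style convention of returning a list of error tuples, one per match in the text.

suggestions = check("He wrote a book about World War Ii.")
for suggestion in suggestions:
    print(suggestion)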
7,385
def apply_noise(self, noise_weights=None, uniform_amount=0.1):
    for node in self.node_list:
        for link in node.link_list:
            # Either draw from the weight profile or use uniform noise
            if noise_weights is not None:
                noise_amount = round(weighted_rand(noise_weights), 3)
            else:
                noise_amount = round(random.uniform(
                    0, link.weight * uniform_amount), 3)
            link.weight += noise_amount
Add noise to every link in the network.

Can use either a ``uniform_amount`` or a ``noise_weight`` weight profile.
If ``noise_weight`` is set, ``uniform_amount`` will be ignored.

Args:
    noise_weights (list): a list of weight tuples of form
        ``(float, float)`` corresponding to ``(amount, weight)``
        describing the noise to be added to each link in the graph
    uniform_amount (float): the maximum amount of uniform noise
        to be applied if ``noise_weights`` is not set

Returns: None

Example:
    >>> from blur.markov.node import Node
    >>> node_1 = Node('One')
    >>> node_2 = Node('Two')
    >>> node_1.add_link(node_1, 3)
    >>> node_1.add_link(node_2, 5)
    >>> node_2.add_link(node_1, 1)
    >>> graph = Graph([node_1, node_2])
    >>> for link in graph.node_list[0].link_list:
    ...     print('{} {}'.format(link.target.value, link.weight))
    One 3
    Two 5
    >>> graph.apply_noise()
    >>> for link in graph.node_list[0].link_list:
    ...     print('{} {}'.format(
    ...         link.target.value, link.weight))  # doctest: +SKIP
    One 3.154
    Two 5.321
7,386
def raise_for_status(self):
    if 400 <= self.status_code < 600:
        # Build an error message from the status code and the URL
        message = 'HTTP error %s for url: %s' % (self.status_code, self.url)
        raise HTTPError(message)
Raises HTTPError if the request got an error.
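A short usage sketch; `response` stands in for an instance of the response class this method belongs to, obtained from a hypothetical client call.

try:
    response.raise_for_status()
except HTTPError as exc:
    # 4xx and 5xx status codes end up here
    print("request failed:", exc)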
7,387
def ensure_crops(self, *required_crops):
    if self._can_crop():
        if settings.CELERY or settings.USE_CELERY_DECORATOR:
            # Queue the crop generation as an async task with a short delay
            args = [self.pk] + list(required_crops)
            tasks.ensure_crops.apply_async(args=args, countdown=5)
        else:
            tasks.ensure_crops(None, *required_crops, asset=self)
Make sure a crop exists for each crop in required_crops.

Existing crops will not be changed. If settings.CELERY or
settings.USE_CELERY_DECORATOR is set, the task will be run asynchronously.
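A usage sketch; the crop names are hypothetical and `asset` stands in for a saved instance of the model this method is defined on.

# Generate any missing renditions; existing crops are left untouched.
asset.ensure_crops('thumbnail', 'banner')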
7,388
def set_umr_namelist(self):
    arguments, valid = QInputDialog.getText(self, _('UMR'),
                                _("Set the list of excluded modules as "
                                  "this: <i>numpy, scipy</i>"),
                                QLineEdit.Normal,
                                ", ".join(self.get_option('umr/namelist')))
    if valid:
        arguments = to_text_string(arguments)
        if arguments:
            namelist = arguments.replace(' ', '').split(',')
            fixed_namelist = []
            non_ascii_namelist = []
            for module_name in namelist:
                if PY2:
                    # Python 2 cannot import modules whose names contain
                    # non-ascii characters
                    if all(ord(c) < 128 for c in module_name):
                        if programs.is_module_installed(module_name):
                            fixed_namelist.append(module_name)
                    else:
                        QMessageBox.warning(self, _('Warning'),
                                _("You are working with Python 2, this means that "
                                  "you can not import a module that contains non-"
                                  "ascii characters."), QMessageBox.Ok)
                        non_ascii_namelist.append(module_name)
                elif programs.is_module_installed(module_name):
                    fixed_namelist.append(module_name)
            invalid = ", ".join(set(namelist) - set(fixed_namelist) -
                                set(non_ascii_namelist))
            if invalid:
                QMessageBox.warning(self, _('UMR'),
                                    _("The following modules are not "
                                      "installed on your machine:\n%s"
                                      ) % invalid,
                                    QMessageBox.Ok)
            QMessageBox.information(self, _('UMR'),
                                    _("Please note that these changes will "
                                      "be applied only to new Python/IPython "
                                      "consoles"),
                                    QMessageBox.Ok)
        else:
            fixed_namelist = []
        self.set_option('umr/namelist', fixed_namelist)
Set UMR excluded modules name list
7,389
def connect(self):
    if self.r_session:
        self.session_logout()

    if self.admin_party:
        self._use_iam = False
        self.r_session = ClientSession(
            timeout=self._timeout
        )
    elif self._use_basic_auth:
        self._use_iam = False
        self.r_session = BasicSession(
            self._user,
            self._auth_token,
            self.server_url,
            timeout=self._timeout
        )
    elif self._use_iam:
        self.r_session = IAMSession(
            self._auth_token,
            self.server_url,
            auto_renew=self._auto_renew,
            client_id=self._iam_client_id,
            client_secret=self._iam_client_secret,
            timeout=self._timeout
        )
    else:
        self.r_session = CookieSession(
            self._user,
            self._auth_token,
            self.server_url,
            auto_renew=self._auto_renew,
            timeout=self._timeout
        )

    if self.adapter is not None:
        self.r_session.mount(self.server_url, self.adapter)
    if self._client_user_header is not None:
        self.r_session.headers.update(self._client_user_header)

    self.session_login()

    # requests fires the 'response' hook for every response; use it to
    # append error content to failed responses
    self.r_session.hooks['response'].append(append_response_error_content)
Starts up an authentication session for the client using cookie authentication if necessary.
7,390
def GenerateLabels(self, hash_information):
    if not hash_information:
        return []

    projects = []
    tags = []
    for project, entries in iter(hash_information.items()):
        if not entries:
            continue

        projects.append(project)

        for entry in entries:
            if entry['tags']:
                tags.extend(entry['tags'])

    if not projects:
        return []

    strings = []

    for project_name in projects:
        label = events.EventTag.CopyTextToLabel(
            project_name, prefix='viper_project_')
        strings.append(label)

    for tag_name in tags:
        label = events.EventTag.CopyTextToLabel(
            tag_name, prefix='viper_tag_')
        strings.append(label)

    return strings
Generates a list of strings that will be used in the event tag.

Args:
    hash_information (dict[str, object]): JSON decoded contents of the
        result of a Viper lookup, as produced by the ViperAnalyzer.

Returns:
    list[str]: list of labels to apply to events.
7,391
def convert_to_int(x: Any, default: int = None) -> int:
    try:
        return int(x)
    except (TypeError, ValueError):
        return default
Transforms its input into an integer, or returns ``default``.
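A few illustrative calls showing the fallback behaviour:

assert convert_to_int("42") == 42
assert convert_to_int("4.2") is None         # int("4.2") raises ValueError
assert convert_to_int(None, default=0) == 0  # int(None) raises TypeError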
7,392
def getBucketInfo(self, buckets):
    if self.ncategories == 0:
        return 0

    topDownMappingM = self._getTopDownMapping()

    # For the category encoder, the bucket index is the category index
    categoryIndex = buckets[0]
    category = self.categories[categoryIndex]
    encoding = topDownMappingM.getRow(categoryIndex)

    return [EncoderResult(value=category, scalar=categoryIndex,
                          encoding=encoding)]
See the function description in base.py
7,393
def search_device_by_id(self, deviceID) -> Device:
    for d in self.devices:
        if d.id == deviceID:
            return d
    return None
Searches for a device with the given id.

Args:
    deviceID(str): the device to search for

Returns:
    the Device object or None if it couldn't find a device
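A hedged usage sketch; `home` stands in for an object that exposes a `devices` collection and this method, and the id is made up.

device = home.search_device_by_id("3014F7110000000000000ABC")
if device is None:
    print("no device with that id")
else:
    print("found:", device)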
7,394
def mkdir(self, paths, create_parent=False, mode=0o755):
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("mkdirs: no path given")

    for path in paths:
        if not path.startswith("/"):
            path = self._join_user_path(path)

        # Only attempt the RPC if the path does not already exist
        fileinfo = self._get_file_info(path)
        if not fileinfo:
            try:
                request = client_proto.MkdirsRequestProto()
                request.src = path
                request.masked.perm = mode
                request.createParent = create_parent
                response = self.service.mkdirs(request)
                yield {"path": path, "result": response.result}
            except RequestError as e:
                yield {"path": path, "result": False, "error": str(e)}
        else:
            yield {"path": path, "result": False,
                   "error": "mkdir: `%s': File exists" % path}
Create a directory.

:param paths: Paths to create
:type paths: list of strings
:param create_parent: Also create the parent directories
:type create_parent: boolean
:param mode: Mode the directory should be created with
:type mode: int
:returns: a generator that yields dictionaries
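Usage sketch, assuming `client` is an instance of the HDFS client class this method belongs to. The call returns a generator, so it has to be iterated for the directories to actually be created.

for result in client.mkdir(["/tmp/dir_a", "/tmp/dir_b"], create_parent=True):
    print(result["path"], result["result"], result.get("error", ""))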
7,395
def remote_pdb_handler(signum, frame):
    try:
        from remote_pdb import RemotePdb
        rdb = RemotePdb(host="127.0.0.1", port=0)
        rdb.set_trace(frame=frame)
    except ImportError:
        log.warning(
            "remote_pdb unavailable. Please install remote_pdb to "
            "allow remote debugging."
        )
    # Re-register the handler so a later signal opens a fresh session
    signal.signal(signum, remote_pdb_handler)
Handler to drop us into a remote debugger upon receiving SIGUSR1
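Registering the handler is a single call; the docstring mentions SIGUSR1, so that is what this sketch wires up.

import signal

signal.signal(signal.SIGUSR1, remote_pdb_handler)
# Later, `kill -USR1 <pid>` drops the process into a RemotePdb session
# listening on 127.0.0.1 with an OS-assigned port.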
7,396
def _open_file(self, filename):
    if not self._os_is_windows:
        self._fh = open(filename, "rb")
        self.filename = filename
        self._fh.seek(0, os.SEEK_SET)
        self.oldsize = 0
        return

    # On Windows, open the file with share flags so the writer can still
    # delete or rotate it while we tail it
    import win32file
    import msvcrt

    handle = win32file.CreateFile(filename,
                                  win32file.GENERIC_READ,
                                  win32file.FILE_SHARE_DELETE |
                                  win32file.FILE_SHARE_READ |
                                  win32file.FILE_SHARE_WRITE,
                                  None,
                                  win32file.OPEN_EXISTING,
                                  0,
                                  None)

    detached_handle = handle.Detach()
    file_descriptor = msvcrt.open_osfhandle(
        detached_handle, os.O_RDONLY)

    self._fh = open(file_descriptor, "rb")
    self.filename = filename
    self._fh.seek(0, os.SEEK_SET)
    self.oldsize = 0
Open a file to be tailed
7,397
def read_until(self, marker):
    if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):
        raise TypeError(pretty_message(
            '''
            marker must be a byte string or a compiled regex object, not %s
            ''',
            type_name(marker)
        ))

    output = b''

    is_regex = isinstance(marker, Pattern)

    while True:
        # Consume any bytes left over from a previous read first
        if len(self._decrypted_bytes) > 0:
            chunk = self._decrypted_bytes
            self._decrypted_bytes = b''
        else:
            to_read = self._os_buffered_size() or 8192
            chunk = self.read(to_read)

        offset = len(output)
        output += chunk

        if is_regex:
            match = marker.search(output)
            if match is not None:
                end = match.end()
                break
        else:
            # Start scanning slightly before the end of the previous output
            # in case the marker straddles two chunks
            start = max(0, offset - len(marker) - 1)
            match = output.find(marker, start)
            if match != -1:
                end = match + len(marker)
                break

    # Put any bytes past the marker back into the decrypted-bytes buffer
    self._decrypted_bytes = output[end:] + self._decrypted_bytes
    return output[0:end]
Reads data from the socket until a marker is found. Data read includes
the marker.

:param marker:
    A byte string or regex object from re.compile(). Used to determine
    when to stop reading. Regex objects are more inefficient since they
    must scan the entire byte string of read data each time data is read
    off the socket.

:return:
    A byte string of the data read, including the marker
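A usage sketch; `sock` stands in for an instance of this TLS socket wrapper after the handshake and a request have been completed.

import re

# Fixed byte-string marker: read up to and including the blank line that
# terminates HTTP response headers.
headers = sock.read_until(b"\r\n\r\n")

# Regex marker: same idea, but the accumulated buffer is re-scanned on
# every read, which is slower.
line = sock.read_until(re.compile(b"\r\n"))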
7,398
async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
    if not isinstance(media, UrlMedia):
        raise ValueError()
    return media
So far, let's just accept URL media. We'll see in the future how it goes.
7,399
def prop_budget(self, budget):
    if self.minisat:
        pysolvers.minisatgh_pbudget(self.minisat, budget)
Set limit on the number of propagations.
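This looks like the low-level wrapper used by PySAT's MinisatGH backend; at the user level the same knob is reachable through the generic Solver interface. The solver name string and the CNF contents below are assumptions for the sake of the sketch.

from pysat.formula import CNF
from pysat.solvers import Solver

cnf = CNF(from_clauses=[[1, 2], [-1, 2], [-2]])

with Solver(name='minisatgh', bootstrap_with=cnf) as solver:
    solver.prop_budget(100000)       # stop after 100k propagations
    result = solver.solve_limited()  # True / False, or None if the budget ran out
    print(result)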