Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
22,700
async def unsubscribe(self, container, *keys):
    await self._get_subscribe_connection(container)
    realkeys = []
    for k in keys:
        count = self._subscribecounter.get(k, 0)
        if count <= 1:
            realkeys.append(k)
            try:
                del self._subscribecounter[k]
            except KeyError:
                pass
        else:
            self._subscribecounter[k] = count - 1
    if realkeys:
        # the command-name string literal was lost in extraction; 'UNSUBSCRIBE'
        # is reconstructed here to match the method's purpose
        await self._protocol.execute_command(self._subscribeconn, container,
                                             'UNSUBSCRIBE', *realkeys)
Unsubscribe the specified channels. Every subscribed key should be unsubscribed exactly once, even if it was subscribed multiple times. :param container: routine container :param \*keys: subscribed channels
22,701
def _parse_to_recoverable_signature(sig):
    assert isinstance(sig, bytes)
    assert len(sig) == 65
    rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
    recid = ord(sig[64:65])
    parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
        ctx, rec_sig, sig, recid
    )
    if not parsable_sig:
        raise InvalidSignatureError()
    return rec_sig
Returns a parsed recoverable signature of length 65 bytes
22,702
def _get_machines_cache_from_srv(self, srv): ret = [] for r in [, , , , , ]: protocol = if in r else endpoint = if in r else for host, port in self.get_srv_record(.format(r, srv)): url = uri(protocol, host, port, endpoint) if endpoint: try: response = requests.get(url, timeout=self.read_timeout, verify=False) if response.ok: for member in response.json(): ret.extend(member[]) break except RequestException: logger.exception(, url) else: ret.append(url) if ret: self._protocol = protocol break else: logger.warning(, srv) return list(set(ret))
Fetch the list of etcd-cluster members by resolving the _etcd-server._tcp. SRV record. This record should contain the list of hosts and peer ports which can be used to run a 'GET http://{host}:{port}/members' request (peer protocol)
22,703
def split_by_idxs(seq, idxs):
    last = 0
    for idx in idxs:
        if not (-len(seq) <= idx < len(seq)):
            # error message reconstructed; the original f-string literal was lost
            raise KeyError(f'Idx {idx} is out-of-bounds')
        yield seq[last:idx]
        last = idx
    yield seq[last:]
A generator that yields pieces of a sequence, separated at the indexes specified in idxs.
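A minimal usage sketch for the generator above (the output is inferred from the slicing logic in the snippet):

    seq = ['a', 'b', 'c', 'd', 'e']
    pieces = list(split_by_idxs(seq, [2, 4]))
    # pieces == [['a', 'b'], ['c', 'd'], ['e']]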
22,704
def handle_noargs(self, **options): is_dry_run = options.get(, False) mptt_only = options.get(, False) slugs = {} overrides = {} parents = dict( UrlNode.objects.filter(status=UrlNode.DRAFT).values_list(, ) ) self.stdout.write("Updated MPTT columns") if is_dry_run and mptt_only: self.stderr.write("Failed to determine new URL for {0}, please run with --mptt-only first.".format(old_url)) return raise if old_url != new_url: translation._cached_url = new_url if not is_dry_run: translation.save() if old_url != new_url: self.stdout.write(smart_text(u"{0} {1} {2}\n".format( col_style.format(translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url), "WILL CHANGE from" if is_dry_run else "UPDATED from", old_url ))) else: self.stdout.write(smart_text(col_style.format( translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url )))
By default this function runs on all objects. As we are using a publishing system it should only update draft objects which can be modified in the tree structure. Once published the tree preferences should remain the same to ensure the tree data structure is consistent with what was published by the user.
22,705
def _initialized(self, partitioner):
    self._partitioner = partitioner
    self._thimble = Thimble(self.reactor, self.pool, partitioner,
                            _blocking_partitioner_methods)
    self._state = None
Store the partitioner and reset the internal state. Now that we successfully got an actual :class:`kazoo.recipe.partitioner.SetPartitioner` object, we store it and reset our internal ``_state`` to ``None``, causing the ``state`` property to defer to the partitioner's state.
22,706
def removeRef(self, attr):
    if attr is None:
        attr__o = None
    else:
        attr__o = attr._o
    ret = libxml2mod.xmlRemoveRef(self._o, attr__o)
    return ret
Remove the given attribute from the Ref table maintained internally.
22,707
def connect_child(self, node):
    if node.graph != self.graph:
        raise AttributeError()
    node.parents.add(self)
    self.children.add(node)
Adds the given node as a child to this one. No new nodes are created, only connections are made. :param node: a ``Node`` object to connect
22,708
def get_account(self, address, id=None, endpoint=None):
    return self._call_endpoint(GET_ACCOUNT_STATE, params=[address], id=id,
                               endpoint=endpoint)
Look up an account on the blockchain. Sample output: Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
22,709
def send_request(ndex_service_url, params, is_json=True, use_get=False): if use_get: res = requests.get(ndex_service_url, json=params) else: res = requests.post(ndex_service_url, json=params) status = res.status_code if status == 200: if is_json: return res.json() else: return res.text elif status != 300: logger.error( % status) return None task_id = res.json().get() logger.info() time_used = 0 try: while status != 200: res = requests.get(ndex_base_url + + task_id) status = res.status_code if status != 200: time.sleep(5) time_used += 5 except KeyError: next return None logger.info() if is_json: return res.json() else: return res.text
Send a request to the NDEx server. Parameters ---------- ndex_service_url : str The URL of the service to use for the request. params : dict A dictionary of parameters to send with the request. Parameter keys differ based on the type of request. is_json : bool True if the response is in json format, otherwise it is assumed to be text. Default: False use_get : bool True if the request needs to use GET instead of POST. Returns ------- res : str Depending on the type of service and the is_json parameter, this function either returns a text string or a json dict.
22,710
def json_encode(self, out, limit=None, sort_keys=False, indent=None):
    stream = self._json_stream(limit)
    enc = json.JSONEncoder(indent=indent, sort_keys=sort_keys)
    for chunk in enc.iterencode(stream):
        # format string reconstructed; the original literal was lost in extraction
        out.write(u'%s' % chunk)
Encode the results of this paged response as JSON writing to the provided file-like `out` object. This function will iteratively read as many pages as present, streaming the contents out as JSON. :param file-like out: an object with a `write` function :param int limit: optional maximum number of items to write :param bool sort_keys: if True, output keys sorted, default is False :param bool indent: if True, indent output, default is False
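The streaming behaviour relies on the standard library's json.JSONEncoder.iterencode; a self-contained illustration of that mechanism, independent of the paged-response class above:

    import json
    import sys

    enc = json.JSONEncoder(indent=2, sort_keys=True)
    for chunk in enc.iterencode({"b": 2, "a": [1, 2, 3]}):
        sys.stdout.write(chunk)   # chunks arrive incrementally instead of one big string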
22,711
def bind(self):
    gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
    self._old_viewport = get_viewport()
    gl.glBindFramebufferEXT(gl.GL_FRAMEBUFFER_EXT, self.id)
    gl.glViewport(0, 0, self.texture.width, self.texture.height)
Bind the FBO. Anything drawn afterward will be stored in the FBO's texture.
22,712
def monkey_patch_override_instance_method(instance):
    def perform_override(override_fn):
        fn_name = override_fn.__name__
        # prefix restored from the docstring: the original method is kept as
        # _original_<METHOD_NAME>
        original_fn_name = '_original_' + fn_name
        if not hasattr(instance, original_fn_name):
            original_fn = getattr(instance, fn_name)
            setattr(instance, original_fn_name, original_fn)
        bound_override_fn = override_fn.__get__(instance)
        setattr(instance, fn_name, bound_override_fn)
    return perform_override
Override an instance method with a new version of the same name. The original method implementation is made available within the override method as `_original_<METHOD_NAME>`.
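A usage sketch, assuming the decorator above with the '_original_' prefix restored; Greeter is a made-up class used only for illustration:

    class Greeter:
        def greet(self):
            return "hello"

    g = Greeter()

    @monkey_patch_override_instance_method(g)
    def greet(self):
        return self._original_greet() + ", world"

    print(g.greet())   # -> "hello, world"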
22,713
def url(self, key, includeToken=None):
    if self._token and (includeToken or self._showSecrets):
        # delimiter and format strings reconstructed; the original literals were
        # lost in extraction (token is appended as a query parameter)
        delim = '&' if '?' in key else '?'
        return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token)
    return '%s%s' % (self._baseurl, key)
Build a URL string with proper token argument. Token will be appended to the URL if either includeToken is True or CONFIG.log.show_secrets is 'true'.
22,714
def get_fast_scanner(self):
    terms = self._terms
    return KronFastScanner(self._Y, self._mean.A, self._mean.X, self._cov.Ge,
                           terms)
Return :class:`.FastScanner` for association scan. Returns ------- :class:`.FastScanner` Instance of a class designed to perform very fast association scan.
22,715
def selected_attributes(self):
    array = javabridge.call(self.jobject, "selectedAttributes", "()[I")
    if array is None:
        return None
    else:
        return javabridge.get_env().get_int_array_elements(array)
Returns the selected attributes from the last run. :return: the Numpy array of 0-based indices :rtype: ndarray
22,716
def _structure(self, source_code):
    def cutter(seq, block_size):
        for index in range(0, len(seq), block_size):
            lexem = seq[index:index+block_size]
            if len(lexem) == block_size:
                yield self.table_struct[seq[index:index+block_size]]
    return tuple(cutter(source_code, self.idnt_struct_size))
return structure in ACDP format.
22,717
def getInterfaceAddresses(self, node, interface): intf = self.getNode(node)[].getInterface(interface) return intf.ethaddr,intf.ipaddr,intf.netmask
Return the Ethernet and IP+mask addresses assigned to a given interface on a node.
22,718
def fit(self, **kwargs): if len(self._set_xdata)==0 or len(self._set_ydata)==0: return self._error("No data. Please use set_data() prior to fitting.") if self._f_raw is None: return self._error("No functions. Please use set_functions() prior to fitting.") self._massage_data() self.set(**kwargs) self.results = _opt.leastsq(self._studentized_residuals_concatenated, self._pguess, full_output=1) if self[]: self.plot() return self
This will try to determine fit parameters using scipy.optimize.leastsq algorithm. This function relies on a previous call of set_data() and set_functions(). Notes ----- results of the fit algorithm are stored in self.results. See scipy.optimize.leastsq for more information. Optional keyword arguments are sent to self.set() prior to fitting.
22,719
def calculate_reshape_output_shapes(operator): , H] Note that C*H*W should equal to C*W check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1) check_input_and_output_types(operator, good_input_types=[FloatTensorType]) params = operator.raw_operator.reshape output_shape = list(int(i) for i in params.targetShape) if len(output_shape) == 3: output_shape = [operator.inputs[0].type.shape[0]] + output_shape operator.outputs[0].type.shape = output_shape
Allowed input/output patterns are 1. [N, C, H, W] ---> [N, C', H', W'] Note that C*H*W should equal to C'*H'*W'.
22,720
def MakeCdf(self, steps=101):
    xs = [i / (steps - 1.0) for i in xrange(steps)]
    ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs]
    cdf = Cdf(xs, ps)
    return cdf
Returns the CDF of this distribution.
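MakeCdf evaluates the regularized incomplete beta function as the Beta CDF; a quick standalone check of that building block, assuming SciPy is available:

    from scipy import special

    # CDF of a Beta(2, 2) distribution at x = 0.5; the distribution is
    # symmetric about 0.5, so the value is exactly 0.5
    print(special.betainc(2, 2, 0.5))   # 0.5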
22,721
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS): if n == 1: return Partition([1] * len(self.get_dm(False))) if embed_dim is None: embed_dim = n if algo == mds.CLASSICAL: self._coords = self.dm.embedding(embed_dim, ) elif algo == mds.METRIC: self._coords = self.dm.embedding(embed_dim, ) else: raise OptionError(algo, list(mds.reverse.values())) if method == methods.KMEANS: p = self.kmeans(n, self._coords.values) elif method == methods.GMM: p = self.gmm(n, self._coords.values) elif method == methods.WARD: linkmat = fastcluster.linkage(self._coords.values, ) p = _hclust(linkmat, n) else: raise OptionError(method, list(methods.reverse.values())) return p
Cluster the embedded coordinates using multidimensional scaling Parameters ---------- n: int The number of clusters to return embed_dim int The dimensionality of the underlying coordinates Defaults to same value as n method: enum value (methods.KMEANS | methods.GMM) The clustering method to use Returns ------- Partition: Partition object describing the data partition
22,722
def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components): if covariance_type == : cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]), (n_components, 1)) elif covariance_type == : cv = tied_cv elif covariance_type == : cv = np.tile(np.diag(tied_cv), (n_components, 1)) elif covariance_type == : cv = np.tile(tied_cv, (n_components, 1, 1)) else: raise ValueError("covariance_type must be one of " + ", , , ") return cv
Create all the covariance matrices from a given template.
22,723
def exists(self, workflow_id):
    try:
        db = self._client[self.database]
        col = db[WORKFLOW_DATA_COLLECTION_NAME]
        return col.find_one({"_id": ObjectId(workflow_id)}) is not None
    except ConnectionFailure:
        raise DataStoreNotConnected()
Checks whether a document with the specified workflow id already exists. Args: workflow_id (str): The workflow id that should be checked. Raises: DataStoreNotConnected: If the data store is not connected to the server. Returns: bool: ``True`` if a document with the specified workflow id exists.
22,724
def main(): import optparse parser = optparse.OptionParser() parser.add_option( "-w", "--width", dest="width", type="int", default=None, help=("Width of printed image in characters. Default: %default")) (options, args) = parser.parse_args(args=sys.argv[1:]) for imgpath in args: for line in Image(imgpath, options.width): printy(line)
Main function for :command:`fabulous-image`.
22,725
def getChild(self, name, request):
    request.prepath = []
    request.postpath.insert(0, name)
    return self.wsgi_resource
Postpath needs to contain all segments of the url, if it is incomplete then that incomplete url will be passed on to the child resource (in this case our wsgi application).
22,726
def login_with_api_token(api_token): response = API.sync(api_token, , ) _fail_if_contains_errors(response) user_json = response.json()[] return User(user_json)
Login to Todoist using a user's api token. .. note:: It is up to you to obtain the api token. :param api_token: A Todoist user's api token. :type api_token: str :return: The Todoist user. :rtype: :class:`pytodoist.todoist.User` >>> from pytodoist import todoist >>> api_token = 'api_token' >>> user = todoist.login_with_api_token(api_token) >>> print(user.full_name) John Doe
22,727
def find_consensus(bases): nucs = [, , , , ] total = sum([bases[nuc] for nuc in nucs if nuc in bases]) try: top = max([bases[nuc] for nuc in nucs if nuc in bases]) except: bases[] = (, ) bases[] = bases[] = return bases top = [(nuc, bases[nuc]) for nuc in bases if bases[nuc] == top] if top[0][1] == 0: bases[] = (, 0) else: bases[] = random.choice(top) if total == 0: c_freq = ref_freq = else: c_freq = float(bases[][1]) / float(total) if bases[] not in bases: ref_freq = 0 else: ref_freq = float(bases[bases[]]) / float(total) bases[] = c_freq bases[] = ref_freq return bases
find consensus base based on nucleotide frequencies
22,728
def dump_json(d: dict) -> str:
    # return annotation corrected: the function returns the JSON string
    import json
    k = d.keys()
    v = d.values()
    k1 = [str(i) for i in k]
    return json.dumps(dict(zip(*[k1, v])), indent=4)
Dump a dictionary to JSON when tuples are used as dictionary keys. The tuples have to be converted to strings before dumping to JSON.
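Usage sketch for the helper above; the tuple keys become their str() representations in the output:

    d = {(1, 2): "a", (3, 4): "b"}
    print(dump_json(d))
    # {
    #     "(1, 2)": "a",
    #     "(3, 4)": "b"
    # }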
22,729
def _GetNormalizedTimestamp(self):
    if self._normalized_timestamp is None:
        if self._number_of_seconds is not None:
            self._normalized_timestamp = (
                decimal.Decimal(self._microseconds) /
                definitions.MICROSECONDS_PER_SECOND)
            self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)
    return self._normalized_timestamp
Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined.
22,730
def create(self, instance, cidr_mask, description, **kwargs): url = self._url.format(instance=instance) request_data = { : cidr_mask, : description } request_data.update(kwargs) response = requests.post( url, data=json.dumps(request_data), **self._default_request_kwargs ) if response.status_code == 200: logger.info( .format(instance, request_data)) else: logger.info( .format(instance, request_data)) data = self._get_response_data(response) return self._concrete_acl(data)
Create an ACL entry for the specified instance. :param str instance: The name of the instance to associate the new ACL entry with. :param str cidr_mask: The IPv4 CIDR mask for the new ACL entry. :param str description: A short description for the new ACL entry. :param collector kwargs: (optional) Additional key=value pairs to be supplied to the creation payload. **Caution:** fields unrecognized by the API will cause this request to fail with a 400 from the API.
22,731
def encrypt_dir(self, path, output_path=None, overwrite=False, stream=True, enable_verbose=True): path, output_path = files.process_dst_overwrite_args( src=path, dst=output_path, overwrite=overwrite, src_to_dst_func=files.get_encrpyted_path, ) self._show("--- Encrypt directory ---" % path, enable_verbose=enable_verbose) st = time.clock() for current_dir, _, file_list in os.walk(path): new_dir = current_dir.replace(path, output_path) if not os.path.exists(new_dir): os.mkdir(new_dir) for basename in file_list: old_path = os.path.join(current_dir, basename) new_path = os.path.join(new_dir, basename) self.encrypt_file(old_path, new_path, overwrite=overwrite, stream=stream, enable_verbose=enable_verbose) self._show("Complete! Elapse %.6f seconds" % (time.clock() - st,), enable_verbose=enable_verbose) return output_path
Encrypt everything in a directory. :param path: path of the dir you need to encrypt :param output_path: encrypted dir output path :param overwrite: if True, then silently overwrite output file if exists :param stream: if it is a very big file, stream mode can avoid using too much memory :param enable_verbose: boolean, trigger on/off the help information
22,732
def html_dataset_type(is_binary, is_imbalanced):
    result = "<h2>Dataset Type : </h2>\n"
    balance_type = "Balanced"
    class_type = "Binary Classification"
    if is_imbalanced:
        balance_type = "Imbalanced"
    if not is_binary:
        class_type = "Multi-Class Classification"
    result += "<ul>\n\n<li>{0}</li>\n\n<li>{1}</li>\n</ul>\n".format(
        class_type, balance_type)
    result += "<p>{0}</p>\n".format(RECOMMEND_HTML_MESSAGE)
    result += "<p>{0}</p>\n".format(RECOMMEND_HTML_MESSAGE2)
    return result
Return HTML report file dataset type. :param is_binary: is_binary flag (binary : True , multi-class : False) :type is_binary: bool :param is_imbalanced: is_imbalanced flag (imbalance : True , balance : False) :type is_imbalanced: bool :return: dataset_type as str
22,733
def get_bip32_address(self, ecdh=False): index = struct.pack(, self.identity_dict.get(, 0)) addr = index + self.to_bytes() log.debug(, addr) digest = hashlib.sha256(addr).digest() s = io.BytesIO(bytearray(digest)) hardened = 0x80000000 addr_0 = 17 if bool(ecdh) else 13 address_n = [addr_0] + list(util.recv(s, )) return [(hardened | value) for value in address_n]
Compute BIP32 derivation address according to SLIP-0013/0017.
22,734
def revert_to_max(self):
    try:
        self._set_stochastics([self.mu[s] for s in self.stochastics])
    except KeyError:
        self._set_stochastics(self.mu[self.stochastics])
N.revert_to_max() Sets all N's stochastics to their MAP values.
22,735
def get_cache(self, namespace, query_hash, length, start, end): query = \ cursor = self.cursor cursor.execute(query, (namespace, query_hash, length, start, end)) return tuple(cursor.fetchall())
Get a cached value for the specified date range and query
22,736
def get(self, container):
    name = utils.get_name(container)
    uri = "/%s" % name
    resp, resp_body = self.api.method_head(uri)
    hdrs = resp.headers
    data = {"total_bytes": int(hdrs.get("x-container-bytes-used", "0")),
            "object_count": int(hdrs.get("x-container-object-count", "0")),
            "name": name}
    return Container(self, data, loaded=False)
Returns a Container matching the specified container name. If no such container exists, a NoSuchContainer exception is raised.
22,737
def handle_receive_lock_expired( channel_state: NettingChannelState, state_change: ReceiveLockExpired, block_number: BlockNumber, ) -> TransitionResult[NettingChannelState]: is_valid, msg, merkletree = is_valid_lock_expired( state_change=state_change, channel_state=channel_state, sender_state=channel_state.partner_state, receiver_state=channel_state.our_state, block_number=block_number, ) events: List[Event] = list() if is_valid: assert merkletree, channel_state.partner_state.balance_proof = state_change.balance_proof channel_state.partner_state.merkletree = merkletree _del_unclaimed_lock(channel_state.partner_state, state_change.secrethash) send_processed = SendProcessed( recipient=state_change.balance_proof.sender, channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE, message_identifier=state_change.message_identifier, ) events = [send_processed] else: assert msg, invalid_lock_expired = EventInvalidReceivedLockExpired( secrethash=state_change.secrethash, reason=msg, ) events = [invalid_lock_expired] return TransitionResult(channel_state, events)
Remove expired locks from channel states.
22,738
def format_help(self, ctx, formatter):
    self.format_usage(ctx, formatter)
    self.format_help_text(ctx, formatter)
    self.format_options(ctx, formatter)
    self.format_epilog(ctx, formatter)
Writes the help into the formatter if it exists. This calls into the following methods: - :meth:`format_usage` - :meth:`format_help_text` - :meth:`format_options` - :meth:`format_epilog`
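This is the entry point click uses when rendering --help; a small sketch of driving it directly, assuming the click library (command and option names are made up):

    import click

    @click.command()
    @click.option('--count', default=1, help='Number of greetings.')
    def hello(count):
        """Say hello COUNT times."""

    ctx = click.Context(hello)
    formatter = ctx.make_formatter()
    hello.format_help(ctx, formatter)
    print(formatter.getvalue())   # usage line, help text, and the option list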
22,739
def timed(name=None, file=sys.stdout, callback=None, wall_clock=True): start = time.time() yield end = time.time() delta = end - start if callback is not None: callback(delta) elif isinstance(file, np.ndarray) and len(file) == 1: file[0] = delta else: name_str = .format(name) if name is not None else print(("[timed]{0}: {1} s".format(name_str, delta)), file=file)
Context manager to make it easy to time the execution of a piece of code. This timer will never run your code several times and is meant more for simple in-production timing, instead of benchmarking. Reports the wall-clock time (using `time.time`) and not the processor time. Parameters ---------- name : str Name of the timing block, to identify it. file : file handler Which file handler to print the results to. Default is standard output. If a numpy array and size 1 is given, the time in seconds will be stored inside it. Ignored if `callback` is set. callback : callable This offer even more flexibility than `file`. The callable will be called at the end of the execution with a single floating point argument with the elapsed time in seconds. Examples -------- >>> import deepdish as dd >>> import time The `timed` function is a context manager, so everything inside the ``with`` block will be timed. The results will be printed by default to standard output: >>> with dd.timed('Sleep'): # doctest: +SKIP ... time.sleep(1) [timed] Sleep: 1.001035451889038 s Using the `callback` parameter, we can accumulate multiple runs into a list: >>> times = [] >>> for i in range(3): # doctest: +SKIP ... with dd.timed(callback=times.append): ... time.sleep(1) >>> times # doctest: +SKIP [1.0035350322723389, 1.0035550594329834, 1.0039470195770264]
22,740
def json(self):
    if not self.headers and len(self.content) > 3:
        encoding = get_encoding_from_headers(self.headers)
        if encoding is not None:
            return json.loads(self.content.decode(encoding))
    return json.loads(self.content)
Returns the json-encoded content of a response, if any.
22,741
def sum_table(records):
    size = len(records[0])
    result = [None] * size
    firstrec = records[0]
    for i in range(size):
        if isinstance(firstrec[i], (numbers.Number, numpy.ndarray)):
            result[i] = sum(rec[i] for rec in records)
        else:
            # label reconstructed from the docstring example (['total', 3])
            result[i] = 'total'
    return result
Used to compute summaries. The records are assumed to have numeric fields, except the first field which is ignored, since it typically contains a label. Here is an example: >>> sum_table([('a', 1), ('b', 2)]) ['total', 3]
22,742
def bipole(src, rec, depth, res, freqtime, signal=None, aniso=None, epermH=None, epermV=None, mpermH=None, mpermV=None, msrc=False, srcpts=1, mrec=False, recpts=1, strength=0, xdirect=False, ht=, htarg=None, ft=, ftarg=None, opt=None, loop=None, verb=2): r for isz in range(nsrcz): srcazmdip = get_azm_dip(src, isz, nsrcz, srcpts, srcdipole, strength, , verb) tsrc, srcazm, srcdip, srcg_w, srcpts, src_w = srcazmdip for irz in range(nrecz): recazmdip = get_azm_dip(rec, irz, nrecz, recpts, recdipole, strength, , verb) trec, recazm, recdip, recg_w, recpts, rec_w = recazmdip out = fem(iab, *finp) tfact = get_geo_fact(iab, srcazm, srcdip, recazm, recdip, msrc, mrec) abEM += out[0]*np.squeeze(tfact) kcount += out[1] conv *= out[2] rEM += abEM*recg_w[irg] sEM += rEM*srcg_w[isg] src_rec_w = 1 if strength > 0: src_rec_w *= np.repeat(src_w, irec) src_rec_w *= np.tile(rec_w, isrc) sEM *= src_rec_w if nrec == nrecz: if nsrc == nsrcz: EM[:, isz*nrec+irz:isz*nrec+irz+1] = sEM else: EM[:, irz:nsrc*nrec:nrec] = sEM else: if nsrc == nsrcz: EM[:, isz*nrec:nrec*(isz+1)] = sEM else: EM = sEM conv_warning(conv, htarg, , verb) if signal is not None: EM, conv = tem(EM, EM[0, :], freq, time, signal, ft, ftarg) conv_warning(conv, ftarg, , verb) EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order=)) printstartfinish(verb, t0, kcount) return EM
r"""Return the electromagnetic field due to an electromagnetic source. Calculate the electromagnetic frequency- or time-domain field due to arbitrary finite electric or magnetic bipole sources, measured by arbitrary finite electric or magnetic bipole receivers. By default, the electromagnetic response is normalized to to source and receiver of 1 m length, and source strength of 1 A. See Also -------- fem : Electromagnetic frequency-domain response. tem : Electromagnetic time-domain response. Parameters ---------- src, rec : list of floats or arrays Source and receiver coordinates (m): - [x0, x1, y0, y1, z0, z1] (bipole of finite length) - [x, y, z, azimuth, dip] (dipole, infinitesimal small) Dimensions: - The coordinates x, y, and z (dipole) or x0, x1, y0, y1, z0, and z1 (bipole) can be single values or arrays. - The variables x and y (dipole) or x0, x1, y0, and y1 (bipole) must have the same dimensions. - The variable z (dipole) or z0 and z1 (bipole) must either be single values or having the same dimension as the other coordinates. - The variables azimuth and dip must be single values. If they have different angles, you have to use the bipole-method (with srcpts/recpts = 1, so it is calculated as dipoles). Angles (coordinate system is left-handed, positive z down (East-North-Depth): - azimuth (°): horizontal deviation from x-axis, anti-clockwise. - dip (°): vertical deviation from xy-plane downwards. Sources or receivers placed on a layer interface are considered in the upper layer. depth : list Absolute layer interfaces z (m); #depth = #res - 1 (excluding +/- infinity). res : array_like Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1. Alternatively, res can be a dictionary. See the main manual of empymod too see how to exploit this hook to re-calculate etaH, etaV, zetaH, and zetaV, which can be used to, for instance, use the Cole-Cole model for IP. freqtime : array_like Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0). signal : {None, 0, 1, -1}, optional Source signal, default is None: - None: Frequency-domain response - -1 : Switch-off time-domain response - 0 : Impulse time-domain response - +1 : Switch-on time-domain response aniso : array_like, optional Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res. Defaults to ones. epermH, epermV : array_like, optional Relative horizontal/vertical electric permittivities epsilon_h/epsilon_v (-); #epermH = #epermV = #res. Default is ones. mpermH, mpermV : array_like, optional Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-); #mpermH = #mpermV = #res. Default is ones. msrc, mrec : boolean, optional If True, source/receiver (msrc/mrec) is magnetic, else electric. Default is False. srcpts, recpts : int, optional Number of integration points for bipole source/receiver, default is 1: - srcpts/recpts < 3 : bipole, but calculated as dipole at centre - srcpts/recpts >= 3 : bipole strength : float, optional Source strength (A): - If 0, output is normalized to source and receiver of 1 m length, and source strength of 1 A. - If != 0, output is returned for given source and receiver length, and source strength. Default is 0. xdirect : bool or None, optional Direct field calculation (only if src and rec are in the same layer): - If True, direct field is calculated analytically in the frequency domain. - If False, direct field is calculated in the wavenumber domain. - If None, direct field is excluded from the calculation, and only reflected fields are returned (secondary field). Defaults to False. 
ht : {'fht', 'qwe', 'quad'}, optional Flag to choose either the *Digital Linear Filter* method (FHT, *Fast Hankel Transform*), the *Quadrature-With-Extrapolation* (QWE), or a simple *Quadrature* (QUAD) for the Hankel transform. Defaults to 'fht'. htarg : dict or list, optional Depends on the value for ``ht``: - If ``ht`` = 'fht': [fhtfilt, pts_per_dec]: - fhtfilt: string of filter name in ``empymod.filters`` or the filter method itself. (default: ``empymod.filters.key_201_2009()``) - pts_per_dec: points per decade; (default: 0) - If 0: Standard DLF. - If < 0: Lagged Convolution DLF. - If > 0: Splined DLF - If ``ht`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec, diff_quad, a, b, limit]: - rtol: relative tolerance (default: 1e-12) - atol: absolute tolerance (default: 1e-30) - nquad: order of Gaussian quadrature (default: 51) - maxint: maximum number of partial integral intervals (default: 40) - pts_per_dec: points per decade; (default: 0) - If 0, no interpolation is used. - If > 0, interpolation is used. - diff_quad: criteria when to swap to QUAD (only relevant if opt='spline') (default: 100) - a: lower limit for QUAD (default: first interval from QWE) - b: upper limit for QUAD (default: last interval from QWE) - limit: limit for quad (default: maxint) - If ``ht`` = 'quad': [atol, rtol, limit, lmin, lmax, pts_per_dec]: - rtol: relative tolerance (default: 1e-12) - atol: absolute tolerance (default: 1e-20) - limit: An upper bound on the number of subintervals used in the adaptive algorithm (default: 500) - lmin: Minimum wavenumber (default 1e-6) - lmax: Maximum wavenumber (default 0.1) - pts_per_dec: points per decade (default: 40) The values can be provided as dict with the keywords, or as list. However, if provided as list, you have to follow the order given above. A few examples, assuming ``ht`` = ``qwe``: - Only changing rtol: {'rtol': 1e-4} or [1e-4] or 1e-4 - Changing rtol and nquad: {'rtol': 1e-4, 'nquad': 101} or [1e-4, '', 101] - Only changing diff_quad: {'diffquad': 10} or ['', '', '', '', '', 10] ft : {'sin', 'cos', 'qwe', 'fftlog', 'fft'}, optional Only used if ``signal`` != None. Flag to choose either the Digital Linear Filter method (Sine- or Cosine-Filter), the Quadrature-With-Extrapolation (QWE), the FFTLog, or the FFT for the Fourier transform. Defaults to 'sin'. ftarg : dict or list, optional Only used if ``signal`` !=None. Depends on the value for ``ft``: - If ``ft`` = 'sin' or 'cos': [fftfilt, pts_per_dec]: - fftfilt: string of filter name in ``empymod.filters`` or the filter method itself. (Default: ``empymod.filters.key_201_CosSin_2012()``) - pts_per_dec: points per decade; (default: -1) - If 0: Standard DLF. - If < 0: Lagged Convolution DLF. 
- If > 0: Splined DLF - If ``ft`` = 'qwe': [rtol, atol, nquad, maxint, pts_per_dec]: - rtol: relative tolerance (default: 1e-8) - atol: absolute tolerance (default: 1e-20) - nquad: order of Gaussian quadrature (default: 21) - maxint: maximum number of partial integral intervals (default: 200) - pts_per_dec: points per decade (default: 20) - diff_quad: criteria when to swap to QUAD (default: 100) - a: lower limit for QUAD (default: first interval from QWE) - b: upper limit for QUAD (default: last interval from QWE) - limit: limit for quad (default: maxint) - If ``ft`` = 'fftlog': [pts_per_dec, add_dec, q]: - pts_per_dec: sampels per decade (default: 10) - add_dec: additional decades [left, right] (default: [-2, 1]) - q: exponent of power law bias (default: 0); -1 <= q <= 1 - If ``ft`` = 'fft': [dfreq, nfreq, ntot]: - dfreq: Linear step-size of frequencies (default: 0.002) - nfreq: Number of frequencies (default: 2048) - ntot: Total number for FFT; difference between nfreq and ntot is padded with zeroes. This number is ideally a power of 2, e.g. 2048 or 4096 (default: nfreq). - pts_per_dec : points per decade (default: None) Padding can sometimes improve the result, not always. The default samples from 0.002 Hz - 4.096 Hz. If pts_per_dec is set to an integer, calculated frequencies are logarithmically spaced with the given number per decade, and then interpolated to yield the required frequencies for the FFT. The values can be provided as dict with the keywords, or as list. However, if provided as list, you have to follow the order given above. See ``htarg`` for a few examples. opt : {None, 'parallel'}, optional Optimization flag. Defaults to None: - None: Normal case, no parallelization nor interpolation is used. - If 'parallel', the package ``numexpr`` is used to evaluate the most expensive statements. Always check if it actually improves performance for a specific problem. It can speed up the calculation for big arrays, but will most likely be slower for small arrays. It will use all available cores for these specific statements, which all contain ``Gamma`` in one way or another, which has dimensions (#frequencies, #offsets, #layers, #lambdas), therefore can grow pretty big. The module ``numexpr`` uses by default all available cores up to a maximum of 8. You can change this behaviour to your desired number of threads ``nthreads`` with ``numexpr.set_num_threads(nthreads)``. - The value 'spline' is deprecated and will be removed. See ``htarg`` instead for the interpolated versions. The option 'parallel' only affects speed and memory usage, whereas 'spline' also affects precision! Please read the note in the *README* documentation for more information. loop : {None, 'freq', 'off'}, optional Define if to calculate everything vectorized or if to loop over frequencies ('freq') or over offsets ('off'), default is None. It always loops over frequencies if ``ht = 'qwe'`` or if ``opt = 'spline'``. Calculating everything vectorized is fast for few offsets OR for few frequencies. However, if you calculate many frequencies for many offsets, it might be faster to loop over frequencies. Only comparing the different versions will yield the answer for your specific problem at hand! verb : {0, 1, 2, 3, 4}, optional Level of verbosity, default is 2: - 0: Print nothing. - 1: Print warnings. - 2: Print additional runtime and kernel calls - 3: Print additional start/stop, condensed parameter information. 
- 4: Print additional full parameter information Returns ------- EM : ndarray, (nfreq, nrec, nsrc) Frequency- or time-domain EM field (depending on ``signal``): - If rec is electric, returns E [V/m]. - If rec is magnetic, returns B [T] (not H [A/m]!). However, source and receiver are normalised (unless strength != 0). So for instance in the electric case the source strength is 1 A and its length is 1 m. So the electric field could also be written as [V/(A.m2)]. In the magnetic case the source strength is given by :math:`i\omega\mu_0 A I^e`, where A is the loop area (m2), and :math:`I^e` the electric source strength. For the normalized magnetic source :math:`A=1m^2` and :math:`I^e=1 Ampere`. A magnetic source is therefore frequency dependent. The shape of EM is (nfreq, nrec, nsrc). However, single dimensions are removed. Examples -------- >>> import numpy as np >>> from empymod import bipole >>> # x-directed bipole source: x0, x1, y0, y1, z0, z1 >>> src = [-50, 50, 0, 0, 100, 100] >>> # x-directed dipole source-array: x, y, z, azimuth, dip >>> rec = [np.arange(1, 11)*500, np.zeros(10), 200, 0, 0] >>> # layer boundaries >>> depth = [0, 300, 1000, 1050] >>> # layer resistivities >>> res = [1e20, .3, 1, 50, 1] >>> # Frequency >>> freq = 1 >>> # Calculate electric field due to an electric source at 1 Hz. >>> # [msrc = mrec = True (default)] >>> EMfield = bipole(src, rec, depth, res, freq, verb=4) :: empymod START :: ~ depth [m] : 0 300 1000 1050 res [Ohm.m] : 1E+20 0.3 1 50 1 aniso [-] : 1 1 1 1 1 epermH [-] : 1 1 1 1 1 epermV [-] : 1 1 1 1 1 mpermH [-] : 1 1 1 1 1 mpermV [-] : 1 1 1 1 1 frequency [Hz] : 1 Hankel : DLF (Fast Hankel Transform) > Filter : Key 201 (2009) > DLF type : Standard Kernel Opt. : None Loop over : None (all vectorized) Source(s) : 1 bipole(s) > intpts : 1 (as dipole) > length [m] : 100 > x_c [m] : 0 > y_c [m] : 0 > z_c [m] : 100 > azimuth [°] : 0 > dip [°] : 0 Receiver(s) : 10 dipole(s) > x [m] : 500 - 5000 : 10 [min-max; #] : 500 1000 1500 2000 2500 3000 3500 4000 4500 5000 > y [m] : 0 - 0 : 10 [min-max; #] : 0 0 0 0 0 0 0 0 0 0 > z [m] : 200 > azimuth [°] : 0 > dip [°] : 0 Required ab's : 11 ~ :: empymod END; runtime = 0:00:00.005536 :: 1 kernel call(s) ~ >>> print(EMfield) [ 1.68809346e-10 -3.08303130e-10j -8.77189179e-12 -3.76920235e-11j -3.46654704e-12 -4.87133683e-12j -3.60159726e-13 -1.12434417e-12j 1.87807271e-13 -6.21669759e-13j 1.97200208e-13 -4.38210489e-13j 1.44134842e-13 -3.17505260e-13j 9.92770406e-14 -2.33950871e-13j 6.75287598e-14 -1.74922886e-13j 4.62724887e-14 -1.32266600e-13j]
22,743
def get_manual_intervention(self, project, release_id, manual_intervention_id): route_values = {} if project is not None: route_values[] = self._serialize.url(, project, ) if release_id is not None: route_values[] = self._serialize.url(, release_id, ) if manual_intervention_id is not None: route_values[] = self._serialize.url(, manual_intervention_id, ) response = self._send(http_method=, location_id=, version=, route_values=route_values) return self._deserialize(, response)
GetManualIntervention. [Preview API] Get manual intervention for a given release and manual intervention id. :param str project: Project ID or project name :param int release_id: Id of the release. :param int manual_intervention_id: Id of the manual intervention. :rtype: :class:`<ManualIntervention> <azure.devops.v5_1.release.models.ManualIntervention>`
22,744
def hash32(data: Any, seed=0) -> int:
    with MultiTimerContext(timer, TIMING_HASH):
        c_data = to_str(data)
        if mmh3:
            return mmh3.hash(c_data, seed=seed)
        py_data = to_bytes(c_data)
        py_unsigned = murmur3_x86_32(py_data, seed=seed)
        return twos_comp_to_signed(py_unsigned, n_bits=32)
Non-cryptographic, deterministic, fast hash. Args: data: data to hash seed: seed Returns: signed 32-bit integer
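When the optional mmh3 package is installed, the fast path is a direct call to mmh3.hash, which already returns a signed 32-bit integer; a minimal sketch assuming mmh3 is available (input strings are made up):

    import mmh3

    print(mmh3.hash("patient-12345", seed=0))   # deterministic signed 32-bit int
    print(mmh3.hash("patient-12345", seed=1))   # different seed, different hash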
22,745
def read(self):
    try:
        buf = os.read(self._fd, 8)
    except OSError as e:
        raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
    return int(buf)
Read the brightness of the LED. Returns: int: Current brightness. Raises: LEDError: if an I/O or OS error occurs.
22,746
def make_thematic_png(self, outpath=None): from matplotlib.patches import Patch fig, previewax = plt.subplots() shape = self.thmap.shape previewax.imshow(self.thmap, origin=, interpolation=, cmap=self.config.solar_cmap, vmin=-1, vmax=len(self.config.solar_classes)-1) legend_elements = [Patch(facecolor=c, label=sc, edgecolor=) for sc, c in self.config.solar_colors.items()] previewax.legend(handles=legend_elements, fontsize=, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.) previewax.set_xlim([0, shape[0]]) previewax.set_ylim([0, shape[0]]) previewax.set_aspect("equal") previewax.set_axis_off() if outpath: fig.savefig(outpath, dpi=300, transparent=True, bbox_inches=, pad_inches=0.) plt.close() else: plt.show()
Convert a thematic map into png format with a legend :param outpath: if specified, will save the image instead of showing it
22,747
def validate_all_keys(obj_name, obj, validation_fun):
    for key, value in obj.items():
        validation_fun(obj_name, key)
        if isinstance(value, dict):
            validate_all_keys(obj_name, value, validation_fun)
Validate all (nested) keys in `obj` by using `validation_fun`. Args: obj_name (str): name for `obj` being validated. obj (dict): dictionary object. validation_fun (function): function used to validate the value of `key`. Returns: None: indicates validation successful Raises: ValidationError: `validation_fun` will raise this error on failure
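A usage sketch for the recursive validator above; the validation function here is a made-up example that rejects keys containing dots:

    def no_dots(obj_name, key):
        if '.' in key:
            raise ValueError('{}: key {!r} must not contain dots'.format(obj_name, key))

    validate_all_keys('tx', {'meta': {'a': 1}}, no_dots)     # passes silently
    validate_all_keys('tx', {'meta': {'a.b': 1}}, no_dots)   # raises ValueError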
22,748
def encode_int(self, n):
    # list renamed from 'str' to avoid shadowing the builtin; the empty-string
    # join literal was lost in extraction and is restored here
    chars = []
    while True:
        n, r = divmod(n, self.BASE)
        chars.append(self.ALPHABET[r])
        if n == 0:
            break
    return ''.join(reversed(chars))
Encodes an integer into a short Base64 string. Example: ``encode_int(123)`` returns ``'B7'``.
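The ALPHABET and BASE attributes are not shown in the snippet; a standalone sketch with an assumed URL-safe alphabet that is consistent with the documented encode_int(123) == 'B7' example:

    ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
    BASE = len(ALPHABET)   # 64

    def encode_int(n):
        chars = []
        while True:
            n, r = divmod(n, BASE)
            chars.append(ALPHABET[r])
            if n == 0:
                break
        return ''.join(reversed(chars))

    print(encode_int(123))   # 'B7'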
22,749
def list_public_containers(self):
    resp, resp_body = self.api.cdn_request("", "GET")
    return [cont["name"] for cont in resp_body]
Returns a list of the names of all CDN-enabled containers.
22,750
def _set_scores(self): anom_scores = {} for i, (timestamp, value) in enumerate(self.time_series.items()): baseline_value = self.baseline_time_series[i] if baseline_value > 0: diff_percent = 100 * (value - baseline_value) / baseline_value elif value > 0: diff_percent = 100.0 else: diff_percent = 0.0 anom_scores[timestamp] = 0.0 if self.percent_threshold_upper and diff_percent > 0 and diff_percent > self.percent_threshold_upper: anom_scores[timestamp] = diff_percent if self.percent_threshold_lower and diff_percent < 0 and diff_percent < self.percent_threshold_lower: anom_scores[timestamp] = -1 * diff_percent self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
Compute anomaly scores for the time series This algorithm just takes the diff of threshold with current value as anomaly score
22,751
def create_box_field(self, box_key, name, field_type, **kwargs): self._raise_unimplemented_error() uri = .join([self.api_uri, self.boxes_suffix, box_key, self.fields_suffix ]) code, data = self._create_field(uri, name, field_type, **kwargs) return code, data
Creates a box field with the provided attributes. Args: box_key specifying the box to add the field to name required name string field_type required type string [TEXT_INPUT, DATE or PERSON] kwargs {} return (status code, field dict)
22,752
def run(self, args): project_name_or_id = self.create_project_name_or_id_from_args(args) folder = args.folder if not folder: folder = replace_invalid_path_chars(project_name_or_id.value.replace(, )) destination_path = format_destination_path(folder) path_filter = PathFilter(args.include_paths, args.exclude_paths) project = self.fetch_project(args, must_exist=True) project_download = ProjectDownload(self.remote_store, project, destination_path, path_filter) project_download.run()
Download a project based on passed in args. :param args: Namespace arguments parsed from the command line.
22,753
def generate_single_seasonal_average(args): qout_file = args[0] seasonal_average_file = args[1] day_of_year = args[2] mp_lock = args[3] min_day = day_of_year - 3 max_day = day_of_year + 3 with RAPIDDataset(qout_file) as qout_nc_file: time_indices = [] for idx, t in enumerate(qout_nc_file.get_time_array()): var_time = gmtime(t) compare_yday = var_time.tm_yday if isleap(var_time.tm_year) and compare_yday > 60: compare_yday -= 1 if max_day > compare_yday >= min_day: time_indices.append(idx) if not time_indices: raise IndexError("No time steps found within range ...") streamflow_array = qout_nc_file.get_qout(time_index_array=time_indices) avg_streamflow_array = np.mean(streamflow_array, axis=1) std_streamflow_array = np.std(streamflow_array, axis=1) max_streamflow_array = np.amax(streamflow_array, axis=1) min_streamflow_array = np.min(streamflow_array, axis=1) mp_lock.acquire() seasonal_avg_nc = Dataset(seasonal_average_file, ) seasonal_avg_nc.variables[][:, day_of_year-1] = \ avg_streamflow_array seasonal_avg_nc.variables[][:, day_of_year-1] = \ std_streamflow_array seasonal_avg_nc.variables[][:, day_of_year-1] = \ max_streamflow_array seasonal_avg_nc.variables[][:, day_of_year-1] = \ min_streamflow_array seasonal_avg_nc.close() mp_lock.release()
This function calculates the seasonal average for a single day of the year for all river segments
22,754
def search_records(self, domain, record_type, name=None, data=None):
    return domain.search_records(record_type=record_type, name=name,
                                 data=data)
Returns a list of all records configured for the specified domain that match the supplied search criteria.
22,755
def _parse_astorb_database_file( self, astorbgz): self.log.info() print "Parsing the astorb.dat orbital elements file" with gzip.open(astorbgz, ) as f: thisData = f.read() astorbDictList = [] lines = thisData.split("\n") for l in lines: if len(l) < 50: continue d = {} d["mpc_number"] = l[0:7].strip() d["name"] = l[7:26].strip() d["discoverer"] = l[26:41].strip() d["H_abs_mag"] = l[41:48].strip() d["G_slope"] = l[48:54].strip() d["color_b_v"] = l[54:59].strip() d["diameter_km"] = l[59:65].strip() d["class"] = l[65:71].strip() d["orbital_arc_days"] = l[95:101].strip() d["number_obs"] = l[101:106].strip() d["epoch"] = l[106:115].strip() d["M_mean_anomaly_deg"] = l[115:126].strip() d["o_arg_peri_deg"] = l[126:137].strip() d["O_long_asc_node_deg"] = l[137:148].strip() d["i_inclination_deg"] = l[148:158].strip() d["e_eccentricity"] = l[158:169].strip() d["a_semimajor_axis"] = l[169:182].strip() d["orbit_comp_date"] = l[182:191].strip() d["ephem_uncertainty_arcsec"] = l[191:199].strip() d["ephem_uncertainty_change_arcsec_day"] = l[199:208].strip() d["ephem_uncertainty_date"] = l[208:217].strip() yyyy = int(d["epoch"][:4]) mm = int(d["epoch"][4:6]) dd = int(d["epoch"][6:]) d["epoch_xeph"] = "%(mm)s/%(dd)s/%(yyyy)s" % locals() xephemStr = "%(mpc_number)s %(name)s,e,%(i_inclination_deg)s,%(O_long_asc_node_deg)s,%(o_arg_peri_deg)s,%(a_semimajor_axis)s,0,%(e_eccentricity)s,%(M_mean_anomaly_deg)s,%(epoch_xeph)s,2000.0,%(H_abs_mag)s,%(G_slope)s" % d xephemStr = xephemStr.strip() d["pyephem_string"] = xephemStr d["astorb_string"] = l if len(d["mpc_number"]) == 0: d["mpc_number"] = None for k, v in d.iteritems(): if v != None and len(v) == 0: d[k] = None astorbDictList.append(d) print "Finshed parsing the astorb.dat orbital elements file" self.log.info() return astorbDictList
* parse astorb database file* **Key Arguments:** - ``astorbgz`` -- path to the downloaded astorb database file **Return:** - ``astorbDictList`` -- the astorb database parsed as a list of dictionaries
22,756
def getFeatureID(self, location):
    truthyFeature = self.contains(location)
    if not truthyFeature:
        return self.EMPTY_FEATURE
    # the first two comparison literals were lost in extraction; they are
    # reconstructed to mirror the surviving "edge"/"surface" branches and the
    # FLAT/POINTY return values
    elif truthyFeature == "flat":
        return self.FLAT
    elif truthyFeature == "pointy":
        return self.POINTY
    elif truthyFeature == "edge":
        return self.EDGE
    elif truthyFeature == "surface":
        return self.SURFACE
    else:
        return self.EMPTY_FEATURE
Returns the feature index associated with the provided location. In the case of a sphere, it is always the same if the location is valid.
22,757
def fuzzy_search_by_title(self, title, ignore_groups=None): entries = [] for entry in self.entries: if entry.title == title: entries.append(entry) if entries: return self._filter_entries(entries, ignore_groups) title_lower = title.lower() for entry in self.entries: if entry.title.lower() == title.lower(): entries.append(entry) if entries: return self._filter_entries(entries, ignore_groups) for entry in self.entries: if self._is_subsequence(title_lower, entry.title.lower()): entries.append(entry) if entries: return self._filter_entries(entries, ignore_groups) entry_map = {entry.title.lower(): entry for entry in self.entries} matches = difflib.get_close_matches( title.lower(), entry_map.keys(), cutoff=0.7) if matches: return self._filter_entries( [entry_map[name] for name in matches], ignore_groups) return []
Find an entry by by fuzzy match. This will check things such as: * case insensitive matching * typo checks * prefix matches If the ``ignore_groups`` argument is provided, then any matching entries in the ``ignore_groups`` list will not be returned. This argument can be used to filter out groups you are not interested in. Returns a list of matches (an empty list is returned if no matches are found).
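The last fallback relies on difflib.get_close_matches; a standalone illustration of that stage (the titles are made up):

    import difflib

    titles = {'github login': 'entry-1', 'personal email': 'entry-2'}
    matches = difflib.get_close_matches('githb login', titles.keys(), cutoff=0.7)
    print(matches)                       # ['github login']
    print([titles[m] for m in matches])  # ['entry-1']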
22,758
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path, res_x=150, res_y=150, extra_args=None): if extra_args is None: extra_args = [] if not pdftoppm_executable: init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True) if old_pdftoppm_version: command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name, root_output_file_path] else: command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y, pdf_file_name, root_output_file_path] comm_output = get_external_subprocess_output(command) return comm_output
Use the pdftoppm program to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Extra arguments can be passed as a list in extra_args. Return the command output.
22,759
def predict(self, n_periods=10, exogenous=None, return_conf_int=False, alpha=0.05, **kwargs): check_is_fitted(self, "steps_") if isinstance(transformer, BaseExogFeaturizer): num_p = kw.get("n_periods", None) if num_p is not None and num_p != n_periods: raise ValueError("Manually set kwarg for " "step differs from forecasting " "n_periods (%r != %r)" % (name, num_p, n_periods)) kw["n_periods"] = n_periods _, Xt = transformer.transform(y=None, exogenous=Xt, **kw) nm, est = self.steps_[-1] return est.predict( n_periods=n_periods, exogenous=Xt, return_conf_int=return_conf_int, alpha=alpha, **named_kwargs[nm])
Forecast future (transformed) values Generate predictions (forecasts) ``n_periods`` in the future. Note that if ``exogenous`` variables were used in the model fit, they will be expected for the predict procedure and will fail otherwise. Forecasts may be transformed by the endogenous steps along the way and might be on a different scale than raw training/test data. Parameters ---------- n_periods : int, optional (default=10) The number of periods in the future to forecast. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. return_conf_int : bool, optional (default=False) Whether to get the confidence intervals of the forecasts. alpha : float, optional (default=0.05) The confidence intervals for the forecasts are (1 - alpha) % **kwargs : keyword args Extra keyword arguments used for each stage's ``transform`` stage and the estimator's ``predict`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__". For instance, if you have a FourierFeaturizer whose stage is named "fourier", your transform kwargs could resemble:: {"fourier__n_periods": 50} Returns ------- forecasts : array-like, shape=(n_periods,) The array of transformed, forecasted values. conf_int : array-like, shape=(n_periods, 2), optional The confidence intervals for the forecasts. Only returned if ``return_conf_int`` is True.
22,760
def mass_3d(self, R, Rs, rho0, r_trunc): x = R * Rs ** -1 func = (r_trunc ** 2 * (-2 * x * (1 + r_trunc ** 2) + 4 * (1 + x) * r_trunc * np.arctan(x / r_trunc) - 2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs) + 2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs * (1 + x)) + 2 * (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs * r_trunc) - (1 + x) * (-1 + r_trunc ** 2) * np.log(Rs ** 2 * (x ** 2 + r_trunc ** 2)))) / (2. * (1 + x) * (1 + r_trunc ** 2) ** 2) m_3d = 4*np.pi*Rs ** 3 * rho0 * func return m_3d
mass enclosed a 3d sphere or radius r :param r: :param Ra: :param Rs: :return:
22,761
def to_schema(self): process_type = self.metadata.process_type if not process_type.endswith(): process_type = .format(process_type) schema = { : self.metadata.slug, : self.metadata.name, : process_type, : self.metadata.version, : , : { : { : { : , }, }, }, } if self.metadata.description is not None: schema[] = self.metadata.description if self.metadata.category is not None: schema[] = self.metadata.category if self.metadata.scheduling_class is not None: schema[] = self.metadata.scheduling_class if self.metadata.persistence is not None: schema[] = self.metadata.persistence if self.metadata.requirements is not None: schema[] = self.metadata.requirements if self.metadata.data_name is not None: schema[] = self.metadata.data_name if self.metadata.entity is not None: schema[] = self.metadata.entity if self.inputs: schema[] = [] for field in self.inputs.values(): schema[].append(field.to_schema()) if self.outputs: schema[] = [] for field in self.outputs.values(): schema[].append(field.to_schema()) schema[] = { : , : self.source or , } return schema
Return process schema for this process.
22,762
def create_calc_dh_d_shape(estimator):
    dh_d_shape = estimator.rows_to_alts.copy()
    calc_dh_d_shape = partial(_uneven_transform_deriv_shape,
                              output_array=dh_d_shape)
    return calc_dh_d_shape
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the shape parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of shape parameters. The dimensions of the returned vector should be `(design.shape[0], num_alternatives)`.
22,763
def wait_for(self, text, seconds): found = False stream = self.stream start_time = time.time() while not found: if time.time() - start_time > seconds: break stream.data_available.wait(0.5) stream.data_unoccupied.clear() while stream.data: line = stream.data.pop(0) value = line.getvalue() if text in value: found = True self.lines.append(value) stream.data_available.clear() stream.data_unoccupied.set() if time.time() - start_time > seconds: break return found
Returns True when the specified text has appeared in a line of the output, or False when the specified number of seconds have passed without that occurring.
22,764
def point_before_card(self, card, x, y): def ycmp(): if self.card_y_hint_step == 0: return False elif self.card_y_hint_step > 0: return y < card.y else: return y > card.top if self.card_x_hint_step > 0: if x < card.x: return True return ycmp() elif self.card_x_hint_step == 0: return ycmp() else: if x > card.right: return True return ycmp()
Return whether ``(x, y)`` is somewhere before ``card``, given how I know cards to be arranged. If the cards are being stacked down and to the right, that means I'm testing whether ``(x, y)`` is above or to the left of the card.
22,765
def find_largest_contig(self):
    for sample in self.metadata:
        # max() restored to match the docstring; the original expression
        # appears to have been truncated during extraction
        sample[self.analysistype].longest_contig = \
            max(sample[self.analysistype].contig_lengths)
Determine the largest contig for each strain
22,766
def Stiel_Thodos(T, Tc, Pc, MW):
    # the stray raw-string prefix left over from the extracted docstring was removed
    Pc = Pc/101325.
    Tr = T/Tc
    xi = Tc**(1/6.)/(MW**0.5*Pc**(2/3.))
    if Tr > 1.5:
        mu_g = 17.78E-5*(4.58*Tr - 1.67)**0.625/xi
    else:
        mu_g = 34E-5*Tr**0.94/xi
    return mu_g/1000.
r'''Calculates the viscosity of a gas using an emperical formula developed in [1]_. .. math:: TODO Parameters ---------- T : float Temperature of the fluid [K] Tc : float Critical temperature of the fluid [K] Pc : float Critical pressure of the fluid [Pa] MW : float Molwcular weight of fluid [g/mol] Returns ------- mu_g : float Viscosity of gas, [Pa*S] Notes ----- Untested. Claimed applicability from 0.2 to 5 atm. Developed with data from 52 nonpolar, and 53 polar gases. internal units are poise and atm. Seems to give reasonable results. Examples -------- >>> Stiel_Thodos(300., 556.35, 4.5596E6, 153.8) #CCl4 1.0408926223608723e-05 References ---------- .. [1] Stiel, Leonard I., and George Thodos. "The Viscosity of Nonpolar Gases at Normal Pressures." AIChE Journal 7, no. 4 (1961): 611-15. doi:10.1002/aic.690070416.
22,767
def add(self, name, path=None, **kwargs): path = path or kwargs.pop(, None) if not self._path_is_valid(path): return if not self._is_unique(name, path): p = Project.select().where( (Project.name == name) | (Project.path == path) )[0] self._print(self._ERROR_PROJECT_EXISTS.format(name, p.path), ) return Project.create(name=name, path=path) self._print(self._SUCCESS_PROJECT_ADDED.format(name), )
add new project with given name and path to database if the path is not given, current working directory will be taken ...as default
22,768
def get_dashboard_panels_visibility_by_section(section_name):
    registry_info = get_dashboard_registry_record()
    if section_name not in registry_info:
        # Section not registered yet: initialise it and look it up again
        setup_dashboard_panels_visibility_registry(section_name)
        return get_dashboard_panels_visibility_by_section(section_name)
    # The stored value is assumed to be a flat sequence of role/visibility pairs
    pairs = registry_info.get(section_name)
    if len(pairs) == 0 or len(pairs) % 2 != 0:
        # Malformed value: reset the registry entry for this section and retry
        setup_dashboard_panels_visibility_registry(section_name)
        return get_dashboard_panels_visibility_by_section(section_name)
    result = [
        (pairs[i], pairs[i + 1])
        for i in range(len(pairs)) if i % 2 == 0]
    return result
Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples.
22,769
def _make_request(self, bbox, meta_info, timestamps):
    # NOTE: the string keys looked up in `meta_info` were lost in extraction;
    # 'service_type', 'time_interval', 'maxcc' and 'time_difference' are assumed here.
    service_type = ServiceType(meta_info['service_type'])
    if self.cm_size_x is None and self.cm_size_y is None:
        raise ValueError("Specify size_x and size_y for data request")
    if service_type == ServiceType.WCS:
        if self.cm_size_y is None:
            self.cm_size_y = self.cm_size_x
        elif self.cm_size_x is None:
            self.cm_size_x = self.cm_size_y
    custom_url_params = {CustomUrlParam.SHOWLOGO: False,
                         CustomUrlParam.TRANSPARENT: False,
                         CustomUrlParam.EVALSCRIPT: self.model_evalscript}
    request = {ServiceType.WMS: self._get_wms_request,
               ServiceType.WCS: self._get_wcs_request}[service_type](bbox,
                                                                     meta_info['time_interval'],
                                                                     self.cm_size_x, self.cm_size_y,
                                                                     meta_info['maxcc'],
                                                                     meta_info['time_difference'],
                                                                     custom_url_params)
    request_dates = request.get_dates()
    download_frames = get_common_timestamps(request_dates, timestamps)
    request_return = request.get_data(raise_download_errors=False, data_filter=download_frames)
    bad_data = [idx for idx, value in enumerate(request_return) if value is None]
    for idx in reversed(sorted(bad_data)):
        # Original log message lost in extraction; a generic one is used here
        LOGGER.warning('Removed data for frame %s due to unavailability of %s',
                       str(request_dates[idx]), self.data_feature)
        del request_return[idx]
        del request_dates[idx]
    return np.asarray(request_return), request_dates
Make OGC request to create input for cloud detector classifier :param bbox: Bounding box :param meta_info: Meta-info dictionary of input eopatch :param timestamps: Timestamps of the input eopatch, used to filter the downloaded frames :return: Requested data and the corresponding acquisition dates
22,770
def force_encoding(self, encoding): if not encoding: self.disabled = False else: self.write_with_encoding(encoding, None) self.disabled = True
Sets a fixed encoding. The change is emitted right away. From now on, this buffer will not switch the code page anymore. However, it will still keep track of the current code page.
22,771
def calc_in_wc_v1(self): con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nmbzones): if con.zonetype[k] != ILAKE: flu.in_[k] = max(sta.wc[k]-con.whc[k]*sta.sp[k], 0.) sta.wc[k] -= flu.in_[k] else: flu.in_[k] = flu.tf[k] sta.wc[k] = 0.
Calculate the actual water release from the snow layer due to the exceedance of the snow layers capacity for (liquid) water. Required control parameters: |NmbZones| |ZoneType| |WHC| Required state sequence: |SP| Required flux sequence |TF| Calculated fluxes sequences: |In_| Updated state sequence: |WC| Basic equations: :math:`\\frac{dWC}{dt} = -In` \n :math:`-In = max(WC - WHC \\cdot SP, 0)` Examples: Initialize six zones of different types and frozen water contents of the snow layer and set the relative water holding capacity to 20% of the respective frozen water content: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> nmbzones(6) >>> zonetype(ILAKE, GLACIER, FIELD, FOREST, FIELD, FIELD) >>> whc(0.2) >>> states.sp = 0.0, 10.0, 10.0, 10.0, 5.0, 0.0 Also set the actual value of stand precipitation to 5 mm/d: >>> fluxes.tf = 5.0 When there is no (liquid) water content in the snow layer, no water can be released: >>> states.wc = 0.0 >>> model.calc_in_wc_v1() >>> fluxes.in_ in_(5.0, 0.0, 0.0, 0.0, 0.0, 0.0) >>> states.wc wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) When there is a (liquid) water content in the snow layer, the water release depends on the frozen water content. Note the special cases of the first zone being an internal lake, for which the snow routine does not apply, and of the last zone, which has no ice content and thus effectively not really a snow layer: >>> states.wc = 5.0 >>> model.calc_in_wc_v1() >>> fluxes.in_ in_(5.0, 3.0, 3.0, 3.0, 4.0, 5.0) >>> states.wc wc(0.0, 2.0, 2.0, 2.0, 1.0, 0.0) When the relative water holding capacity is assumed to be zero, all liquid water is released: >>> whc(0.0) >>> states.wc = 5.0 >>> model.calc_in_wc_v1() >>> fluxes.in_ in_(5.0, 5.0, 5.0, 5.0, 5.0, 5.0) >>> states.wc wc(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) Note that for the single lake zone, stand precipitation is directly passed to `in_` in all three examples.
22,772
def fit(self, X, y, model_filename=None): train_file = "temp.train" X = [x.replace("\n", " ") for x in X] y = [item[0] for item in y] y = [_.replace(" ", "-") for _ in y] lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)] content = "\n".join(lines) write(train_file, content) if model_filename: self.estimator = fasttext.supervised(train_file, model_filename) else: self.estimator = fasttext.supervised(train_file) os.remove(train_file)
Fit FastText according to X, y Parameters ---------- X : list of str each item is a text y : list each item is either a label (in a multi-class problem) or a list of labels (in a multi-label problem)
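A minimal usage sketch; the classifier class name below is assumed (any object exposing this fit method works), and the labels are wrapped in single-item lists to match the item[0] access in the code:

# `FastTextClassifier` is an assumed name for the class that defines fit() above.
model = FastTextClassifier()

X = ["the food was great", "terrible service, never again"]
y = [["positive"], ["negative"]]  # one label per sample, each wrapped in a list

# Writes a temporary fastText training file of "__label__<label> , <text>" lines
# and trains a supervised model from it.
model.fit(X, y, model_filename="sentiment_model")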
22,773
def random(self, shape, tf_fn, kwargs): slice_shape = self.slice_shape(shape) op_seed = random.random() def my_fn(pnum): seed = hash("%s,%s" % (op_seed, self.slice_begin(shape, pnum))) return tf_fn(slice_shape, seed=seed, **kwargs) return self.slicewise(my_fn, self.laid_out_pnum())
Call a random tf operation (e.g. tf.random.uniform). Args: shape: a Shape tf_fn: a function such as tf.random.uniform kwargs: kwargs to pass to tf_fn, except for seed Returns: a LaidOutTensor
22,774
def dist_between(h,seg1,seg2): h.distance(0, seg1.x, sec=seg1.sec) return h.distance(seg2.x, sec=seg2.sec)
Calculates the distance between two segments. I stole this function from a post by Michael Hines on the NEURON forum (www.neuron.yale.edu/phpbb/viewtopic.php?f=2&t=2114)
22,775
def doc_dir(self): from os.path import abspath, dirname if not self.ref: return None u = parse_app_url(self.ref) return abspath(dirname(u.path))
The absolute directory of the document
22,776
def _wait_for_macaroon(wait_url):
    headers = {
        BAKERY_PROTOCOL_HEADER: str(bakery.LATEST_VERSION)
    }
    resp = requests.get(url=wait_url, headers=headers)
    if resp.status_code != 200:
        # Exact message lost in extraction; a descriptive one is assumed here
        raise InteractionError('cannot get {}'.format(wait_url))
    # The wait endpoint is assumed to return the macaroon under the 'Macaroon' key
    return bakery.Macaroon.from_dict(resp.json().get('Macaroon'))
Returns a macaroon from a legacy wait endpoint.
22,777
def tool_factory(clsname, name, driver, base=GromacsCommand):
    # The dict keys were lost in extraction; the attribute names used by
    # GromacsCommand ('command_name', 'driver', '__doc__') are assumed here.
    clsdict = {
        'command_name': name,
        'driver': driver,
        '__doc__': property(base._get_gmx_docs),
    }
    return type(clsname, (base,), clsdict)
Factory for GromacsCommand derived types.
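A usage sketch under the GromacsWrapper convention of wrapping individual gmx subcommands; the tool and driver names below are illustrative:

# Build a command class for "gmx grompp" and instantiate it like any GromacsCommand.
Grompp = tool_factory('Grompp', 'grompp', 'gmx')
grompp = Grompp()
# grompp(f='md.mdp', c='conf.gro', p='topol.top', o='topol.tpr')  # would run `gmx grompp ...`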
22,778
async def get_power_parameters_for( cls, system_ids: typing.Sequence[str]): if len(system_ids) == 0: return {} data = await cls._handler.power_parameters(id=system_ids) return data
Get a list of power parameters for specified systems. *WARNING*: This method is considered 'alpha' and may be modified in future. :param system_ids: The system IDs to get power parameters for
22,779
def _to_url(self):
    url = self._target_url
    params = collections.defaultdict(list, copy.deepcopy(self._filters))
    if self._order_by is not None:
        # The query-string key was lost in extraction; 'order_by' is assumed here
        params['order_by'] = self._order_by
    for k, vl in self._extra.items():
        params[k] += vl
    if params:
        url += "?" + urllib.parse.urlencode(params, doseq=True)
    return url
Serialises this query into a request-able URL including parameters
22,780
def is_table_existed(self, tablename): all_tablenames = self.list_tables() tablename = tablename.lower() if tablename in all_tablenames: return True else: return False
Check whether the given table name exists in this database. Return boolean.
22,781
def encode_plus(s): regex = r"\+" pat = re.compile(regex) return pat.sub("%2B", s)
Literally encodes the plus sign. Input is a string; returns the string with plus signs encoded as %2B.
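A quick doctest-style check of the behaviour:

>>> encode_plus("C++ tips+tricks")
'C%2B%2B tips%2Btricks'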
22,782
def popitem(self): try: value = next(iter(self)) key = value[self._keycol] except StopIteration: raise KeyError del self[key] return key, value
D.popitem() -> (k, v) Remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty.
22,783
def canonicalize(cls, dataset, data, data_coords=None, virtual_coords=[]):
    if data_coords is None:
        # The string arguments were lost in extraction;
        # dimensions('key', label='name') is assumed here.
        data_coords = dataset.dimensions('key', label='name')[::-1]
    # Drop constant dimensions not present in the key dimensions
    dims = [name for name in data_coords
            if isinstance(cls.coords(dataset, name), get_array_types())]
    dropped = [dims.index(d) for d in dims
               if d not in dataset.kdims+virtual_coords]
    if dropped:
        data = np.squeeze(data, axis=tuple(dropped))
    if not any(cls.irregular(dataset, d) for d in dataset.kdims):
        inds = [dims.index(kd.name) for kd in dataset.kdims]
        inds = [i - sum([1 for d in dropped if i>=d]) for i in inds]
        if inds:
            data = data.transpose(inds[::-1])
    # Invert axes whose coordinates are in descending order
    invert = False
    slices = []
    for d in dataset.kdims[::-1]:
        coords = cls.coords(dataset, d)
        if np.all(coords[1:] < coords[:-1]) and not coords.ndim > 1:
            slices.append(slice(None, None, -1))
            invert = True
        else:
            slices.append(slice(None))
    data = data[tuple(slices)] if invert else data
    if len(dataset.kdims) < 2:
        data = data.flatten()
    return data
Canonicalize takes an array of values as input and reorients and transposes it to match the canonical format expected by plotting functions. In certain cases the dimensions defined via the kdims of an Element may not match the dimensions of the underlying data. A set of data_coords may be passed in to define the dimensionality of the data, which can then be used to np.squeeze the data to remove any constant dimensions. If the data is also irregular, i.e. contains multi-dimensional coordinates, a set of virtual_coords can be supplied, required by some interfaces (e.g. xarray) to index irregular datasets with a virtual integer index. This ensures these coordinates are not simply dropped.
22,784
def get_buildroot(self, build_id):
    # The dict keys were lost in extraction; the names below follow the
    # Koji content-generator buildroot schema and are assumed.
    docker_info = self.tasker.get_info()
    host_arch, docker_version = get_docker_architecture(self.tasker)
    buildroot = {
        'id': 1,
        'host': {
            'os': docker_info['OperatingSystem'],
            'arch': host_arch,
        },
        'content_generator': {
            'name': PROG,
            'version': atomic_reactor_version,
        },
        'container': {
            'type': 'docker',
            'arch': os.uname()[4],
        },
        'tools': [
            {
                'name': tool['name'],
                'version': tool['version'],
            } for tool in get_version_of_tools()] + [
            {
                'name': 'docker',
                'version': docker_version,
            },
        ],
        'components': self.get_rpms(),
        'extra': {
            'osbs': {
                'build_id': build_id,
                'builder_image_id': self.get_builder_image_id(),
            }
        },
    }
    return buildroot
Build the buildroot entry of the metadata. :return: dict, partial metadata
22,785
def info(self): status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id) _checkErr('info', status, "cannot execute") return n_datasets, n_file_attrs
Retrieve information about the SD interface. Args:: no argument Returns:: 2-element tuple holding: number of datasets inside the file number of file attributes C library equivalent : SDfileinfo
22,786
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
                         verbose=False):
    def kwic_string(docgraph, keyword_index):
        tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
        before, keyword, after = get_kwic(tokens, keyword_index)
        # The join separator was lost in extraction; a single space is assumed
        return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
            docgraph.name, keyword_index, ' '.join(before), keyword,
            ' '.join(after))

    old_token_gen = docgraph_with_old_names.get_tokens()
    new_token_gen = docgraph_with_new_names.get_tokens()

    old2new = {}
    for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
        old_tok_id, old_tok = old_token_gen.next()
        if new_tok != old_tok:  # token mismatch
            if verbose:
                raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
                    kwic_string(docgraph_with_old_names, i),
                    kwic_string(docgraph_with_new_names, i)))
            raise ValueError(
                u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
                "\t{4} != {5}".format(
                    docgraph_with_new_names.name, docgraph_with_new_names.ns,
                    docgraph_with_old_names.name, docgraph_with_old_names.ns,
                    new_tok, old_tok).encode('utf-8'))
        else:
            old2new[old_tok_id] = new_tok_id
    return old2new
given two document graphs which annotate the same text and which use the same tokenization, creates a dictionary with a mapping from the token IDs used in the first graph to the token IDs used in the second graph. Parameters ---------- docgraph_with_old_names : DiscourseDocumentGraph a document graph with token IDs that will be replaced later on docgraph_with_new_names : DiscourseDocumentGraph a document graph with token IDs that will replace the token IDs used in ``docgraph_with_old_names`` later on Returns ------- old2new : dict maps from a token ID used in ``docgraph_with_old_names`` to the token ID used in ``docgraph_with_new_names`` to reference the same token
22,787
def get_issue_comments(self): return github.PaginatedList.PaginatedList( github.IssueComment.IssueComment, self._requester, self.issue_url + "/comments", None )
:calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
22,788
def fn_from_str(name: str) -> Callable[..., Any]:
    try:
        # The separator literal was lost in extraction; the
        # 'module.submodule:function_name' form is assumed here.
        module_name, fn_name = name.split(':')
    except ValueError:
        # Exact error message lost in extraction; a descriptive one is assumed
        raise ConfigError(
            'Expected a function name in the module.submodule:function_name form, got {}'.format(name))
    return getattr(importlib.import_module(module_name), fn_name)
Returns a function object with the name given in string.
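A usage sketch, assuming the module:function form noted in the code above (shown here with a standard-library function):

join = fn_from_str('os.path:join')  # -> <function join>
print(join('a', 'b'))               # 'a/b' on POSIX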
22,789
def to_new(self, data, k=None, sigma=None, return_distances=False): n_samples = data.shape[0] n_reference_samples = self.n_samples if k is None: k = self.k elif k >= n_reference_samples: raise ValueError( "`k` (%d) cannot be larger than the number of reference " "samples (%d)." % (k, self.n_samples) ) if sigma is None: sigma = self.sigma neighbors, distances = self.knn_index.query(data, k) conditional_P = np.exp(-distances ** 2 / (2 * sigma ** 2)) conditional_P /= np.sum(conditional_P, axis=1)[:, np.newaxis] P = sp.csr_matrix( (conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_reference_samples), ) if return_distances: return P, neighbors, distances return P
Compute the affinities of new samples to the initial samples. This is necessary for embedding new data points into an existing embedding. Parameters ---------- data: np.ndarray The data points to be added to the existing embedding. k: int The number of nearest neighbors to consider for each kernel. sigma: float The bandwidth to use for the Gaussian kernels in the ambient space. return_distances: bool If needed, the function can return the indices of the nearest neighbors and their corresponding distances. Returns ------- P: array_like An :math:`N \\times M` affinity matrix expressing interactions between :math:`N` new data points the initial :math:`M` data samples. indices: np.ndarray Returned if ``return_distances=True``. The indices of the :math:`k` nearest neighbors in the existing embedding for every new data point. distances: np.ndarray Returned if ``return_distances=True``. The distances to the :math:`k` nearest neighbors in the existing embedding for every new data point.
22,790
def create_saml_provider(name, saml_metadata_document, region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.create_saml_provider(saml_metadata_document, name)
        # Exact log wording lost in extraction; descriptive messages are assumed here
        log.info('Successfully created SAML provider %s.', name)
        return True
    except boto.exception.BotoServerError as e:
        aws = __utils__['boto.get_error'](e)  # 'boto.get_error' key assumed
        log.debug(aws)
        log.error('Failed to create SAML provider %s.', name)
        return False
Create SAML provider CLI Example: .. code-block:: bash salt myminion boto_iam.create_saml_provider my_saml_provider_name saml_metadata_document
22,791
def bounding_box(img): locations = numpy.argwhere(img) mins = locations.min(0) maxs = locations.max(0) + 1 return [slice(x, y) for x, y in zip(mins, maxs)]
r""" Return the bounding box incorporating all non-zero values in the image. Parameters ---------- img : array_like An array containing non-zero objects. Returns ------- bbox : a list of slicer objects defining the bounding box
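A doctest-style illustration:

>>> import numpy
>>> img = numpy.zeros((5, 5))
>>> img[1:3, 2:4] = 1
>>> bounding_box(img)
[slice(1, 3, None), slice(2, 4, None)]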
22,792
def search_account_domains(self, domain=None, latitude=None, longitude=None, name=None): path = {} data = {} params = {} if name is not None: params["name"] = name if domain is not None: params["domain"] = domain if latitude is not None: params["latitude"] = latitude if longitude is not None: params["longitude"] = longitude self.logger.debug("GET /api/v1/accounts/search with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/search".format(**path), data=data, params=params, no_data=True)
Search account domains. Returns a list of up to 5 matching account domains. Partial matches on name / domain are supported.
22,793
def ascend_bip32(bip32_pub_node, secret_exponent, child): i_as_bytes = struct.pack(">l", child) sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True) data = sec + i_as_bytes I64 = hmac.HMAC(key=bip32_pub_node._chain_code, msg=data, digestmod=hashlib.sha512).digest() I_left_as_exponent = from_bytes_32(I64[:32]) return (secret_exponent - I_left_as_exponent) % bip32_pub_node._generator.order()
Given a BIP32Node with public derivation child "child" with a known private key, return the secret exponent for the bip32_pub_node.
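For context, in standard non-hardened BIP32 derivation the child secret is k_child = (k_parent + I_left) mod n, where I_left is the left half of HMAC-SHA512(chain_code, serialized_public_key || child_index); the function above simply solves that relation for the parent. A tiny sanity check of the inverted relation, using a small prime as a stand-in for the generator order:

n = 7919  # stand-in for the curve group order
parent_secret, I_left = 1234, 5678
child_secret = (parent_secret + I_left) % n           # forward (child) derivation
assert (child_secret - I_left) % n == parent_secret   # what ascend_bip32 computes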
22,794
def _get_next_available_channel_id(self):
    for index in compatibility.RANGE(self._last_channel_id or 1,
                                     self.max_allowed_channels + 1):
        if index in self._channels:
            continue
        self._last_channel_id = index
        return index
    if self._last_channel_id:
        # Wrap around and search again from the start of the range
        self._last_channel_id = None
        return self._get_next_available_channel_id()
    # Exact message lost in extraction; a descriptive one is assumed here
    raise AMQPConnectionError(
        'reached the maximum number of channels %d' % self.max_allowed_channels)
Returns the next available channel id. :raises AMQPConnectionError: Raises if there is no available channel. :rtype: int
22,795
def scrypt_mcf_check(mcf, password): if isinstance(password, unicode): password = password.encode() elif not isinstance(password, bytes): raise TypeError() if not isinstance(mcf, bytes): raise TypeError() if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll: return _scrypt_str_chk(mcf, password, len(password)) == 0 return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
Returns True if the password matches the given MCF hash
22,796
def clock(rpc): while True: yield from rpc.notify('clock', str(datetime.datetime.now())) yield from asyncio.sleep(1)
This task runs forever and notifies all clients subscribed to 'clock' once a second.
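A sketch of how such a task is typically started, using the same legacy generator-based asyncio style as the function itself; rpc is assumed to be an already-constructed endpoint exposing a notify(topic, value) coroutine:

import asyncio

loop = asyncio.get_event_loop()
# Run the clock notifier in the background for as long as the loop runs.
task = asyncio.ensure_future(clock(rpc))
try:
    loop.run_forever()
finally:
    task.cancel()
    loop.close()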
22,797
def merge_dict(dict1, dict2): if not isinstance(dict1, dict) or not isinstance(dict2, dict): raise ValueError() result = copy.deepcopy(dict1) for k, v in dict2.items(): if k in result and isinstance(result[k], dict): result[k] = merge_dict(result[k], v) else: result[k] = copy.deepcopy(v) return result
Recursively merge dictionaries: dict2 on to dict1. This differs from dict.update() in that values that are dicts are recursively merged. Note that only dict value types are merged, not lists, etc. :param dict dict1: dictionary to merge to :param dict dict2: dictionary to merge with :rtype: dict :return: merged dictionary
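A doctest-style illustration of the recursive behaviour:

>>> a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
>>> b = {'db': {'port': 5433}, 'debug': True}
>>> merge_dict(a, b)
{'db': {'host': 'localhost', 'port': 5433}, 'debug': True}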
22,798
def prime_field_inv(a: int, n: int) -> int: if a == 0: return 0 lm, hm = 1, 0 low, high = a % n, n while low > 1: r = high // low nm, new = hm - lm * r, high - low * r lm, low, hm, high = nm, new, lm, low return lm % n
Extended euclidean algorithm to find modular inverses for integers
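A quick check that the result is a modular inverse:

>>> prime_field_inv(3, 7)
5
>>> (3 * 5) % 7
1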
22,799
def init_logger(v_num: int):
    # The level-name strings were lost in extraction; names matching the
    # enum members are assumed here.
    logging.addLevelName(LogLevel.INFO_LV1.value, 'INFO_LV1')
    logging.addLevelName(LogLevel.INFO_LV2.value, 'INFO_LV2')
    logging.addLevelName(LogLevel.INFO_LV3.value, 'INFO_LV3')
    logging.config.dictConfig(create_logger_config({
        0: LogLevel.INFO_LV1,
        1: LogLevel.INFO_LV2,
        2: LogLevel.INFO_LV3,
        3: LogLevel.DEBUG,
    }[v_num]))
Call when initializing Jumeaux !! :return: