Dataset preview columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
14,200
def get_traffic(self, subreddit): url = self.config[].format( subreddit=six.text_type(subreddit)) return self.request_json(url)
Return the json dictionary containing traffic stats for a subreddit. :param subreddit: The subreddit whose /about/traffic page we will collect.
14,201
def ResolveForRead(self, partition_key): intersecting_ranges = self._GetIntersectingRanges(partition_key) collection_links = list() for keyrange in intersecting_ranges: collection_links.append(self.partition_map.get(keyrange)) return collection_links
Resolves the collection for reading/querying the documents based on the partition key. :param dict document: The document to be read/queried. :return: Collection Self link(s) or Name based link(s) which should handle the Read operation. :rtype: list
14,202
def _render(roster_file, **kwargs): renderers = salt.loader.render(__opts__, {}) domain = __opts__.get(, ) try: result = salt.template.compile_template(roster_file, renderers, __opts__[], __opts__[], __opts__[], mask_value=, **kwargs) result.setdefault(, .format(os.path.basename(roster_file), domain)) return result except: log.warning(, roster_file, exc_info=True) return {}
Render the roster file
14,203
def package_releases(self, project_name): try: return self._connection.package_releases(project_name) except Exception as err: raise PyPIClientError(err)
Retrieve the versions from PyPI by ``project_name``. Args: project_name (str): The name of the project we wish to retrieve the versions of. Returns: list: Of string versions.
14,204
def _get_embed(self, embed, vocab_size, embed_size, initializer, dropout, prefix): if embed is None: assert embed_size is not None, \ with self.name_scope(): embed = nn.HybridSequential(prefix=prefix) with embed.name_scope(): embed.add(nn.Embedding(input_dim=vocab_size, output_dim=embed_size, weight_initializer=initializer)) if dropout: embed.add(nn.Dropout(rate=dropout)) assert isinstance(embed, Block) return embed
Construct an embedding block.
14,205
def _send(self, message): message[] = payload = json.dumps(message).encode() if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT: raise AsyncException("Payload too large for async Lambda call") self.response = self.client.invoke( FunctionName=self.lambda_function_name, InvocationType=, Payload=payload ) self.sent = (self.response.get(, 0) == 202)
Given a message, directly invoke the lambda function for this task.
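As an illustrative sketch (not the project's own code), the same direct asynchronous invocation can be done with boto3; the function name and payload here are hypothetical, and a 202 status code means the event was accepted:
import json
import boto3

client = boto3.client("lambda")
payload = json.dumps({"task_path": "example.task", "args": []}).encode()
response = client.invoke(
    FunctionName="my-task-function",  # hypothetical function name
    InvocationType="Event",           # asynchronous invocation
    Payload=payload,
)
sent = response.get("StatusCode") == 202  # 202 = accepted for async execution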
14,206
def get_hotp( secret, intervals_no, as_string=False, casefold=True, digest_method=hashlib.sha1, token_length=6, ): if isinstance(secret, six.string_types): secret = secret.encode() secret = secret.replace(b, b) try: key = base64.b32decode(secret, casefold=casefold) except (TypeError): raise TypeError() msg = struct.pack(, intervals_no) hmac_digest = hmac.new(key, msg, digest_method).digest() ob = hmac_digest[19] if six.PY3 else ord(hmac_digest[19]) o = ob & 15 token_base = struct.unpack(, hmac_digest[o:o + 4])[0] & 0x7fffffff token = token_base % (10 ** token_length) if as_string: return six.b(.format(token_length).format(token)) else: return token
Get HMAC-based one-time password on the basis of given secret and interval number. :param secret: the base32-encoded string acting as secret key :type secret: str or unicode :param intervals_no: interval number used for getting different tokens, it is incremented with each use :type intervals_no: int :param as_string: True if result should be padded string, False otherwise :type as_string: bool :param casefold: True (default), if should accept also lowercase alphabet :type casefold: bool :param digest_method: method of generating digest (hashlib.sha1 by default) :type digest_method: callable :param token_length: length of the token (6 by default) :type token_length: int :return: generated HOTP token :rtype: int or str >>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=1) 765705 >>> get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2) 816065 >>> result = get_hotp(b'MFRGGZDFMZTWQ2LK', intervals_no=2, as_string=True) >>> result == b'816065' True
14,207
def threshold_monitor_hidden_threshold_monitor_Memory_actions(self, **kwargs): config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Memory = ET.SubElement(threshold_monitor, "Memory") actions = ET.SubElement(Memory, "actions") actions.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
14,208
def setLinkState(self, tlsID, tlsLinkIndex, state):
    fullState = list(self.getRedYellowGreenState(tlsID))
    if tlsLinkIndex >= len(fullState):
        raise TraCIException(None, None, "Invalid tlsLinkIndex %s for tls '%s' with maximum index %s." % (
            tlsLinkIndex, tlsID, len(fullState) - 1))
    else:
        fullState[tlsLinkIndex] = state
        self.setRedYellowGreenState(tlsID, "".join(fullState))
setLinkState(string, string, int, string) -> None Sets the state for the given tls and link index. The state must be one of rRgGyYoOu for red, red-yellow, green, yellow, off, where lower case letters mean that the stream has to decelerate. The link index is shown in the GUI when setting the appropriate junction visualization option.
14,209
def getClientIP(request):
    forwardedfor = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwardedfor:
        ip = forwardedfor.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    return ip
Returns the best IP address found from the request
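For illustration (hypothetical header values), the lookup prefers the first address in X-Forwarded-For and falls back to REMOTE_ADDR:
meta = {"HTTP_X_FORWARDED_FOR": "203.0.113.7, 10.0.0.1", "REMOTE_ADDR": "10.0.0.1"}
ip = meta.get("HTTP_X_FORWARDED_FOR", "").split(",")[0] or meta.get("REMOTE_ADDR")
# ip == "203.0.113.7"; without the forwarded header, ip would be "10.0.0.1"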
14,210
def list(self, path, timeout=None): transport = DentFilesyncTransport(self.stream) transport.write_data(, path, timeout) return (DeviceFileStat(dent_msg.name, dent_msg.mode, dent_msg.size, dent_msg.time) for dent_msg in transport.read_until_done(, timeout))
List directory contents on the device. Args: path: List the contents of this directory. timeout: Timeout to use for this operation. Returns: Generator yielding DeviceFileStat tuples representing the contents of the requested path.
14,211
def register_remove_user_command(self, remove_user_func):
    description = "Removes user permission to access a remote project."
    remove_user_parser = self.subparsers.add_parser('remove-user', description=description)
    add_project_name_or_id_arg(remove_user_parser, help_text_suffix="remove a user from")
    user_or_email = remove_user_parser.add_mutually_exclusive_group(required=True)
    add_user_arg(user_or_email)
    add_email_arg(user_or_email)
    remove_user_parser.set_defaults(func=remove_user_func)
Add the remove-user command to the parser and call remove_user_func(project_name, user_full_name) when chosen. :param remove_user_func: func Called when this option is chosen: remove_user_func(project_name, user_full_name).
14,212
def _translate(self, from_str, to_str): return ops.Translate(self, from_str, to_str).to_expr()
Returns string with set of 'from' characters replaced by set of 'to' characters. from_str[x] is replaced by to_str[x]. To avoid unexpected behavior, from_str should be shorter than to_str. Parameters ---------- from_str : string to_str : string Examples -------- >>> import ibis >>> table = ibis.table([('string_col', 'string')]) >>> expr = table.string_col.translate('a', 'b') >>> expr = table.string_col.translate('a', 'bc') Returns ------- translated : string
14,213
def files_view(request): hosts = Host.objects.visible_to_user(request.user) context = {"hosts": hosts} return render(request, "files/home.html", context)
The main filecenter view.
14,214
def ball_count(cls, ball_tally, strike_tally, pitch_res): b, s = ball_tally, strike_tally if pitch_res == "B": if ball_tally < 4: b += 1 elif pitch_res == "S" or pitch_res == "C" or pitch_res == "X": if strike_tally < 3: s += 1 elif pitch_res == "F": if strike_tally < 2: s += 1 return b, s
Ball/Strike counter :param ball_tally: Ball tally :param strike_tally: Strike tally :param pitch_res: pitching result (Retrosheet format) :return: ball count, strike count
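A brief worked illustration of the counting rules above (the owning class is referred to here by the hypothetical name PitchCount):
# PitchCount.ball_count(0, 0, "B")  -> (1, 0)   ball
# PitchCount.ball_count(1, 0, "C")  -> (1, 1)   called strike
# PitchCount.ball_count(2, 2, "F")  -> (2, 2)   a foul with two strikes adds nothing
# PitchCount.ball_count(3, 2, "B")  -> (4, 2)   ball four is still counted (tally < 4 check)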
14,215
def delete_user_from_group(self, GroupID, UserID): log.info( % (UserID, GroupID)) self.put( % (GroupID, UserID))
Delete a user from a group.
14,216
def scaled_dot_product_attention_simple(q, k, v, bias, name=None): with tf.variable_scope( name, default_name="scaled_dot_product_attention_simple"): scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2])) logits = tf.matmul(q * scalar, k, transpose_b=True) if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") if common_layers.should_generate_summaries(): tf.summary.image( "attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1) return tf.matmul(weights, v)
Scaled dot-product attention. One head. One spatial dimension. Args: q: a Tensor with shape [batch, length_q, depth_k] k: a Tensor with shape [batch, length_kv, depth_k] v: a Tensor with shape [batch, length_kv, depth_v] bias: optional Tensor broadcastable to [batch, length_q, length_kv] name: an optional string Returns: A Tensor.
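To make the shapes and the scaling concrete, here is a minimal NumPy re-implementation of the same computation (one head, bias omitted); this is an illustrative sketch, not the tensor2tensor code:
import numpy as np

def attention(q, k, v):
    # q: [batch, length_q, depth_k], k: [batch, length_kv, depth_k], v: [batch, length_kv, depth_v]
    logits = np.einsum("bqd,bkd->bqk", q / np.sqrt(q.shape[-1]), k)
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)        # softmax over length_kv
    return np.einsum("bqk,bkv->bqv", weights, v)           # [batch, length_q, depth_v]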
14,217
def configured_logger(self, name=None): log_handlers = self.log_handlers if not name: basename = name = self.name if name and name != basename: name = % (basename, name) else: name = basename namespaces = {} for log_level in self.log_level or (): bits = log_level.split() namespaces[.join(bits[:-1]) or ] = bits[-1] for namespace in sorted(namespaces): if self.daemon: handlers = [] for hnd in log_handlers: if hnd != : handlers.append(hnd) if not handlers: handlers.append() log_handlers = handlers configured_logger(namespace, config=self.log_config, level=namespaces[namespace], handlers=log_handlers) return logging.getLogger(name)
Configured logger.
14,218
def check( state_engine, nameop, block_id, checked_ops ): from ..nameset import BlockstackDB name = str(nameop[]) sender = str(nameop[]) sender_pubkey = None recipient = str(nameop[]) recipient_address = str(nameop[]) preorder_hash = hash_name( nameop[], sender, recipient_address ) log.debug("preorder_hash = %s (%s, %s, %s)" % (preorder_hash, nameop[], sender, recipient_address)) preorder_block_number = block_id name_block_number = block_id name_first_registered = block_id name_last_renewed = block_id if not nameop.has_key(): log.warning("Name import requires a sender_pubkey (i.e. use of a p2pkh transaction)") return False if not is_name_valid( name ): log.warning("Malformed name " % name) return False name_without_namespace = get_name_from_fq_name( name ) namespace_id = get_namespace_from_name( name ) if not state_engine.is_namespace_revealed( namespace_id ): log.warning("Namespace is not revealed" % namespace_id ) return False namespace = state_engine.get_namespace_reveal( namespace_id ) if sender_address != namespace[]: log.warning("First NAME_IMPORT must come from the namespace revealers public key log.warning("Generating %s-key keychain for " % (NAME_IMPORT_KEYRING_SIZE, namespace_id)) import_addresses = BlockstackDB.build_import_keychain( state_engine.working_dir, namespace[], sender_pubkey_hex ) epoch_features = get_epoch_features(block_id) if EPOCH_FEATURE_STACKS_BUY_NAMESPACES not in epoch_features or EPOCH_FEATURE_NAMEOPS_COST_TOKENS not in epoch_features: log.fatal(re in the wrong epoch!senderaddressimporterimporter_addressop_feetoken_fee{}namespace_block_numberblock_numberconsensus_hashpreorder_hashblock_numberfirst_registeredlast_renewedpreorder_block_numberopcodelast_creation_op'] = NAME_IMPORT return True
Given a NAME_IMPORT nameop, see if we can import it. * the name must be well-formed * the namespace must be revealed, but not ready * the name cannot have been imported yet * the sender must be the same as the namespace's sender Set the __preorder__ and __prior_history__ fields, since this is a state-creating operation. Return True if accepted Return False if not
14,219
def clear(self): self.database.delete(self.key) self.database.delete(self.event)
Clear the lock, allowing it to be acquired. Do not use this method except to recover from a deadlock. Otherwise you should use :py:meth:`Lock.release`.
14,220
def create_routertype(self, context, routertype): LOG.debug("create_routertype() called. Contents %s", routertype) rt = routertype[] with context.session.begin(subtransactions=True): routertype_db = l3_models.RouterType( id=self._get_id(rt), tenant_id=rt[], name=rt[], description=rt[], template_id=rt[], ha_enabled_by_default=rt[], shared=rt[], slot_need=rt[], scheduler=rt[], driver=rt[], cfg_agent_service_helper=rt[], cfg_agent_driver=rt[]) context.session.add(routertype_db) return self._make_routertype_dict(routertype_db)
Creates a router type. Also binds it to the specified hosting device template.
14,221
def list_addresses(self, tag_values=None): title = % self.__class__.__name__ input_fields = { : tag_values } for key, value in input_fields.items(): if value: object_title = % (title, key, str(value)) self.fields.validate(value, % key, object_title) kw_args = {} tag_text = if tag_values: kw_args = { : [ { : , : tag_values } ] } from labpack.parsing.grammar import join_words plural_value = if len(tag_values) > 1: plural_value = tag_text = % (plural_value, join_words(tag_values)) self.iam.printer( % (self.iam.region_name, tag_text)) address_list = [] try: response = self.connection.describe_addresses(**kw_args) except: raise AWSConnectionError(title) response_list = response[] for address in response_list: address_list.append(address[]) return address_list
a method to list elastic ip addresses associated with account on AWS :param tag_values: [optional] list of tag values :return: list of strings with ip addresses
14,222
def horizon_main_nav(context): if not in context: return {} current_dashboard = context[].horizon.get(, None) dashboards = [] for dash in Horizon.get_dashboards(): if dash.can_access(context): if callable(dash.nav) and dash.nav(context): dashboards.append(dash) elif dash.nav: dashboards.append(dash) return {: dashboards, : context[].user, : current_dashboard, : context[]}
Generates top-level dashboard navigation entries.
14,223
def parse_pr_numbers(git_log_lines): prs = [] for line in git_log_lines: pr_number = parse_pr_number(line) if pr_number: prs.append(pr_number) return prs
Parse PR numbers from commit messages. At GitHub those have the format: `here is the message (#1234)` where `1234` is the PR number.
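The helper parse_pr_number is not shown in this snippet; a hedged sketch of what it might look like, assuming the squash-merge title format described above:
import re

def parse_pr_number(line):
    match = re.search(r"\(#(\d+)\)", line)   # trailing "(#1234)" in the commit title
    return match.group(1) if match else None

# parse_pr_numbers(["here is the message (#1234)", "no pr here"])  -> ['1234']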
14,224
def plot_lognormal_cdf(self,**kwargs): if not hasattr(self,): return x=np.sort(self.data) n=len(x) xcdf = np.arange(n,0,-1,dtype=)/float(n) lcdf = self.lognormal_dist.sf(x) D_location = argmax(xcdf-lcdf) pylab.vlines(x[D_location],xcdf[D_location],lcdf[D_location],color=,linewidth=2) pylab.plot(x, lcdf,,**kwargs)
Plot the fitted lognormal distribution
14,225
def fit(self, sequences, y=None): super(PCCA, self).fit(sequences, y=y) self._do_lumping() return self
Fit a PCCA lumping model using a sequence of cluster assignments. Parameters ---------- sequences : list(np.ndarray(dtype='int')) List of arrays of cluster assignments y : None Unused, present for sklearn compatibility only. Returns ------- self
14,226
def info(device):
    out = __salt__['cmd.run_all']("btrfs filesystem show {0}".format(device))
    salt.utils.fsutils._verify_run(out)
    return _parse_btrfs_info(out['stdout'])
Get BTRFS filesystem information. CLI Example: .. code-block:: bash salt '*' btrfs.info /dev/sda1
14,227
def _harvest_validate(self, userkwargs): parser = {} userkwargs.update(self.network_kwargs) original_kwargs = set(map(lambda k: k.split()[1] if k.find()>-1 else k, userkwargs.keys())) requires = [] for key in userkwargs.keys(): if key.find() > 0: agg, base = tuple(key.split()) if base in userkwargs: if type(userkwargs[base]) is not list: userkwargs[base] = [(None, userkwargs[base])] userkwargs[base].append( (agg, userkwargs.pop(key)) ) else: userkwargs[base] = [(agg, userkwargs.pop(key))] for key, seed in self.arguments.iteritems(): if seed.get() and key in userkwargs: value = userkwargs.pop(key) if key in userkwargs else NotImplemented oldkey = key+"" key = seed.get() seed = get(self.arguments, seed.get()) if value is not NotImplemented: if key in userkwargs: raise valideer.ValidationError("Argument alias already specified for `%s` via `%s`" % (oldkey, key), oldkey) userkwargs[key] = value if key.endswith(): multi = True key = key[:-2] else: multi = False if key in userkwargs: value = userkwargs.pop(key) elif seed.get(): value = userkwargs.get(seed.get()) else: value = seed.get() if value is None or value == []: if seed.get(): raise valideer.ValidationError("missing required property: %s" % key, key) else: continue requires.extend(array(get(seed, , []))) if type(value) is list and type(value[0]) is tuple: for v in value: ud, pd = self._harvest_args(key, seed, v, multi) userkwargs.update(ud) parser.update(pd) else: ud, pd = self._harvest_args(key, seed, value, multi) userkwargs.update(ud) parser.update(pd) for seed in self.seeds: ignores = set(array(get(seed, ))) if ignores: if ignores & original_kwargs: if not get(seed, ): additionals = ignores & original_kwargs raise valideer.ValidationError("additional properties: %s" % ",".join(additionals), additionals) [userkwargs.pop(key) for key in ignores if key in userkwargs] operators = {} for key, value in userkwargs.items(): rk = key agg = None if key.find()>-1: agg, rk = tuple(key.split()) seed = self.arguments.get(rk, self.arguments.get(rk+)) if seed: if type(value) is list: operators[key] = [] new_values = [] for v in value: operator, v = self._operator(v, *seed.get(, "").rsplit("::", 1)) new_values.append(v) operators[key].append((agg, operator) if agg else operator) userkwargs[key] = new_values else: operator, value = self._operator(value, *seed.get(, "").rsplit("::", 1)) operators[key] = (agg, operator) if agg else operator userkwargs[key] = value if in userkwargs: seed = self.arguments.get(userkwargs[].lower(), self.arguments.get(userkwargs[].lower()+)) if seed: seed[] = str(userkwargs[].lower()) for r in set(requires): if userkwargs.get(r) is None: raise valideer.ValidationError("required property not set: %s" % r, r) parser = valideer.parse(parser, additional_properties=False) validated = parser.validate(userkwargs, adapt=self.navigator.adapter()) validated.update(self.network_kwargs) return operators, validated
Validate and plant user provided arguments - Go through and plant the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer-wise) - Extract negated "!" arguments
14,228
def lru_cache(fn): @wraps(fn) def memoized_fn(*args): pargs = pickle.dumps(args) if pargs not in memoized_fn.cache: memoized_fn.cache[pargs] = fn(*args) return memoized_fn.cache[pargs] for attr, value in iter(fn.__dict__.items()): setattr(memoized_fn, attr, value) memoized_fn.cache = {} return memoized_fn
Memoization wrapper that can handle function attributes, mutable arguments, and can be applied either as a decorator or at runtime. :param fn: Function :type fn: function :returns: Memoized function :rtype: function
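Because the arguments are pickled to build the cache key, the wrapper also accepts mutable arguments such as lists; a short usage sketch:
@lru_cache
def total(values):
    print("computing")
    return sum(values)

total([1, 2, 3])   # prints "computing", returns 6
total([1, 2, 3])   # cache hit: returns 6 without recomputing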
14,229
def stop_reactor_on_state_machine_finish(state_machine): wait_for_state_machine_finished(state_machine) from twisted.internet import reactor if reactor.running: plugins.run_hook("pre_destruction") reactor.callFromThread(reactor.stop)
Wait for a state machine to be finished and stop the reactor :param state_machine: the state machine to synchronize with
14,230
def format_help_text(self, ctx, formatter): if self.help: formatter.write_paragraph() with formatter.indentation(): formatter.write_text(self.help)
Writes the help text to the formatter if it exists.
14,231
def worker(self): if self._worker is None: self._worker = XOrbLookupWorker(self.isThreadEnabled()) self.loadRequested.connect(self._worker.loadRecords) self.loadBatchRequested.connect(self._worker.loadBatch) self.loadColumnsRequested.connect(self._worker.loadColumns) self._worker.loadingStarted.connect(self.markLoadingStarted) self._worker.loadingFinished.connect(self.markLoadingFinished) self._worker.loadedRecords[object].connect(self._loadRecords) self._worker.loadedRecords[object, object].connect(self._loadRecords) self._worker.loadedGroup.connect(self.createGroupItem) self._worker.columnLoaded.connect(self._loadColumns) self._worker.connectionLost.connect(self._connectionLost) return self._worker
Returns the worker associated with this tree widget. :return <projexui.xorblookupworker.XOrbLookupWorker>
14,232
def build(self, get_grad_fn, get_opt_fn): with override_to_local_variable(): get_global_step_var() get_opt_fn = memoized(get_opt_fn) get_opt_fn() grad_list = DataParallelBuilder.build_on_towers( self.towers, get_grad_fn, devices=self.raw_devices, use_vs=[True] * len(self.towers)) DataParallelBuilder._check_grad_list(grad_list) avg_grads = aggregate_grads( grad_list, colocation=False, devices=self.raw_devices) with tf.device(self.param_server_device): ps_var_grads = DistributedReplicatedBuilder._apply_shadow_vars(avg_grads) var_update_ops = self._apply_gradients_and_copy( get_opt_fn(), grad_list, ps_var_grads) self._shadow_vars = [v for (__, v) in ps_var_grads] self._shadow_model_vars = DistributedReplicatedBuilder._shadow_model_variables(self._shadow_vars) main_fetch = tf.group(*var_update_ops, name=) train_op = self._add_sync_queues_and_barrier( , [main_fetch]) with tf.name_scope(): initial_sync_op = self._get_initial_sync_op() if len(self._shadow_model_vars) and self.is_chief: with tf.name_scope(): model_sync_op = self._get_sync_model_vars_op() else: model_sync_op = None return train_op, initial_sync_op, model_sync_op
Args: get_grad_fn (-> [(grad, var)]): get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer Returns: (tf.Operation, tf.Operation, tf.Operation): 1. the training op. 2. the op which sync all the local variables from PS. This op should be run before training. 3. the op which sync all the local `MODEL_VARIABLES` from PS. You can choose how often to run it by yourself.
14,233
def _evaluate(self,R,z,phi=0.,t=0.): r2= R**2.+z**2. rb= nu.sqrt(r2+self.b2) return -1./(self.b+rb)
NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2013-09-08 - Written - Bovy (IAS)
14,234
def sample(model, n, method="optgp", thinning=100, processes=1, seed=None):
    if method == "optgp":
        sampler = OptGPSampler(model, processes, thinning=thinning, seed=seed)
    elif method == "achr":
        sampler = ACHRSampler(model, thinning=thinning, seed=seed)
    else:
        raise ValueError("method must be 'optgp' or 'achr'!")
    return pandas.DataFrame(columns=[rxn.id for rxn in model.reactions],
                            data=sampler.sample(n))
Sample valid flux distributions from a cobra model. The function samples valid flux distributions from a cobra model. Currently we support two methods: 1. 'optgp' (default) which uses the OptGPSampler that supports parallel sampling [1]_. Requires large numbers of samples to be performant (n < 1000). For smaller samples 'achr' might be better suited. or 2. 'achr' which uses artificial centering hit-and-run. This is a single process method with good convergence [2]_. Parameters ---------- model : cobra.Model The model from which to sample flux distributions. n : int The number of samples to obtain. When using 'optgp' this must be a multiple of `processes`, otherwise a larger number of samples will be returned. method : str, optional The sampling algorithm to use. thinning : int, optional The thinning factor of the generated sampling chain. A thinning of 10 means samples are returned every 10 steps. Defaults to 100 which in benchmarks gives approximately uncorrelated samples. If set to one will return all iterates. processes : int, optional Only used for 'optgp'. The number of processes used to generate samples. seed : int > 0, optional The random number seed to be used. Initialized to current time stamp if None. Returns ------- pandas.DataFrame The generated flux samples. Each row corresponds to a sample of the fluxes and the columns are the reactions. Notes ----- The samplers have a correction method to ensure equality feasibility for long-running chains, however this will only work for homogeneous models, meaning models with no non-zero fixed variables or constraints ( right-hand side of the equalities are zero). References ---------- .. [1] Megchelenbrink W, Huynen M, Marchiori E (2014) optGpSampler: An Improved Tool for Uniformly Sampling the Solution-Space of Genome-Scale Metabolic Networks. PLoS ONE 9(2): e86587. .. [2] Direction Choice for Accelerated Convergence in Hit-and-Run Sampling David E. Kaufman Robert L. Smith Operations Research 199846:1 , 84-95
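A hedged usage sketch (assuming a cobra model is already loaded; the import path reflects recent cobrapy layouts and may differ by version):
from cobra.sampling import sample

s = sample(model, 100, method="optgp", processes=4)   # n should be a multiple of processes
print(s.shape)                                        # (100, number of reactions)
s_small = sample(model, 50, method="achr")            # single-process alternative for small n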
14,235
def to_ascii_bytes(self, filter_func=None): try: string = self.to_str(filter_func) string = string.encode() except (UnicodeEncodeError, UnicodeDecodeError): self.percent_encode_non_ascii_headers() string = self.to_str(filter_func) string = string.encode() return string + b
Attempt to encode the headers block as ascii. If encoding fails, call percent_encode_non_ascii_headers() to encode any headers per RFCs.
14,236
def _parse_response_types(argspec, attrs): return_type = argspec.annotations.get("return") or None type_description = attrs.parameter_descriptions.get("return", "") response_types = attrs.response_types.copy() if return_type or len(response_types) == 0: response_types[attrs.success_code] = ResponseType( type=return_type, type_description=type_description, description="success", ) return response_types
From the given parameters, return the response type dictionaries.
14,237
def _SetYaraRules(self, yara_rules_string):
    if not yara_rules_string:
        return
    analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance('yara')
    analyzer_object.SetRules(yara_rules_string)
    self._analyzers.append(analyzer_object)
Sets the Yara rules. Args: yara_rules_string (str): unparsed Yara rule definitions.
14,238
def do_reference(self, parent=None, ident=0): (handle,) = self._readStruct(">L") log_debug(" ref = self.references[handle - self.BASE_REFERENCE_IDX] log_debug(" return ref
Handles a TC_REFERENCE opcode :param parent: :param ident: Log indentation level :return: The referenced object
14,239
def summarize(self): if not self._achievements_summarized: for _ in self.operations(): pass self._summarize() return self._summary
Summarize game.
14,240
def _parse_complement(self, tokens): tokens.pop(0) tokens.pop(0) res = self._parse_nested_interval(tokens) tokens.pop(0) res.switch_strand() return res
Parses a complement Complement ::= 'complement' '(' SuperRange ')'
14,241
def show_hbonds(self): grp = self.getPseudoBondGroup("Hydrogen Bonds-%i" % self.tid, associateWith=[self.model]) grp.lineWidth = 3 for i in self.plcomplex.hbonds.ldon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname() self.bs_res_ids.append(i[0]) for i in self.plcomplex.hbonds.pdon_id: b = grp.newPseudoBond(self.atoms[i[0]], self.atoms[i[1]]) b.color = self.colorbyname() self.bs_res_ids.append(i[1])
Visualizes hydrogen bonds.
14,242
def resolve_indirect (data, key, splithosts=False): value = data[key] env_value = os.environ.get(value) if env_value: if splithosts: data[key] = split_hosts(env_value) else: data[key] = env_value else: del data[key]
Replace name of environment variable with its value.
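A short illustration of the substitution, assuming an environment variable named HOSTS is set:
import os

os.environ["HOSTS"] = "a.example.org,b.example.org"
data = {"hosts": "HOSTS"}
resolve_indirect(data, "hosts")   # data["hosts"] now holds the variable's value
# with splithosts=True the value would additionally be split via split_hosts();
# if HOSTS were unset, the "hosts" key would be removed from data instead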
14,243
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]): loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov) files = loader.fetchFiles() raw_repo_uri = loader.getRawRepoUri() items = [] allowed_ext = ["rq", "sparql", "json", "tpf"] for c in files: glogger.debug(.format(c[])) extension = c[].split()[-1] if extension in allowed_ext: call_name = c[].split()[0] query_text = loader.getTextFor(c) item = None if extension == "json": query_text = json.loads(query_text) if extension in ["rq", "sparql", "json"]: glogger.debug("===================================================================") glogger.debug("Processing SPARQL query: {}".format(c[])) glogger.debug("===================================================================") item = process_sparql_query_text(query_text, loader, call_name, extraMetadata) elif "tpf" == extension: glogger.debug("===================================================================") glogger.debug("Processing TPF query: {}".format(c[])) glogger.debug("===================================================================") item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata) else: glogger.info("Ignoring unsupported source call name: {}".format(c[])) if item: items.append(item) return items
Build grlc specification for the given github user / repo.
14,244
def process( self, request, application, expected_state, label, extra_roles=None): roles = self._get_roles_for_request(request, application) if extra_roles is not None: roles.update(extra_roles) if not in roles: return HttpResponseForbidden() url = get_url(request, application, roles) return HttpResponseRedirect(url) state_config = self._config[application.state] instance = load_state_instance(state_config) if request.method == "GET": response = instance.get_next_config(request, application, label, roles) assert isinstance(response, HttpResponse) return response elif request.method == "POST": response = instance.get_next_config(request, application, label, roles) if isinstance(response, HttpResponse): return response else: next_config = response return self._next(request, application, roles, next_config) else: return HttpResponseBadRequest("<h1>Bad Request</h1>")
Process the view request at the current state.
14,245
def nextGen(self): self.current_gen += 1 self.change_gen[self.current_gen % 3] = copy.copy(self.grid) grid_cp = copy.copy(self.grid) for cell in self.grid: y, x = cell y1 = (y - 1) % self.y_grid y2 = (y + 1) % self.y_grid x1 = (x - 1) % self.x_grid x2 = (x + 1) % self.x_grid n = self.countNeighbours(cell) if n < 2 or n > 3: del grid_cp[cell] self.addchar(y + self.y_pad, x + self.x_pad, ) else: grid_cp[cell] = min(self.grid[cell] + 1, self.color_max) for neighbour in product([y1, y, y2], [x1, x, x2]): if not self.grid.get(neighbour): if self.countNeighbours(neighbour) == 3: y, x = neighbour y = y % self.y_grid x = x % self.x_grid neighbour = y, x grid_cp[neighbour] = 1 self.grid = grid_cp
Decide the fate of the cells
14,246
def compute_Pi_JinsDJ_given_D(self, CDR3_seq, Pi_J_given_D, max_J_align): max_insertions = len(self.PinsDJ) - 1 Pi_JinsDJ_given_D = [np.zeros((4, len(CDR3_seq)*3)) for i in range(len(Pi_J_given_D))] for D_in in range(len(Pi_J_given_D)): for init_pos in range(-1, -(max_J_align+1), -3): Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos] Pi_JinsDJ_given_D[D_in][:, init_pos-1] += self.PinsDJ[1]*np.dot(self.rDdj[CDR3_seq[init_pos/3]], Pi_J_given_D[D_in][:, init_pos]) current_base_nt_vec = np.dot(self.rTdj[CDR3_seq[init_pos/3]], Pi_J_given_D[D_in][:, init_pos]) Pi_JinsDJ_given_D[D_in][0, init_pos-2] += self.PinsDJ[2]*np.sum(current_base_nt_vec) base_ins = 2 for aa in CDR3_seq[init_pos/3 - 1: init_pos/3 - max_insertions/3:-1]: Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec) current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec) base_ins +=3 for init_pos in range(-2, -(max_J_align+1), -3): Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos] current_base_nt_vec = np.multiply(Pi_J_given_D[D_in][:, init_pos], self.first_nt_bias_insDJ) Pi_JinsDJ_given_D[D_in][0, init_pos-1] += self.PinsDJ[1]*np.sum(current_base_nt_vec) base_ins = 1 for aa in CDR3_seq[init_pos/3 - 1: init_pos/3 - max_insertions/3:-1]: Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec) current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec) base_ins +=3 for init_pos in range(-3, -(max_J_align+1), -3): Pi_JinsDJ_given_D[D_in][0, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][0, init_pos] current_base_nt_vec = self.zero_nt_bias_insDJ*Pi_J_given_D[D_in][0, init_pos] base_ins = 0 for aa in CDR3_seq[init_pos/3 - 1: init_pos/3 - max_insertions/3:-1]: Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec) current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec) Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec) base_ins +=3 return Pi_JinsDJ_given_D
Compute Pi_JinsDJ conditioned on D. This function returns the Pi array from the model factors of the J genomic contributions, P(D,J)*P(delJ|J), and the DJ (N2) insertions, first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1}) conditioned on D identity. This corresponds to {N^{x_3}}_{x_4}J(D)^{x_4}. For clarity in parsing the algorithm implementation, we include which instance attributes are used in the method as 'parameters.' Parameters ---------- CDR3_seq : str CDR3 sequence composed of 'amino acids' (single character symbols each corresponding to a collection of codons as given by codons_dict). Pi_J_given_D : ndarray List of (4, 3L) ndarrays corresponding to J(D)^{x_4}. max_J_align : int Maximum alignment of the CDR3_seq to any genomic J allele allowed by J_usage_mask. self.PinsDJ : ndarray Probability distribution of the DJ (N2) insertion sequence length self.first_nt_bias_insDJ : ndarray (4,) array of the probability distribution of the identity of the first nucleotide insertion for the DJ junction. self.zero_nt_bias_insDJ : ndarray (4,) array of the probability distribution of the identity of the nucleotide BEFORE the DJ insertion. Note, as the Markov model at the DJ junction goes 3' to 5' this is the position AFTER the insertions reading left to right. self.Tdj : dict Dictionary of full codon transfer matrices ((4, 4) ndarrays) by 'amino acid'. self.Sdj : dict Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for the DJ insertion ending in the first position. self.Ddj : dict Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for the DJ insertion ending in the second position. self.rTdj : dict Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for the DJ insertion starting in the first position. self.rDdj : dict Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for DJ insertion starting in the first position and ending in the second position of the same codon. Returns ------- Pi_JinsDJ_given_D : list List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
14,247
def roundrobin(*iterables): raise NotImplementedError() pending = len(iterables) if six.PY2: nexts = cycle(iter(it).next for it in iterables) else: nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending))
roundrobin('ABC', 'D', 'EF') --> A D E B F C
14,248
def isLoggedIn(self): r = self._cleanGet(self.req_url.LOGIN, allow_redirects=False) return "Location" in r.headers and "home" in r.headers["Location"]
Sends a request to Facebook to check the login status :return: True if the client is still logged in :rtype: bool
14,249
def bytes(num, check_result=False):
    if num <= 0:
        raise ValueError("'num' should be > 0")
    buf = create_string_buffer(num)
    result = libcrypto.RAND_bytes(buf, num)
    if check_result and result == 0:
        raise RandError("Random Number Generator not seeded sufficiently")
    return buf.raw[:num]
Returns num bytes of cryptographically strong pseudo-random bytes. If check_result is True, raises error if PRNG is not seeded enough
14,250
def dispatch(self, request, **kwargs): if request.method == : self.__authtoken = (bool(getattr(self.request, "authtoken", False))) self.json_worker = self.__authtoken or (self.json is True) return super(GenDelete, self).dispatch(request, **kwargs) else: json_answer = json.dumps({ : True, : _(), }) return HttpResponse(json_answer, content_type=)
Entry point for this class, here we decide basic stuff
14,251
def get_urls(self): from django.conf.urls import patterns, url urls = super(RecurrenceRuleAdmin, self).get_urls() my_urls = patterns( , url( r, self.admin_site.admin_view(self.preview), name= ), ) return my_urls + urls
Add a preview URL.
14,252
def merge_chromosome_dfs(df_tuple): plus_df, minus_df = df_tuple index_cols = "Chromosome Bin".split() count_column = plus_df.columns[0] if plus_df.empty: return return_other(minus_df, count_column, index_cols) if minus_df.empty: return return_other(plus_df, count_column, index_cols) plus_df = plus_df.groupby(index_cols).sum() minus_df = minus_df.groupby(index_cols).sum() df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1) df = df.reset_index().sort_values(by="Bin") df.columns = ["Chromosome", "Bin", count_column] df = df.sort_values(["Chromosome", "Bin"]) df[["Bin", count_column]] = df[["Bin", count_column]].astype(int32) df = df[[count_column, "Chromosome", "Bin"]] return df.reset_index(drop=True)
Merges data from the two strands into strand-agnostic counts.
14,253
def read_params(filename, asheader=False, verbosity=0) -> Dict[str, Union[int, float, bool, str, None]]:
    filename = str(filename)
    from collections import OrderedDict
    params = OrderedDict([])
    for line in open(filename):
        if '=' in line:
            if not asheader or line.startswith('#'):
                line = line[1:] if line.startswith('#') else line
                key, val = line.split('=')
                key = key.strip()
                val = val.strip()
                params[key] = convert_string(val)
    return params
Read parameter dictionary from text file. Assumes that parameters are specified in the format: par1 = value1 par2 = value2 Comments that start with '#' are allowed. Parameters ---------- filename : str, Path Filename of data file. asheader : bool, optional Read the dictionary from the header (comment section) of a file. Returns ------- Dictionary that stores parameters.
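A brief illustration of the expected file format and the resulting dictionary (file name and values are hypothetical; values are coerced by convert_string):
# params.txt:
#   # resolution settings
#   n_bins = 10
#   threshold = 0.5
#   label = control
#
# read_params("params.txt")
# -> OrderedDict([('n_bins', 10), ('threshold', 0.5), ('label', 'control')])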
14,254
def expand_variables(template_str, value_map, transformer=None): if template_str is None: return None else: if transformer is None: transformer = lambda v: v try:
Expand a template string like "blah blah $FOO blah" using given value mapping.
14,255
def switch_on(self, *args): if self.on_check(*args): return self._switch.switch(True) else: return False
Sets the state of the switch to True if on_check() returns True, given the positional arguments provided in args. :param args: variable length argument list :return: Boolean. Returns True if the operation is successful
14,256
def prior_to_xarray(self): prior = self.prior prior_model = self.prior_model prior_predictive = self.prior_predictive if prior_predictive is None: prior_predictive = [] elif isinstance(prior_predictive, str): prior_predictive = [prior_predictive] ignore = prior_predictive data = get_draws_stan3(prior, model=prior_model, ignore=ignore) return dict_to_dataset(data, library=self.stan, coords=self.coords, dims=self.dims)
Convert prior samples to xarray.
14,257
def generate_em_constraint_data(mNS_min, mNS_max, delta_mNS, sBH_min, sBH_max, delta_sBH, eos_name, threshold, eta_default): mNS_nsamples = complex(0,int(np.ceil((mNS_max-mNS_min)/delta_mNS)+1)) sBH_nsamples = complex(0,int(np.ceil((sBH_max-sBH_min)/delta_sBH)+1)) mNS_vec, sBH_vec = np.mgrid[mNS_min:mNS_max:mNS_nsamples, sBH_min:sBH_max:sBH_nsamples] mNS_locations = np.array(mNS_vec[:,0]) sBH_locations = np.array(sBH_vec[0]) mNS_sBH_grid = zip(mNS_vec.ravel(), sBH_vec.ravel()) mNS_sBH_grid = np.array(mNS_sBH_grid) mNS_vec = np.array(mNS_sBH_grid[:,0]) sBH_vec = np.array(mNS_sBH_grid[:,1]) eos_name_vec=[eos_name for _ in range(len(mNS_vec))] eos_name_vec=np.array(eos_name_vec) threshold_vec=np.empty(len(mNS_vec)) threshold_vec.fill(threshold) eta_default_vec=np.empty(len(mNS_vec)) eta_default_vec.fill(eta_default) eta_sol = find_em_constraint_data_points(mNS_vec, sBH_vec, eos_name_vec, threshold_vec, eta_default_vec) eta_sol = eta_sol.reshape(-1,len(sBH_locations)) np.savez(, mNS_pts=mNS_locations, sBH_pts=sBH_locations, eta_mins=eta_sol) constraint_data = zip(mNS_vec.ravel(), sBH_vec.ravel(), eta_sol.ravel()) np.savetxt(, constraint_data)
Wrapper that calls find_em_constraint_data_point over a grid of points to generate the bh_spin_z x ns_g_mass x eta surface above which NS-BH binaries yield a remnant disk mass that exceeds the threshold required by the user. The user must also specify the default symmetric mass ratio value to be assigned to points for which the NS mass exceeds the maximum NS mass allowed by the chosen NS equation of state. The 2D surface that is generated is saved to file in two formats: a NumPy .npz archive (np.savez) and a plain-text table (np.savetxt). Parameters ----------- mNS_min: float lower boundary of the grid in the NS mass direction mNS_max: float upper boundary of the grid in the NS mass direction delta_mNS: float grid spacing in the NS mass direction sBH_min: float lower boundary of the grid in the direction of the BH dimensionless spin component along the orbital angular momentum sBH_max: float upper boundary of the grid in the direction of the BH dimensionless spin component along the orbital angular momentum delta_sBH: float grid spacing in the direction of the BH dimensionless spin component along the orbital angular momentum eos_name: string NS equation of state label ('2H' is the only supported choice at the moment) threshold: float an amount to be subtracted from the remnant mass upper limit predicted by the model (in solar masses) eta_default: float the value to be returned for points in the grids in which the NS mass is too high
14,258
def memory_used(self): if self._end_memory: memory_used = self._end_memory - self._start_memory return memory_used else: return None
To know the allocated memory at function termination. .. versionadded:: 4.1 This property might return None if the function is still running. This function should help to show memory leaks or RAM-greedy code.
14,259
def casperjs_capture(stream, url, method=None, width=None, height=None, selector=None, data=None, waitfor=None, size=None, crop=None, render=, wait=None): if isinstance(stream, six.string_types): output = stream else: with NamedTemporaryFile(, suffix= % render, delete=False) as f: output = f.name try: cmd = CASPERJS_CMD + [url, output] cmd += [ % render] if method: cmd += [ % method] if width: cmd += [ % width] if height: cmd += [ % height] if selector: cmd += [ % selector] if data: cmd += [ % json.dumps(data)] if waitfor: cmd += [ % waitfor] if wait: cmd += [ % wait] logger.debug(cmd) proc = subprocess.Popen(cmd, **casperjs_command_kwargs()) stdout = proc.communicate()[0] process_casperjs_stdout(stdout) size = parse_size(size) render = parse_render(render) if size or (render and render != and render != ): image_postprocess(output, stream, size, crop, render) else: if stream != output: with open(output, ) as out: stream.write(out.read()) stream.flush() finally: if stream != output: os.unlink(output)
Captures web pages using ``casperjs``
14,260
def _crossmatch_transients_against_catalogues( self, transientsMetadataListIndex, colMaps): global theseBatches self.log.debug( ) transientsMetadataList = theseBatches[transientsMetadataListIndex] dbConn = database( log=self.log, dbSettings=self.settings["database settings"]["static catalogues"] ).connect() self.allClassifications = [] cm = transient_catalogue_crossmatch( log=self.log, dbConn=dbConn, transients=transientsMetadataList, settings=self.settings, colMaps=colMaps ) crossmatches = cm.match() self.log.debug( ) return crossmatches
run the transients through the crossmatch algorithm in the settings file **Key Arguments:** - ``transientsMetadataListIndex`` -- the list of transient metadata lifted from the database. - ``colMaps`` -- dictionary of dictionaries with the name of the database-view (e.g. `tcs_view_agn_milliquas_v4_5`) as the key and the column-name dictary map as value (`{view_name: {columnMap}}`). **Return:** - ``crossmatches`` -- a list of dictionaries of the associated sources crossmatched from the catalogues database .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
14,261
def request( self, url, params=None, data=None, headers=None, timeout=None, auth=None, cookiejar=None, ): if headers is None: headers = {} if timeout is None: timeout = getattr(self._py3status_module, "request_timeout", 10) if "User-Agent" not in headers: headers["User-Agent"] = "py3status/{} {}".format(version, self._uid) return HttpResponse( url, params=params, data=data, headers=headers, timeout=timeout, auth=auth, cookiejar=cookiejar, )
Make a request to a url and retrieve the results. If the headers parameter does not provide an 'User-Agent' key, one will be added automatically following the convention: py3status/<version> <per session random uuid> :param url: url to request eg `http://example.com` :param params: extra query string parameters as a dict :param data: POST data as a dict. If this is not supplied the GET method will be used :param headers: http headers to be added to the request as a dict :param timeout: timeout for the request in seconds :param auth: authentication info as tuple `(username, password)` :param cookiejar: an object of a CookieJar subclass :returns: HttpResponse
14,262
def _hex_to_dec(ip, check=True):
    if check and not is_hex(ip):
        raise ValueError('invalid hexadecimal value: %r' % ip)
    if isinstance(ip, int):
        ip = hex(ip)
    return int(str(ip), 16)
Hexadecimal to decimal conversion.
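Illustrative calls (assuming is_hex accepts these forms):
# _hex_to_dec('0xff')  -> 255
# _hex_to_dec('ff')    -> 255
# _hex_to_dec(0xff)    -> 255   (ints are converted with hex() and parsed back)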
14,263
def get_content(self, default=None): tree = parse_string(self.book.get_template(self._template_name)) tree_root = tree.getroot() tree_root.set(, self.lang or self.book.language) tree_root.attrib[ % NAMESPACES[]] = self.lang or self.book.language try: html_tree = parse_html_string(self.content) except: return html_root = html_tree.getroottree() _head = etree.SubElement(tree_root, ) if self.title != : _title = etree.SubElement(_head, ) _title.text = self.title for lnk in self.links: if lnk.get() == : _lnk = etree.SubElement(_head, , lnk) _lnk.text = else: _lnk = etree.SubElement(_head, , lnk) _body = etree.SubElement(tree_root, ) if self.direction: _body.set(, self.direction) tree_root.set(, self.direction) body = html_tree.find() if body is not None: for i in body.getchildren(): _body.append(i) tree_str = etree.tostring(tree, pretty_print=True, encoding=, xml_declaration=True) return tree_str
Returns content for this document as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3). :Args: - default: Default value for the content if it is not defined. :Returns: Returns content of this document.
14,264
def _bcrypt_generate_pair(algorithm, bit_size=None, curve=None): if algorithm == : alg_constant = BcryptConst.BCRYPT_RSA_ALGORITHM struct_type = private_blob_type = BcryptConst.BCRYPT_RSAFULLPRIVATE_BLOB public_blob_type = BcryptConst.BCRYPT_RSAPUBLIC_BLOB elif algorithm == : alg_constant = BcryptConst.BCRYPT_DSA_ALGORITHM if bit_size > 1024: struct_type = else: struct_type = private_blob_type = BcryptConst.BCRYPT_DSA_PRIVATE_BLOB public_blob_type = BcryptConst.BCRYPT_DSA_PUBLIC_BLOB else: alg_constant = { : BcryptConst.BCRYPT_ECDSA_P256_ALGORITHM, : BcryptConst.BCRYPT_ECDSA_P384_ALGORITHM, : BcryptConst.BCRYPT_ECDSA_P521_ALGORITHM, }[curve] bit_size = { : 256, : 384, : 521, }[curve] struct_type = private_blob_type = BcryptConst.BCRYPT_ECCPRIVATE_BLOB public_blob_type = BcryptConst.BCRYPT_ECCPUBLIC_BLOB alg_handle = open_alg_handle(alg_constant) key_handle_pointer = new(bcrypt, ) res = bcrypt.BCryptGenerateKeyPair(alg_handle, key_handle_pointer, bit_size, 0) handle_error(res) key_handle = unwrap(key_handle_pointer) res = bcrypt.BCryptFinalizeKeyPair(key_handle, 0) handle_error(res) private_out_len = new(bcrypt, ) res = bcrypt.BCryptExportKey(key_handle, null(), private_blob_type, null(), 0, private_out_len, 0) handle_error(res) private_buffer_length = deref(private_out_len) private_buffer = buffer_from_bytes(private_buffer_length) res = bcrypt.BCryptExportKey( key_handle, null(), private_blob_type, private_buffer, private_buffer_length, private_out_len, 0 ) handle_error(res) private_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, private_buffer) private_blob_struct = unwrap(private_blob_struct_pointer) struct_size = sizeof(bcrypt, private_blob_struct) private_blob = bytes_from_buffer(private_buffer, private_buffer_length)[struct_size:] if algorithm == : private_key = _bcrypt_interpret_rsa_key_blob(, private_blob_struct, private_blob) elif algorithm == : if bit_size > 1024: private_key = _bcrypt_interpret_dsa_key_blob(, 2, private_blob_struct, private_blob) else: private_key = _bcrypt_interpret_dsa_key_blob(, 1, private_blob_struct, private_blob) else: private_key = _bcrypt_interpret_ec_key_blob(, private_blob_struct, private_blob) public_out_len = new(bcrypt, ) res = bcrypt.BCryptExportKey(key_handle, null(), public_blob_type, null(), 0, public_out_len, 0) handle_error(res) public_buffer_length = deref(public_out_len) public_buffer = buffer_from_bytes(public_buffer_length) res = bcrypt.BCryptExportKey( key_handle, null(), public_blob_type, public_buffer, public_buffer_length, public_out_len, 0 ) handle_error(res) public_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, public_buffer) public_blob_struct = unwrap(public_blob_struct_pointer) struct_size = sizeof(bcrypt, public_blob_struct) public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:] if algorithm == : public_key = _bcrypt_interpret_rsa_key_blob(, public_blob_struct, public_blob) elif algorithm == : if bit_size > 1024: public_key = _bcrypt_interpret_dsa_key_blob(, 2, public_blob_struct, public_blob) else: public_key = _bcrypt_interpret_dsa_key_blob(, 1, public_blob_struct, public_blob) else: public_key = _bcrypt_interpret_ec_key_blob(, public_blob_struct, public_blob) return (load_public_key(public_key), load_private_key(private_key))
Generates a public/private key pair using CNG :param algorithm: The key algorithm - "rsa", "dsa" or "ec" :param bit_size: An integer - used for "rsa" and "dsa". For "rsa" the value may be 1024, 2048, 3072 or 4096. For "dsa" the value may be 1024, plus 2048 or 3072 if on Windows 8 or newer. :param curve: A unicode string - used for "ec" keys. Valid values include "secp256r1", "secp384r1" and "secp521r1". :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A 2-element tuple of (PublicKey, PrivateKey). The contents of each key may be saved by calling .asn1.dump().
14,265
def __read_response(self, nblines=-1): resp, code, data = (b"", None, None) cpt = 0 while True: try: line = self.__read_line() except Response as inst: code = inst.code data = inst.data break except Literal as inst: resp += self.__read_block(inst.value) if not resp.endswith(CRLF): resp += self.__read_line() + CRLF continue if not len(line): continue resp += line + CRLF cpt += 1 if nblines != -1 and cpt == nblines: break return (code, data, resp)
Read a response from the server. In the usual case, we read lines until we find one that looks like a response (OK|NO|BYE\s*(.+)?). If *nblines* > 0, we read exactly nblines before returning. :param nblines: number of lines to read (default : -1) :rtype: tuple :return: a tuple of the form (code, data, response). If nblines is provided, code and data can be equal to None.
14,266
def format_national_number_with_preferred_carrier_code(numobj, fallback_carrier_code): if (numobj.preferred_domestic_carrier_code is not None and len(numobj.preferred_domestic_carrier_code) > 0): carrier_code = numobj.preferred_domestic_carrier_code else: carrier_code = fallback_carrier_code return format_national_number_with_carrier_code(numobj, carrier_code)
Formats a phone number in national format for dialing using the carrier as specified in the preferred_domestic_carrier_code field of the PhoneNumber object passed in. If that is missing, use the fallback_carrier_code passed in instead. If there is no preferred_domestic_carrier_code, and the fallback_carrier_code contains an empty string, return the number in national format without any carrier code. Use format_national_number_with_carrier_code instead if the carrier code passed in should take precedence over the number's preferred_domestic_carrier_code when formatting. Arguments: numobj -- The phone number to be formatted carrier_code -- The carrier selection code to be used, if none is found in the phone number itself. Returns the formatted phone number in national format for dialing using the number's preferred_domestic_carrier_code, or the fallback_carrier_code pass in if none is found.
14,267
def iri(uri_string): uri_string = str(uri_string) if uri_string[:1] == "?": return uri_string if uri_string[:1] == "[": return uri_string if uri_string[:1] != "<": uri_string = "<{}".format(uri_string.strip()) if uri_string[len(uri_string)-1:] != ">": uri_string = "{}>".format(uri_string.strip()) return uri_string
converts a string to an IRI or returns an IRI if already formatted Args: uri_string: uri in string format Returns: formatted uri with <>
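Illustrative behavior of the wrapping logic:
# iri("http://example.org/resource")    -> '<http://example.org/resource>'
# iri("<http://example.org/resource>")  -> '<http://example.org/resource>'  (already wrapped)
# iri("?var")                           -> '?var'   (SPARQL variables pass through untouched)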
14,268
def _deserialize_value(cls, types, value): if types.main == list and value is not None: return cls._deserialize_list(types.sub, value) else: return cls.deserialize(types.main, value)
:type types: ValueTypes :type value: int|str|bool|float|bytes|unicode|list|dict :rtype: int|str|bool|float|bytes|unicode|list|dict|object
14,269
def set_matrix_dimensions(self, bounds, xdensity, ydensity): self.bounds = bounds self.xdensity = xdensity self.ydensity = ydensity scs = SheetCoordinateSystem(bounds, xdensity, ydensity) for of in self.output_fns: if isinstance(of, TransferFn): of.initialize(SCS=scs, shape=scs.shape)
Change the dimensions of the matrix into which the pattern will be drawn. Users of this class should call this method rather than changing the bounds, xdensity, and ydensity parameters directly. Subclasses can override this method to update any internal data structures that may depend on the matrix dimensions.
14,270
def transform(self, blocks, y=None): feature_vecs = ( tuple(re.search(token, block.css[attrib]) is not None for block in blocks) for attrib, tokens in self.attribute_tokens for token in tokens ) return np.column_stack(tuple(feature_vecs)).astype(int)
Transform an ordered sequence of blocks into a 2D features matrix with shape (num blocks, num features). Args: blocks (List[Block]): as output by :class:`Blockifier.blockify` y (None): This isn't used, it's only here for API consistency. Returns: `np.ndarray`: 2D array of shape (num blocks, num CSS attributes), where values are either 0 or 1, indicating the absence or presence of a given token in a CSS attribute on a given block.
14,271
def get_access_token(self, code):
    payload = {'grant_type': 'authorization_code',
               'code': code,
               'client_id': self._client_id,
               'client_secret': self._client_secret,
               'redirect_uri': self._redirect_uri}
    req = requests.post(settings.API_ACCESS_TOKEN_URL, data=payload)
    data = req.json()
    return data.get('access_token')
Returns Access Token retrieved from the Health Graph API Token Endpoint following the login to RunKeeper. @param code: Code returned by Health Graph API at the Authorization or RunKeeper Login phase. @return: Access Token for querying the Health Graph API.
14,272
def walk(self, parent=None): if parent is None: yield self.root parent = self.root for cpage_name in parent.subpages: cpage = self.__all_pages[cpage_name] yield cpage for page in self.walk(parent=cpage): yield page
Generator that yields pages in infix order Args: parent: hotdoc.core.tree.Page, optional, the page to start traversal from. If None, defaults to the root of the tree. Yields: hotdoc.core.tree.Page: the next page
14,273
def get_executions(self, **kwargs): return self._client.service_executions(service=self.id, scope=self.scope_id, **kwargs)
Retrieve the executions related to the current service. .. versionadded:: 1.13 :param kwargs: (optional) additional search keyword arguments to limit the search even further. :type kwargs: dict :return: list of ServiceExecutions associated to the current service.
14,274
def classify(self, dataset, missing_value_action='auto'):
    return super(BoostedTreesClassifier, self).classify(dataset,
                                                        missing_value_action=missing_value_action)
Return a classification, for each example in the ``dataset``, using the
trained boosted trees model. The output SFrame contains predictions as
class labels (0 or 1) and probabilities associated with the example.

Parameters
----------
dataset : SFrame
    Dataset of new observations. Must include columns with the same
    names as the features used for model training, but does not require
    a target column. Additional columns are ignored.

missing_value_action : str, optional
    Action to perform when missing values are encountered. Can be one of:

    - 'auto': By default the model will treat missing value as is.
    - 'impute': Proceed with evaluation by filling in the missing values
      with the mean of the training data. Missing values are also imputed
      if an entire column of data is missing during evaluation.
    - 'error': Do not proceed with evaluation and terminate with an error
      message.

Returns
-------
out : SFrame
    An SFrame with model predictions, i.e. class labels and probabilities
    associated with each of the class labels.

See Also
--------
create, evaluate, predict

Examples
--------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.boosted_trees_classifier.create(data,
>>>                                                    target='is_expensive',
>>>                                                    features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
14,275
def run(self):
    with self.lock:
        if self in self.device_manager._threads:
            self.stream.close()
            self.device_manager.thread_finished(self)
Plays the audio. This method shouldn't be called explicitly; the constructor takes care of it.
14,276
async def auth_crammd5(
    self, username: str, password: str, timeout: DefaultNumType = _default
) -> SMTPResponse:
    async with self._command_lock:
        initial_response = await self.execute_command(
            b"AUTH", b"CRAM-MD5", timeout=timeout
        )
        if initial_response.code != SMTPStatus.auth_continue:
            raise SMTPAuthenticationError(
                initial_response.code, initial_response.message
            )
        password_bytes = password.encode("ascii")
        username_bytes = username.encode("ascii")
        response_bytes = initial_response.message.encode("ascii")
        verification_bytes = crammd5_verify(
            username_bytes, password_bytes, response_bytes
        )
        response = await self.execute_command(verification_bytes)
        if response.code != SMTPStatus.auth_successful:
            raise SMTPAuthenticationError(response.code, response.message)
    return response
CRAM-MD5 auth uses the password as a shared secret to MD5 the server's
response.

Example::

    250 AUTH CRAM-MD5
    auth cram-md5
    334 PDI0NjA5LjEwNDc5MTQwNDZAcG9wbWFpbC5TcGFjZS5OZXQ+
    dGltIGI5MTNhNjAyYzdlZGE3YTQ5NWI0ZTZlNzMzNGQzODkw
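A hedged usage sketch: the host, port, and credentials are placeholders, and the import assumes the coroutine above belongs to an aiosmtplib-style client, which this entry alone does not confirm.

import asyncio
from aiosmtplib import SMTP  # assumption: auth_crammd5 lives on an aiosmtplib-style SMTP client

async def login_with_crammd5():
    client = SMTP(hostname="smtp.example.com", port=587)  # placeholder host/port
    await client.connect()
    response = await client.auth_crammd5("username", "password")  # placeholder credentials
    print(response.code, response.message)

asyncio.run(login_with_crammd5())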
14,277
def get_tier(self, name_num):
    return self.tiers[name_num - 1] if isinstance(name_num, int) else \
        [i for i in self.tiers if i.name == name_num][0]
Gives a tier; when multiple tiers exist with that name, only the first is
returned.

:param name_num: Name or number of the tier to return.
:type name_num: int or str
:returns: The tier.
:raises IndexError: If the tier doesn't exist.
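A standalone sketch of both lookup modes; Grid and Tier are throwaway stand-ins that only provide the .tiers list and .name attribute the method relies on:

from collections import namedtuple

Tier = namedtuple("Tier", ["name"])  # stand-in; only .name is needed

class Grid:
    def __init__(self, tiers):
        self.tiers = tiers
    # same lookup logic as the entry above, repeated only to keep the sketch standalone
    def get_tier(self, name_num):
        return self.tiers[name_num - 1] if isinstance(name_num, int) else \
            [i for i in self.tiers if i.name == name_num][0]

g = Grid([Tier("words"), Tier("phones")])
print(g.get_tier(2).name)        # -> phones (tiers are addressed 1-based by number)
print(g.get_tier("words").name)  # -> words  (by name, first match wins)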
14,278
def train(self, x=None, y=None, training_frame=None, offset_column=None,
          fold_column=None, weights_column=None, validation_frame=None,
          max_runtime_secs=None, ignored_columns=None, model_id=None,
          verbose=False):
    self._train(x=x, y=y, training_frame=training_frame,
                offset_column=offset_column, fold_column=fold_column,
                weights_column=weights_column,
                validation_frame=validation_frame,
                max_runtime_secs=max_runtime_secs,
                ignored_columns=ignored_columns, model_id=model_id,
                verbose=verbose)
Train the H2O model.

:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param H2OFrame training_frame: The H2OFrame having the columns indicated by x
    and y (as well as any additional columns specified by fold, offset, and
    weights).
:param offset_column: The name or index of the column in training_frame that
    holds the offsets.
:param fold_column: The name or index of the column in training_frame that
    holds the per-row fold assignments.
:param weights_column: The name or index of the column in training_frame that
    holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while
    training.
:param float max_runtime_secs: Maximum allowed runtime in seconds for model
    training. Use 0 to disable.
:param bool verbose: Print scoring history to stdout. Defaults to False.
14,279
def get_logger(name):
    if name in loggers:
        return loggers[name]
    logger = logging.getLogger(name)
    logger.propagate = False
    # NOTE: several string literals in this function were lost in extraction; the
    # empty-string fallbacks are restored, but the colour-prefix salt and the
    # Formatter format string are unknown and left as gaps.
    pre1, suf1 = hash_coloured_escapes(name) if supports_color() else ('', '')
    pre2, suf2 = hash_coloured_escapes(name + ) \
        if supports_color() else ('', '')
    formatter = logging.Formatter(
        .format(pre1, pre2, suf1)
    )
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    loggers[name] = logger
    logger.once_dict = {}
    return logger
Helper function to get a logger
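A stripped-down, self-contained sketch of the caching behaviour; it leaves out the colour-hash formatting (whose format string is missing above) and uses a stand-in function name:

import logging

loggers = {}  # module-level cache, mirroring the entry above

def get_logger_sketch(name):
    # minimal stand-in without the colour-hash formatter, to show the caching
    if name in loggers:
        return loggers[name]
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.addHandler(logging.StreamHandler())
    loggers[name] = logger
    return logger

assert get_logger_sketch("demo") is get_logger_sketch("demo")  # same instance on repeat calls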
14,280
def add_country_location(self, country, exact=True, locations=None, use_live=True):
    iso3, match = Country.get_iso3_country_code_fuzzy(country, use_live=use_live)
    if iso3 is None:
        raise HDXError( % country)   # error-message literal lost in extraction
    return self.add_other_location(iso3, exact=exact,
                                   alterror= % (country, iso3),   # literal lost in extraction
                                   locations=locations)
Add a country. If an iso 3 code is not provided, value is parsed and if it is a valid country name, converted to an iso 3 code. If the country is already added, it is ignored. Args: country (str): Country to add exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True. locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if country added or False if country already present
14,281
def remove_label(self, label, relabel=False):
    self.remove_labels(label, relabel=relabel)
Remove the label number.

The removed label is assigned a value of zero (i.e., background).

Parameters
----------
label : int
    The label number to remove.

relabel : bool, optional
    If `True`, then the segmentation image will be relabeled such that
    the labels are in consecutive order starting from 1.

Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
...                           [0, 0, 0, 0, 0, 4],
...                           [0, 0, 3, 3, 0, 0],
...                           [7, 0, 0, 0, 0, 5],
...                           [7, 7, 0, 5, 5, 5],
...                           [7, 7, 0, 0, 5, 5]])
>>> segm.remove_label(label=5)
>>> segm.data
array([[1, 1, 0, 0, 4, 4],
       [0, 0, 0, 0, 0, 4],
       [0, 0, 3, 3, 0, 0],
       [7, 0, 0, 0, 0, 0],
       [7, 7, 0, 0, 0, 0],
       [7, 7, 0, 0, 0, 0]])

>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
...                           [0, 0, 0, 0, 0, 4],
...                           [0, 0, 3, 3, 0, 0],
...                           [7, 0, 0, 0, 0, 5],
...                           [7, 7, 0, 5, 5, 5],
...                           [7, 7, 0, 0, 5, 5]])
>>> segm.remove_label(label=5, relabel=True)
>>> segm.data
array([[1, 1, 0, 0, 3, 3],
       [0, 0, 0, 0, 0, 3],
       [0, 0, 2, 2, 0, 0],
       [4, 0, 0, 0, 0, 0],
       [4, 4, 0, 0, 0, 0],
       [4, 4, 0, 0, 0, 0]])
14,282
def reload(self, metadata, ignore_unsupported_plugins=True):
    supported_plugins = self._supported_plugins
    for plugin in metadata:
        # the dict-key string literals below were lost in extraction
        if not ignore_unsupported_plugins \
                or plugin[] in supported_plugins:
            self._plugins[plugin[]] = Lv2Plugin(plugin)
Loads the metadata, which will be used to generate lv2 audio plugins.

:param list metadata: lv2 audio plugins metadata
:param bool ignore_unsupported_plugins: If True, skip audio plugins that are
    not installed or not recognized instead of instantiating them.
14,283
def append_query_parameter(url, parameters, ignore_if_exists=True):
    if ignore_if_exists:
        # iterate over a copy of the keys so entries can be deleted safely
        for key in list(parameters.keys()):
            if key + "=" in url:
                del parameters[key]
    parameters_str = "&".join(k + "=" + v for k, v in parameters.items())
    append_token = "&" if "?" in url else "?"
    return url + append_token + parameters_str
quick and dirty appending of query parameters to a url
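A quick check of the helper above with made-up URLs; the second call shows the quick-and-dirty trailing '&' left behind when every parameter already exists:

print(append_query_parameter("https://example.com/search?q=linux", {"page": "2"}))
# -> https://example.com/search?q=linux&page=2

print(append_query_parameter("https://example.com/search?q=linux&page=1", {"page": "2"}))
# -> https://example.com/search?q=linux&page=1&   ("page=" already present, so it is dropped;
#    note the trailing '&' the simple join leaves behind)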
14,284
def remove_this_tlink(self, tlink_id):
    for tlink in self.get_tlinks():
        if tlink.get_id() == tlink_id:
            self.node.remove(tlink.get_node())
            break
Removes the tlink for the given tlink identifier

@type tlink_id: string
@param tlink_id: the tlink identifier to be removed
14,285
def _time_threaded_normxcorr(templates, stream, *args, **kwargs):
    no_chans = np.zeros(len(templates))
    chans = [[] for _ in range(len(templates))]
    array_dict_tuple = _get_array_dicts(templates, stream)
    stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
    cccsums = np.zeros([len(templates),
                        len(stream[0]) - len(templates[0][0]) + 1])
    for seed_id in seed_ids:
        tr_cc, tr_chans = time_multi_normxcorr(
            template_dict[seed_id], stream_dict[seed_id], pad_dict[seed_id],
            True)
        cccsums = np.sum([cccsums, tr_cc], axis=0)
        no_chans += tr_chans.astype(np.int)
        for chan, state in zip(chans, tr_chans):
            if state:
                # the separator literals in the split() calls were lost in extraction
                chan.append((seed_id.split()[1],
                             seed_id.split()[-1].split()[0]))
    return cccsums, no_chans, chans
Use the threaded time-domain routine for concurrency

:type templates: list
:param templates: A list of templates, where each one should be an
    obspy.Stream object containing multiple traces of seismic data and the
    relevant header information.
:type stream: obspy.core.stream.Stream
:param stream: A single Stream object to be correlated with the templates.

:returns: New list of :class:`numpy.ndarray` objects. These will contain
    the correlation sums for each template for this day of data.
:rtype: list
:returns: list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns: list of list of tuples of station, channel for all cross-correlations.
:rtype: list
14,286
def more_statements(self, more_url):
    if isinstance(more_url, StatementsResult):
        more_url = more_url.more
    more_url = self.get_endpoint_server_root() + more_url
    request = HTTPRequest(
        method="GET",
        resource=more_url
    )
    lrs_response = self._send_request(request)
    if lrs_response.success:
        lrs_response.content = StatementsResult.from_json(lrs_response.data)
    return lrs_response
Query the LRS for more statements

:param more_url: URL from a StatementsResult object used to retrieve more statements
:type more_url: str | unicode
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
14,287
def indexTupleFromItem(self, treeItem):
    if not treeItem:
        return (QtCore.QModelIndex(), QtCore.QModelIndex())
    if not treeItem.parentItem:
        return (QtCore.QModelIndex(), QtCore.QModelIndex())
    row = treeItem.childNumber()
    return (self.createIndex(row, 0, treeItem),
            self.createIndex(row, self.columnCount() - 1, treeItem))
Return (first column model index, last column model index) tuple for a configTreeItem
14,288
def get_imported_repo(self, import_path):
    try:
        session = requests.session()
        session.mount("http://",
                      requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))
        # the URL-template literal passed to .format() was lost in extraction
        page_data = session.get(.format(import_path=import_path))
    except requests.ConnectionError:
        return None
    if not page_data:
        return None
    for (root, vcs, url) in self.find_meta_tags(page_data.text):
        if root and vcs and url:
            if root == import_path:
                return ImportedRepo(root, vcs, url)
            elif import_path.startswith(root):
                return self.get_imported_repo(root)
    return None
Looks for a go-import meta tag for the provided import_path. Returns an ImportedRepo instance with the information in the meta tag, or None if no go-import meta tag is found.
14,289
def _save_documentation(version, base_url="https://spark.apache.org/docs"):
    # the two sub-directory name literals in the join() call were lost in extraction;
    # the file mode is restored as 'w' since the function writes JSON output
    target_dir = join(dirname(__file__), , )
    with open(join(target_dir, "spark_properties_{}.json".format(version)), 'w') as fp:
        all_props = _fetch_documentation(version=version, base_url=base_url)
        all_props = sorted(all_props, key=lambda x: x[0])
        all_props_d = [{"property": p, "default": d, "description": desc}
                       for p, d, desc in all_props]
        json.dump(all_props_d, fp, indent=2)
Write the spark property documentation to a file
14,290
def set_main_wire(self, wire=None):
    if not wire:
        for k in dir(self):
            if isinstance(getattr(self, k), Wire):
                wire = getattr(self, k)
                break
    elif not isinstance(wire, Wire):
        raise ValueError("wire needs to be a Wire instance")
    if not isinstance(wire, Wire):
        wire = None
    self.main = wire
    return wire
Sets the specified wire as the link's main wire

This is done automatically during the first wire() call

Keyword Arguments:
    - wire (Wire): if None, use the first wire instance found

Returns:
    - Wire: the new main wire instance
14,291
def dicom_read(directory, pixeltype='float'):
    # NOTE: the default pixeltype and the filename-extension literal were lost in
    # extraction; 'float' and '.dcm' are reasonable reconstructions, not verbatim source.
    slices = []
    imgidx = 0
    for imgpath in os.listdir(directory):
        if imgpath.endswith('.dcm'):
            if imgidx == 0:
                tmp = image_read(os.path.join(directory, imgpath),
                                 dimension=3, pixeltype=pixeltype)
                origin = tmp.origin
                spacing = tmp.spacing
                direction = tmp.direction
                tmp = tmp.numpy()[:, :, 0]
            else:
                tmp = image_read(os.path.join(directory, imgpath),
                                 dimension=2, pixeltype=pixeltype).numpy()
            slices.append(tmp)
            imgidx += 1
    slices = np.stack(slices, axis=-1)
    return from_numpy(slices, origin=origin, spacing=spacing, direction=direction)
Read a set of dicom files in a directory into a single ANTsImage.
The origin of the resulting 3D image will be the origin of the first
dicom image read.

Arguments
---------
directory : string
    folder in which all the dicom images exist

Returns
-------
ANTsImage

Example
-------
>>> import ants
>>> img = ants.dicom_read('~/desktop/dicom-subject/')
14,292
def consultar_status_operacional(self):
    # the endpoint-name argument and the response-key literal were lost in extraction
    resp = self._http_post()
    conteudo = resp.json()
    return RespostaConsultarStatusOperacional.analisar(
        conteudo.get())
Overrides :meth:`~satcfe.base.FuncoesSAT.consultar_status_operacional`.

:return: A SAT response specialized for ``ConsultarStatusOperacional``.
:rtype: satcfe.resposta.consultarstatusoperacional.RespostaConsultarStatusOperacional
14,293
def imfill(immsk):
    for iz in range(immsk.shape[0]):
        for iy in range(immsk.shape[1]):
            ix0 = np.argmax(immsk[iz, iy, :] > 0)
            ix1 = immsk.shape[2] - np.argmax(immsk[iz, iy, ::-1] > 0)
            if (ix1 - ix0) > immsk.shape[2] - 10:
                continue
            immsk[iz, iy, ix0:ix1] = 1
    return immsk
fill the empty patches of image mask 'immsk'
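A small, self-contained check of the row-filling behaviour; the array shape and indices are made up and it only assumes numpy plus the imfill above:

import numpy as np

mask = np.zeros((1, 1, 32), dtype=np.uint8)
mask[0, 0, 5] = 1
mask[0, 0, 12] = 1           # two hits in the same row, with a gap in between
filled = imfill(mask)
print(filled[0, 0, 5:13])    # -> [1 1 1 1 1 1 1 1], the gap between the hits is filled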
14,294
def ImportFile(store, filename, start):
    with io.open(filename, "r") as fp:
        reader = csv.Reader(fp.read())
        i = 0
        current_row = None
        product_code_list = []
        op_system_code_list = []
        for row in reader:
            i += 1
            if i and i % 5000 == 0:
                data_store.DB.Flush()
                print("Imported %d hashes" % i)
            if i > 1:
                if len(row) != 8:
                    continue
                try:
                    if i < start:
                        continue
                    if current_row:
                        if current_row[0] == row[0]:
                            product_code_list.append(int(row[5]))
                            op_system_code_list.append(row[6])
                            continue
                    else:
                        current_row = row
                        product_code_list = [int(row[5])]
                        op_system_code_list = [row[6]]
                        continue
                    _ImportRow(store, current_row, product_code_list,
                               op_system_code_list)
                    current_row = row
                    product_code_list = [int(row[5])]
                    op_system_code_list = [row[6]]
                except Exception as e:
                    print("Failed at %d with %s" % (i, str(e)))
                    return i - 1
        if current_row:
            _ImportRow(store, current_row, product_code_list, op_system_code_list)
        return i
Import hashes from 'filename' into 'store'.
14,295
def _printer(self, *out, **kws):
    # the kws.pop() key/default literals were lost in extraction; the names below
    # are guesses that mirror the local variables and print()'s own defaults
    flush = kws.pop('flush', True)
    fileh = kws.pop('file', self.writer)
    sep = kws.pop('sep', ' ')
    end = kws.pop('end', '\n')
    print(*out, file=fileh, sep=sep, end=end)
    if flush:
        fileh.flush()
Generic print function.
14,296
def addLOADDEV(rh):
    rh.printSysLog("Enter changeVM.addLOADDEV")
    # NOTE: the dict-key string literals throughout this function (and, judging by
    # the otherwise-undefined 'lun' below, some intervening statements) were lost
    # in extraction; the gaps are left in place rather than guessed at.
    if ( in rh.parms and not in rh.parms):
        msg = msgs.msg[][1] % (modId, "scpData", "scpDataType")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg[][0])
        return
    if ( in rh.parms and not in rh.parms):
        if rh.parms[].lower() == "delete":
            scpDataType = 1
        else:
            lun.replace("0x", "")
    if not in rh.parms:
        wwpn = ""
    else:
        wwpn = rh.parms[]
Sets the LOADDEV statement in the virtual machine's directory entry.

Input:
    Request Handle with the following properties:
        function             - 'CHANGEVM'
        subfunction          - 'ADDLOADDEV'
        userid               - userid of the virtual machine
        parms['boot']        - Boot program number
        parms['addr']        - Logical block address of the boot record
        parms['lun']         - One to eight-byte logical unit number of the
                               FCP-I/O device.
        parms['wwpn']        - World-Wide Port Number
        parms['scpDataType'] - SCP data type
        parms['scpData']     - Designates information to be passed to the
                               program that is loaded during guest IPL.

    Note that any of the parms may be left blank, in which case we will not
    update them.

Output:
    Request Handle updated with the results.
    Return code - 0: ok, non-zero: error
14,297
def _reset(self, command, *args, **kwargs):
    if self.indexable:
        self.deindex()
    result = self._traverse_command(command, *args, **kwargs)
    if self.indexable:
        self.index()
    return result
Shortcut for commands that reset values of the field. All will be deindexed and reindexed.
14,298
def draw_key(self, surface, key):
    if isinstance(key, VSpaceKey):
        self.draw_space_key(surface, key)
    elif isinstance(key, VBackKey):
        self.draw_back_key(surface, key)
    elif isinstance(key, VUppercaseKey):
        self.draw_uppercase_key(surface, key)
    elif isinstance(key, VSpecialCharKey):
        self.draw_special_char_key(surface, key)
    else:
        self.draw_character_key(surface, key)
Default drawing method for key.

Draw the key according to its type.

:param surface: Surface background should be drawn in.
:param key: Target key to be drawn.
14,299
def parse_wait_time(text: str) -> int:
    val = RATELIMIT.findall(text)
    if len(val) > 0:
        try:
            res = val[0]
            # the unit literals compared against res[1] (minutes vs. seconds) and
            # the warning-message prefix were lost in extraction
            if res[1] == :
                return int(res[0]) * 60
            if res[1] == :
                return int(res[0])
        except Exception as e:
            util_logger.warning( + str(e))
    return 1 * 60
Parse the waiting time from the exception
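Since the RATELIMIT pattern and the unit literals are missing above, here is a self-contained sketch of the same parsing idea with an assumed regex; it is an illustration, not the original implementation:

import re

# assumed stand-in for the module's RATELIMIT pattern: captures (amount, unit)
RATELIMIT = re.compile(r"(\d+) (minute|second)s?")

def parse_wait_time_sketch(text: str) -> int:
    val = RATELIMIT.findall(text)
    if val:
        amount, unit = val[0]
        return int(amount) * (60 if unit == "minute" else 1)
    return 60  # fall back to one minute, as the original does

print(parse_wait_time_sketch("you are doing that too much. try again in 9 minutes."))  # -> 540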