Dataset columns: `Unnamed: 0` (int64, range 0 - 389k), `code` (string, lengths 26 - 79.6k), `docstring` (string, lengths 1 - 46.9k).
381,600
def parse_table_row(self, markup, row): if row is None: row = WikipediaTableRow() markup = markup.replace("!!", "||") for cell in markup.lstrip("|!").split("||"): i = cell.find("|") j = cell.find("[[") if i>0 and (j<0 or i<j): data = self.plain(cell[i+1:]) properties = cell[:i].strip() else: data = self.plain(cell) properties = u"" cell = WikipediaTableCell(data) cell.properties = properties row.append(cell) return row
Parses a row of cells in a Wikipedia table. Cells in the row are separated by "||". A "!" indicates a row of heading columns. Each cell can contain properties before a "|", e.g. align="right" | Cell 2 (right aligned).
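A minimal illustration of the cell-splitting rule described above, run standalone on a sample row of wiki markup (`WikipediaTableRow`/`WikipediaTableCell` come from the surrounding module and are not needed for the split itself):

markup = '| align="right" | Cell 2 || Cell 3 || [[Link|Cell 4]]'
for cell in markup.lstrip("|!").split("||"):
    i, j = cell.find("|"), cell.find("[[")
    if i > 0 and (j < 0 or i < j):
        # properties before the first "|", data after it
        print("properties=%r data=%r" % (cell[:i].strip(), cell[i + 1:].strip()))
    else:
        # a "|" inside "[[...]]" is link syntax, not a property separator
        print("data=%r" % cell.strip())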
381,601
def on_session_end(self, session): if not isinstance(session, ISession): raise TypeError("session can only be an instance of type ISession") progress = self._call("onSessionEnd", in_p=[session]) progress = IProgress(progress) return progress
Triggered by the given session object when the session is about to close normally. in: session of type :class:`ISession` — Session that is being closed. return: progress of type :class:`IProgress` — Used to wait until the corresponding machine is actually dissociated from the given session on the server. Returned only when this session is a direct one.
381,602
def td_taper(out, start, end, beta=8, side='left'): out = out.copy() width = end - start winlen = 2 * int(width / out.delta_t) window = Array(signal.get_window(('kaiser', beta), winlen)) xmin = int((start - out.start_time) / out.delta_t) xmax = xmin + winlen//2 if side == 'left': out[xmin:xmax] *= window[:winlen//2] if xmin > 0: out[:xmin].clear() elif side == 'right': out[xmin:xmax] *= window[winlen//2:] if xmax < len(out): out[xmax:].clear() else: raise ValueError("unrecognized side argument {}".format(side)) return out
Applies a taper to the given TimeSeries. A half-kaiser window is used for the roll-off. Parameters ---------- out : TimeSeries The ``TimeSeries`` to taper. start : float The time (in s) to start the taper window. end : float The time (in s) to end the taper window. beta : int, optional The beta parameter to use for the Kaiser window. See ``scipy.signal.kaiser`` for details. Default is 8. side : {'left', 'right'} The side to apply the taper to. If ``'left'`` (``'right'``), the taper will roll up (down) between ``start`` and ``end``, with all values before ``start`` (after ``end``) set to zero. Default is ``'left'``. Returns ------- TimeSeries The tapered time series.
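A hedged usage sketch, assuming the PyCBC `TimeSeries`/`Array` types this function operates on (the 1024 Hz rate and 0.25 s taper window are arbitrary choices):

import numpy as np
from pycbc.types import TimeSeries

ts = TimeSeries(np.ones(4096), delta_t=1.0 / 1024)   # 4 s of unit samples
tapered = td_taper(ts, start=1.0, end=1.25)          # side='left' by default
# samples before t=1.0 s are zeroed; 1.0-1.25 s ramps up on a half-Kaiser window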
381,603
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs): config = ET.Element("config") hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp") arp_entry = ET.SubElement(hide_arp_holder, "arp-entry") arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address") arp_ip_address_key.text = kwargs.pop('arp_ip_address') interfacetype = ET.SubElement(arp_entry, "interfacetype") Port_channel = ET.SubElement(interfacetype, "Port-channel") Port_channel = ET.SubElement(Port_channel, "Port-channel") Port_channel.text = kwargs.pop('Port_channel') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
381,604
def slice_graph(graph, node, frontier, include_frontier=False): subgraph = networkx.DiGraph() for frontier_node in frontier: for simple_path in networkx.all_simple_paths(graph, node, frontier_node): for src, dst in zip(simple_path, simple_path[1:]): if include_frontier or (src not in frontier and dst not in frontier): subgraph.add_edge(src, dst) if not list(subgraph.nodes): if (node, node) in graph.edges: subgraph.add_edge(node, node) return subgraph
Generate a slice of the graph from the head node to the given frontier. :param networkx.DiGraph graph: The graph to work on. :param node: The starting node in the graph. :param frontier: A list of frontier nodes. :param bool include_frontier: Whether the frontier nodes are included in the slice or not. :return: A subgraph. :rtype: networkx.DiGraph
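A self-contained example: slicing from node 1 to frontier node 3 keeps only edges lying on simple paths between them:

import networkx

g = networkx.DiGraph([(1, 2), (2, 3), (3, 4), (2, 4)])
sub = slice_graph(g, 1, frontier=[3], include_frontier=True)
print(sorted(sub.edges))   # [(1, 2), (2, 3)] - the edges into 4 fall outside the slice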
381,605
def output_summary(self, output_stream=sys.stdout): if self.app_name or self.app_description: print(, end=, file=output_stream) if self.app_name: print(self.app_name, self.app_version, file=output_stream) if self.app_description: print(self.app_description, file=output_stream) if self.app_name or self.app_description: print(, file=output_stream) names_list = self.get_option_names() print( "usage:\n%s [OPTIONS]... " % self.app_invocation_name, end=, file=output_stream ) bracket_count = 0 for key in names_list: an_option = self.option_definitions[key] if an_option.is_argument: if an_option.default is None: print(an_option.default, end=, file=output_stream) else: print("[ %s" % an_option.name, end=, file=output_stream) bracket_count += 1 print( * bracket_count, , file=output_stream) names_list.sort() if names_list: print(, file=output_stream) pad = * 4 for name in names_list: if name in self.options_banned_from_help: continue option = self._get_option(name) line = * 2 if option.short_form: line += % option.short_form line += % name line += doc = option.doc if option.doc is not None else if doc: line += % (pad, doc) try: value = option.value type_of_value = type(value) converter_function = to_string_converters[type_of_value] default = converter_function(value) except KeyError: default = option.value if default is not None: if ( (option.secret or in name.lower()) and not self.option_definitions.admin.expose_secrets.default ): default = if name not in (,): print(line, file=output_stream)
outputs a usage tip and the list of acceptable commands. This is useful as the output of the 'help' option. parameters: output_stream - an open file-like object suitable for use as the target of a print function
381,606
def assign(name, value): ret = {} cmd = 'sysctl {0}="{1}"'.format(name, value) data = __salt__['cmd.run_all'](cmd, python_shell=False) if data['retcode'] != 0: raise CommandExecutionError('sysctl failed: {0}'.format(data['stderr'])) new_name, new_value = data['stdout'].split(':', 1) ret[new_name] = new_value.split(' -> ')[-1] return ret
Assign a single sysctl parameter for this minion CLI Example: .. code-block:: bash salt '*' sysctl.assign net.inet.icmp.icmplim 50
381,607
def substructure(self, atoms, meta=False, as_view=True): s = super().substructure(atoms, meta, as_view) if as_view: s.check_valence = s.explicify_hydrogens = s.implicify_hydrogens = s.reset_query_marks = frozen s.standardize = s.aromatize = frozen return s
Create a substructure containing the atoms from an nbunch list. :param atoms: list of atom numbers of the substructure :param meta: if True, metadata will be copied to the substructure :param as_view: if True, the returned graph view provides a read-only view of the original structure scaffold without actually copying any data.
381,608
def playerJoin(config, agentCallBack, lobbyTimeout=c.INITIAL_TIMEOUT, debug=True): FLAGS(sys.argv) log = protocol.logging.logging log.disable(log.CRITICAL) amHosting = not bool(config.host) thisPlayer = config.whoAmI() operPrefix = "HOST" if amHosting else "JOIN" operType = "%sGAME"%operPrefix createReq = config.requestCreateDetails() if amHosting else None joinReq = config.requestJoinDetails() selectedIP = config.clientInitHost() selectPort = config.clientInitPort() controller = None finalResult = rh.playerSurrendered(config) startWaitTime = now() while True: obs = getGameState() result = obs.player_result if result: finalResult = rh.idPlayerResults(config, result) break try: agentCallBack(obs) if newNow - startWaitTime > c.REPLAY_SAVE_FREQUENCY: replayData = controller.save_replay() startWaitTime = newNow replayData = controller.save_replay() except (protocol.ConnectionError, protocol.ProtocolError, remote_controller.RequestError) as e: if "Status.in_game" in str(e): finalResult = rh.playerSurrendered(config) else: finalResult = rh.playerDisconnected(config) print("%s Connection to game host has ended, even intentionally by agent. Message:%s%s"%(type(e), os.linesep, e)) except KeyboardInterrupt: if debug: print("caught command to forcibly shutdown Starcraft2 client.") finalResult = rh.playerSurrendered(config) finally: if replayData: replayData = base64.encodestring(replayData).decode() if controller: controller.quit() return (finalResult, replayData)
cause an agent to join an already hosted game
381,609
def format_h1(s, format="text", indents=0): _CHAR = "=" if format.startswith("text"): return format_underline(s, _CHAR, indents) elif format.startswith("markdown"): return ["# {}".format(s)] elif format.startswith("rest"): return format_underline(s, _CHAR, 0)
Formats a string as a level-1 heading. Args: s: string format: string starting with "text", "markdown", or "rest" indents: number of leading indenting spaces Returns: list >>> print("\\n".join(format_h1("Header 1", indents=10))) Header 1 ======== >>> print("\\n".join(format_h1("Header 1", "markdown", 0))) # Header 1
381,610
def join_ops(ops1, ops2): i = len(ops1) - 1 j = 0 while i >= 0 and j < len(ops2): if ops1[i] == ops2[j]: i -= 1 j += 1 else: break return ops1[:i + 1] + ops2[j:]
For internal use.
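A worked example of the cancellation: trailing ops of `ops1` are matched pairwise against leading ops of `ops2` and dropped (plain integers stand in for whatever op objects the caller uses):

print(join_ops([1, 2, 3], [3, 2, 4]))   # [1, 4] - the 3/3 and 2/2 pairs cancel
print(join_ops([1, 2], [5, 6]))         # [1, 2, 5, 6] - nothing cancels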
381,611
def get_bound(pts): (x0, y0, x1, y1) = (INF, INF, -INF, -INF) for (x, y) in pts: x0 = min(x0, x) y0 = min(y0, y) x1 = max(x1, x) y1 = max(y1, y) return (x0, y0, x1, y1)
Compute a minimal rectangle that covers all the points.
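Usage is straightforward once `INF` is defined (the module this comes from defines it as a large sentinel, pdfminer-style; `float('inf')` works for a standalone run):

INF = float('inf')
print(get_bound([(1, 5), (3, 2), (-2, 4)]))   # (-2, 2, 3, 5)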
381,612
def location_id(self, location_id): if location_id is None: raise ValueError("Invalid value for `location_id`, must not be `None`") if len(location_id) < 1: raise ValueError("Invalid value for `location_id`, length must be greater than or equal to `1`") self._location_id = location_id
Sets the location_id of this Order. The ID of the merchant location this order is associated with. :param location_id: The location_id of this Order. :type: str
381,613
def status(self, return_json=False): def tabular_print(title, value): click.echo( % (32, click.style("\t" + title, fg=) + , str(value))) return lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name) if not lambda_versions: raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" % (self.lambda_name, self.zappa.aws_region), fg=)) status_dict = collections.OrderedDict() status_dict["Lambda Versions"] = len(lambda_versions) function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) conf = function_response[] self.lambda_arn = conf[] status_dict["Lambda Name"] = self.lambda_name status_dict["Lambda ARN"] = self.lambda_arn status_dict["Lambda Role ARN"] = conf[] status_dict["Lambda Handler"] = conf[] status_dict["Lambda Code Size"] = conf[] status_dict["Lambda Version"] = conf[] status_dict["Lambda Last Modified"] = conf[] status_dict["Lambda Memory Size"] = conf[] status_dict["Lambda Timeout"] = conf[] status_dict["Lambda Runtime"] = conf[] if in conf.keys(): status_dict["Lambda VPC ID"] = conf.get(, {}).get(, ) else: status_dict["Lambda VPC ID"] = None try: function_invocations = self.zappa.cloudwatch.get_metric_statistics( Namespace=, MetricName=, StartTime=datetime.utcnow()-timedelta(days=1), EndTime=datetime.utcnow(), Period=1440, Statistics=[], Dimensions=[{: , : .format(self.lambda_name)}] )[][0][] except Exception as e: function_invocations = 0 try: function_errors = self.zappa.cloudwatch.get_metric_statistics( Namespace=, MetricName=, StartTime=datetime.utcnow()-timedelta(days=1), EndTime=datetime.utcnow(), Period=1440, Statistics=[], Dimensions=[{: , : .format(self.lambda_name)}] )[][0][] except Exception as e: function_errors = 0 try: error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100) except: error_rate = "Error calculating" status_dict["Invocations (24h)"] = int(function_invocations) status_dict["Errors (24h)"] = int(function_errors) status_dict["Error Rate (24h)"] = error_rate if self.use_apigateway: api_url = self.zappa.get_api_url( self.lambda_name, self.api_stage) status_dict["API Gateway URL"] = api_url api_id = self.zappa.get_api_id(self.lambda_name) for api_key in self.zappa.get_api_keys(api_id, self.api_stage): status_dict["API Gateway x-api-key"] = api_key for event in v: for item_k, item_v in event.items(): tabular_print(item_k, item_v) else: tabular_print(k, v) return True
Describe the status of the current deployment.
381,614
def _nodeSetValuesFromDict(self, dct): if 'data' in dct: qFont = QtGui.QFont() success = qFont.fromString(dct['data']) if not success: msg = "Unable to create QFont from string {!r}".format(dct['data']) logger.warn(msg) if DEBUGGING: raise ValueError(msg) self.data = qFont
Sets values from a dictionary in the current node. Non-recursive auxiliary function for setValuesFromDict
381,615
def _process_all(self, limit): omimids = self._get_omim_ids() LOG.info(, len(omimids)) LOG.info(, len(self.omim_type)) if self.test_mode: graph = self.testgraph else: graph = self.graph geno = Genotype(graph) model = Model(graph) tax_label = tax_id = self.globaltt[tax_label] geno.addGenome(tax_id, tax_label) model.addClassToGraph(tax_id, None) includes = set() includes.add() self.process_entries( omimids, self._transform_entry, includes, graph, limit, self.globaltt)
This takes the list of omim identifiers from the omim.txt.Z file, and iteratively queries the omim api for the json-formatted data. This will create OMIM classes, with the label, definition, and some synonyms. If an entry is "removed", it is added as a deprecated class. If an entry is "moved", it is deprecated and consider annotations are added. Additionally, we extract: *phenotypicSeries ids as superclasses *equivalent ids for Orphanet and UMLS If set to testMode, it will write only those items in the test_ids to the testgraph. :param limit: :return:
381,616
def can_invite_others(self, user): if self.is_managed: return False elif self.is_admin(user): return True elif self.subscription_policy != SubscriptionPolicy.CLOSED: return True else: return False
Determine if user can invite people to a group. Be aware that this check is independent from the people (users) which are going to be invited. The checked user is the one who invites someone, NOT who is going to be invited. :param user: User to be checked. :returns: True or False.
381,617
def generate_content_media_type(self): if self._definition[] == : with self.l(): with self.l(): self.l() with self.l(): self.l() with self.l(): with self.l(): self.l() self.l() with self.l(): self.l()
Means loading value when it's specified as JSON. .. code-block:: python { 'contentMediaType': 'application/json', }
381,618
def init_weights(self, w, n=-1): if n == -1: n = self.n if type(w) == str: if w == "random": w = np.random.normal(0, 0.5, n) elif w == "zeros": w = np.zeros(n) else: raise ValueError() elif len(w) == n: try: w = np.array(w, dtype="float64") except: raise ValueError() else: raise ValueError() self.w = w
This function initialises the adaptive weights of the filter. **Args:** * `w` : initial weights of filter. Possible values are: * array with initial weights (1 dimensional array) of filter size * "random" : create random weights * "zeros" : create zero value weights **Kwargs:** * `n` : size of filter (int) - number of filter coefficients. **Returns:** Nothing; the initialised weights are stored in `self.w`.
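A hedged standalone sketch: the function expects a padasip-style filter object carrying `n`, so a two-line stub is enough to exercise it (the stub is illustrative, not the library's class):

import numpy as np

class _Stub:
    n = 4
    init_weights = init_weights   # reuse the function above as a method

f = _Stub()
f.init_weights("zeros")
print(f.w)                        # [0. 0. 0. 0.]
f.init_weights([1, 2, 3, 4])      # explicit weights are cast to float64
print(f.w.dtype)                  # float64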
381,619
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''): if st.Format == 1: for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 2: for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)): if ruleset is None: continue for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)): if rule is None: continue maxCtx = maxCtxContextualRule(maxCtx, rule, chain) elif st.Format == 3: maxCtx = maxCtxContextualRule(maxCtx, st, chain) return maxCtx
Calculate usMaxContext based on a contextual feature subtable.
381,620
def normalize(W, copy=True): if copy: W = W.copy() W /= np.max(np.abs(W)) return W
Normalizes an input weighted connection matrix. If copy is not set, this function will *modify W in place.* Parameters ---------- W : np.ndarray weighted connectivity matrix copy : bool if True, returns a copy of the matrix. Otherwise, modifies the matrix in place. Default value=True. Returns ------- W : np.ndarray normalized connectivity matrix
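For example, scaling by the largest absolute weight:

import numpy as np

W = np.array([[0.0, -4.0], [2.0, 0.0]])
print(normalize(W))   # [[ 0.  -1. ] [ 0.5  0. ]]
print(W[0, 1])        # -4.0 - original untouched because copy=True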
381,621
def _to_rest_includes(models, includes): included = [] includes = includes or [] if not isinstance(models, list): models = [models] for include in includes: for model in models: rel = getattr(model, include) if hasattr(rel, 'model') and rel.model: rel_models = [rel.model] elif hasattr(rel, 'models') and rel.models: rel_models = rel.models for rel_model in rel_models: if rel_model in models or rel_model in included: continue else: included.append(rel_model) for idx, val in enumerate(included): included[idx] = _to_rest(val) return included
Fetch the models to be included. The includes should follow a few basic rules: * the include MUST not already be an array member of the included array (no dupes) * the include MUST not be the same as the primary data if the primary data is a single resource object (no dupes) * the include MUST not be an array member of the primary data if the primary data is an array of resource objects (no dupes) Basically, each included array member should be the only instance of that resource object in the entire restified data.
381,622
def split_params(sym, params): arg_params = {} aux_params = {} for args in sym.list_arguments(): if args in params: arg_params.update({args: nd.array(params[args])}) for aux in sym.list_auxiliary_states(): if aux in params: aux_params.update({aux: nd.array(params[aux])}) return arg_params, aux_params
Helper function to split params dictionary into args and aux params Parameters ---------- sym : :class:`~mxnet.symbol.Symbol` MXNet symbol object params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format Returns ------- arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray` Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
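A hedged usage sketch (assumes MXNet is installed; the tiny symbol and zero-filled parameter values are placeholders, since `split_params` only routes names and never inspects shapes):

import numpy as np
import mxnet as mx

data = mx.sym.Variable('data')
net = mx.sym.BatchNorm(mx.sym.FullyConnected(data, num_hidden=2, name='fc'), name='bn')
names = net.list_arguments() + net.list_auxiliary_states()
params = {n: np.zeros(2) for n in names if n != 'data'}
arg_params, aux_params = split_params(net, params)
print(sorted(aux_params))   # ['bn_moving_mean', 'bn_moving_var']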
381,623
def to_unicode_optional_iterator(x): if isinstance(x, STRING_TYPES): return to_unicode(x) try: l = list(x) except TypeError as e: assert 'is not iterable' in str(e) return x else: return [ to_unicode(e) for e in l ]
Raise TypeError if x is a str containing non-utf8 bytes or if x is an iterable which contains such a str.
381,624
def create(self, data): response = self.http.post(str(self), json=data, auth=self.auth) response.raise_for_status() return response.json()
Create a new component
381,625
def top(self, objects: Set[Object]) -> Set[Object]: objects_per_box = self._separate_objects_by_boxes(objects) return_set: Set[Object] = set() for _, box_objects in objects_per_box.items(): min_y_loc = min([obj.y_loc for obj in box_objects]) return_set.update(set([obj for obj in box_objects if obj.y_loc == min_y_loc])) return return_set
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each box.
381,626
def token(self): auth_header = self.headers.get('Authorization', '') if 'Token ' in auth_header: return auth_header.partition('Token ')[-1] else: return auth_header
Attempt to return the auth header token. :return: token related to request
381,627
def watch(self, key, pipeline=False): if pipeline: self._pipeline.watch(key) else: self._db.watch(key)
Watch the given key. Marks the given key to be watched for conditional execution of a transaction. Args: key (str): Key that needs to be watched. pipeline (bool): If True, watch the key within the transaction pipeline. Default False.
381,628
def _py_ex_argtype(executable): result = [] for p in executable.ordered_parameters: atypes = p.argtypes if atypes is not None: result.extend(p.argtypes) else: print(("No argtypes for: {}".format(p.definition()))) if type(executable).__name__ == "Function": result.extend(executable.argtypes) return result
Returns the code to create the argtype to assign to the method's argtypes attribute.
381,629
def p_expression_uxnor(self, p): p[0] = Uxnor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
expression : XNOR expression %prec UXNOR
381,630
def bundle_stylesheets(context: Context): args = [ , context.app.scss_build_path, , , ] if context.verbosity == 0: args.append() if not context.use_colour: args.append() for path in context.app.scss_include_paths: args.append() args.append(path) return_code = 0 for source_file in context.app.scss_source_file_set.paths_for_shell(separator=None): return_code = context.node_tool(, *args + [source_file]) or return_code return return_code
Compiles stylesheets
381,631
def Run(self, unused_args): if flags.FLAGS.pdb_post_mortem: logging.warning("Segfault action requested :(") print(ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents) else: logging.warning("Segfault requested but not running in debug mode.")
Does the segfaulting.
381,632
def run_training(sub_id: int, run_seed: int, run_options, process_queue): docker_target_name = (run_options[] if run_options[] != else None) env_path = (run_options[] if run_options[] != else None) run_id = run_options[] load_model = run_options[] train_model = run_options[] save_freq = int(run_options[]) keep_checkpoints = int(run_options[]) base_port = int(run_options[]) num_envs = int(run_options[]) curriculum_folder = (run_options[] if run_options[] != else None) lesson = int(run_options[]) fast_simulation = not bool(run_options[]) no_graphics = run_options[] trainer_config_path = run_options[] if not docker_target_name: model_path = .format(run_id=run_id, sub_id=sub_id) summaries_dir = else: trainer_config_path = \ .format( docker_target_name=docker_target_name, trainer_config_path=trainer_config_path) if curriculum_folder is not None: curriculum_folder = \ .format( docker_target_name=docker_target_name, curriculum_folder=curriculum_folder) model_path = .format( docker_target_name=docker_target_name, run_id=run_id, sub_id=sub_id) summaries_dir = .format( docker_target_name=docker_target_name) trainer_config = load_config(trainer_config_path) env_factory = create_environment_factory( env_path, docker_target_name, no_graphics, run_seed, base_port + (sub_id * num_envs) ) env = SubprocessUnityEnvironment(env_factory, num_envs) maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env) tc = TrainerController(model_path, summaries_dir, run_id + + str(sub_id), save_freq, maybe_meta_curriculum, load_model, train_model, keep_checkpoints, lesson, env.external_brains, run_seed, fast_simulation) process_queue.put(True) tc.start_learning(env, trainer_config)
Launches training session. :param process_queue: Queue used to send signal back to main. :param sub_id: Unique id for training session. :param run_seed: Random seed used for training. :param run_options: Command line arguments for training.
381,633
def transcodeImage(self, media, height, width, opacity=100, saturation=100): if media: transcode_url = '/photo/:/transcode?height=%s&width=%s&opacity=%s&saturation=%s&url=%s' % ( height, width, opacity, saturation, media) return self.url(transcode_url, includeToken=True)
Returns the URL for a transcoded image from the specified media object. Returns None if no media is specified (needed if the user tries to pass thumb or art directly). Parameters: height (int): Height to transcode the image to. width (int): Width to transcode the image to. opacity (int): Opacity of the resulting image (possibly deprecated). saturation (int): Saturation of the resulting image.
381,634
def get_episode_ids(self, show_id, season): logger.info(, show_id, season) r = self.session.get(self.server_url + % (show_id, season), timeout=10) soup = ParserBeautifulSoup(r.content, [, ]) episode_ids = {} for row in soup.select(): if not row(, href=episode_id_re): continue cells = row() episode = int(cells[0].text.split()[1]) episode_id = int(cells[1].a[][8:-5]) episode_ids[episode] = episode_id if episode_ids: logger.debug(, episode_ids) else: logger.warning() return episode_ids
Get episode ids from the show id and the season. :param int show_id: show id. :param int season: season of the episode. :return: episode ids per episode number. :rtype: dict
381,635
def get_pk_value_on_save(self, instance): value = super(AleaIdField, self).get_pk_value_on_save(instance) if not value: value = self.get_seeded_value(instance) return value
Generate ID if required.
381,636
def bpp2newick(bppnewick): "converts bpp newick format to normal newick" regex1 = re.compile(r" regex2 = re.compile(r" regex3 = re.compile(r": ") new = regex1.sub(":", bppnewick) new = regex2.sub(";", new) new = regex3.sub(":", new) return new
converts bpp newick format to normal newick
381,637
def get_nendo (): y, m = map(int, time.strftime("%Y %m").split()) return y if m >= 4 else y - 1
What is the current Japanese fiscal year? (The fiscal year rolls over in April.)
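The rule it implements, spelled out for fixed (illustrative) dates:

for y, m in [(2024, 3), (2024, 4)]:
    print(y if m >= 4 else y - 1)   # 2023, then 2024 - March still belongs to FY2023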
381,638
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name): found_kwargs = False needs_fix = False for t in raw_params[2:]: if t.type == token.COMMA: continue elif t.type == token.NAME and not found_kwargs: needs_fix = True elif t.type == token.NAME and found_kwargs: return t.value if needs_fix else u'' elif t.type == token.DOUBLESTAR: found_kwargs = True else: return kwargs_default if needs_fix else u''
u""" Returns string with the name of the kwargs dict if the params after the first star need fixing Otherwise returns empty string
381,639
def rsa_decrypt_base64_encoded_key(rsaprivatekey, enckey): return rsaprivatekey.decrypt( base64.b64decode(enckey), cryptography.hazmat.primitives.asymmetric.padding.OAEP( mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1( algorithm=cryptography.hazmat.primitives.hashes.SHA1() ), algorithm=cryptography.hazmat.primitives.hashes.SHA1(), label=None, ) )
Decrypt an RSA encrypted key encoded as base64 :param rsaprivatekey: RSA private key :type rsaprivatekey: cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey :param str enckey: base64-encoded key :rtype: bytes :return: decrypted key
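A round-trip sketch using the same `cryptography` primitives the function expects (the 32-byte `secret` stands in for a symmetric content key; the OAEP/SHA-1 padding mirrors the decrypt side above):

import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
secret = b'0123456789abcdef0123456789abcdef'
oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()),
                    algorithm=hashes.SHA1(), label=None)
enckey = base64.b64encode(key.public_key().encrypt(secret, oaep))
assert rsa_decrypt_base64_encoded_key(key, enckey) == secret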
381,640
def step2_exchange(self, code=None, http=None, device_flow_info=None): if code is None and device_flow_info is None: raise ValueError() if code is not None and device_flow_info is not None: raise ValueError() if code is None: code = device_flow_info.device_code elif not isinstance(code, (six.string_types, six.binary_type)): if not in code: raise FlowExchangeError(code.get( , )) code = code[] post_data = { : self.client_id, : code, : self.scope, } if self.client_secret is not None: post_data[] = self.client_secret if self._pkce: post_data[] = self.code_verifier if device_flow_info is not None: post_data[] = else: post_data[] = post_data[] = self.redirect_uri body = urllib.parse.urlencode(post_data) headers = { : , } if self.authorization_header is not None: headers[] = self.authorization_header if self.user_agent is not None: headers[] = self.user_agent if http is None: http = transport.get_http_object() resp, content = transport.request( http, self.token_uri, method=, body=body, headers=headers) d = _parse_exchange_token_response(content) if resp.status == http_client.OK and in d: access_token = d[] refresh_token = d.get(, None) if not refresh_token: logger.info( "reauthenticating with prompt=.") token_expiry = None if in d: delta = datetime.timedelta(seconds=int(d[])) token_expiry = delta + _UTCNOW() extracted_id_token = None id_token_jwt = None if in d: extracted_id_token = _extract_id_token(d[]) id_token_jwt = d[] logger.info() return OAuth2Credentials( access_token, self.client_id, self.client_secret, refresh_token, token_expiry, self.token_uri, self.user_agent, revoke_uri=self.revoke_uri, id_token=extracted_id_token, id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope, token_info_uri=self.token_info_uri) else: logger.info(, content) if in d: error_msg = (str(d[]) + str(d.get(, ))) else: error_msg = .format(str(resp.status)) raise FlowExchangeError(error_msg)
Exchanges a code for OAuth2Credentials. Args: code: string, a dict-like object, or None. For a non-device flow, this is either the response code as a string, or a dictionary of query parameters to the redirect_uri. For a device flow, this should be None. http: httplib2.Http, optional http instance to use when fetching credentials. device_flow_info: DeviceFlowInfo, return value from step1 in the case of a device flow. Returns: An OAuth2Credentials object that can be used to authorize requests. Raises: FlowExchangeError: if a problem occurred exchanging the code for a refresh_token. ValueError: if code and device_flow_info are both provided or both missing.
381,641
def update(self, docs=None, split=0, parallelism=None, progress_bar=True): self.apply( docs=docs, split=split, train=True, clear=False, parallelism=parallelism, progress_bar=progress_bar, )
Update the features of the specified candidates. :param docs: If provided, apply features to all the candidates in these documents. :param split: If docs is None, apply features to the candidates in this particular split. :type split: int :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Featurizer if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
381,642
def urlencode_utf8(params): if hasattr(params, 'items'): params = params.items() params = ( '='.join(( quote_plus(k.encode('utf8'), safe='/'), quote_plus(v.encode('utf8'), safe='/') )) for k, v in params ) return '&'.join(params)
UTF-8 safe variant of urllib.urlencode. http://stackoverflow.com/a/8152242
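For instance, non-ASCII values are percent-encoded as UTF-8 (dict order is preserved on modern Pythons; `quote_plus` is assumed to be imported module-level, as in the restored code above):

from urllib.parse import quote_plus   # py3 home of quote_plus

print(urlencode_utf8({'q': 'café', 'lang': 'fr'}))   # q=caf%C3%A9&lang=fr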
381,643
def create_parser(subparsers): metrics_parser = subparsers.add_parser( , help=s metricsmetricscontainersDisplay info of a topology\, usage="%(prog)s cluster/[role]/[env] topology-name [options]", add_help=False) args.add_cluster_role_env(containers_parser) args.add_topology_name(containers_parser) args.add_verbose(containers_parser) args.add_tracker_url(containers_parser) args.add_config(containers_parser) args.add_container_id(containers_parser) containers_parser.set_defaults(subcommand=) return subparsers
create parser
381,644
def after(self, i, sibling, name=None): self.parent._insert(sibling, idx=self._own_index + 1 + i, name=name) return self
Adds siblings after the current tag.
381,645
def entropy(data=None, prob=None, tol=1e-5): if prob is None and data is None: raise ValueError("%s.entropy requires either 'prob' or 'data' to be defined" % __name__) if prob is not None and data is not None: raise ValueError("%s.entropy requires only 'prob' or 'data' to be defined" % __name__) if prob is not None and abs(prob.sum() - 1) > tol: raise ValueError("parameter 'prob' in '%s.entropy' should sum to 1" % __name__) if data is not None: prob = symbols_to_prob(data).prob() logProb = np.log2(prob) logProb[logProb == -np.inf] = 0 return -float(np.dot(prob, logProb))
given a probability distribution (prob) or an iterable of symbols (data) compute and return its entropy inputs: ------ data: iterable of symbols prob: iterable with probabilities tol: if prob is given, 'entropy' checks that the sum is about 1. It raises an error if abs(sum(prob)-1) >= tol
381,646
def install(self, name=None, prefix=None, pkgs=None, dep=True, channels=None, token=None): logger.debug(str((prefix, pkgs, channels))) if not pkgs or not isinstance(pkgs, (list, tuple, str)): raise TypeError( ) cmd_list = [, , , ] if name: cmd_list.extend([, name]) elif prefix: cmd_list.extend([, prefix]) else: pass if channels: cmd_list.extend([]) for channel in channels: cmd_list.extend([]) channel = self.parse_token_channel(channel, token) cmd_list.extend([channel]) if isinstance(pkgs, (list, tuple)): cmd_list.extend(pkgs) elif isinstance(pkgs, str): cmd_list.extend([, pkgs]) if not dep: cmd_list.extend([]) return self._call_and_parse(cmd_list)
Install a set of packages into an environment by name or path. If token is specified, the channels different from the defaults will get the token appended.
381,647
def _create_class(rule, index): name = + rule.__name__ + + str(index) + created = type(name, (SplitRule,), SplitRule.__dict__.copy()) created.rule = rule.rules[index] created.rule_index = index created.from_rule = rule return created
Create subtype of SplitRule based on rule. :param rule: Rule from which the SplitRule derive. :param index: Index of the rule (in original Rule class) to use for SplitRule. :return: Class inherited from SplitRule representing rule at index.
381,648
def _allocate_address_neutron(self, instance, network_ids): self._init_os_api() with OpenStackCloudProvider.__node_start_lock: free_ips = [ ip for ip in self.neutron_client.list_floatingips().get() if (ip[] in network_ids and ip[] is None and ip[] is None) ] if free_ips: floating_ip = free_ips.pop() log.debug("Using existing floating IP %r", floating_ip) else: break sleep(1) if instance.id in self._cached_instances: del self._cached_instances[instance.id] return ip_address
Allocates a floating/public ip address to the given instance, using the OpenStack Network ('Neutron') API. :param instance: instance to assign address to :param list network_id: List of IDs (as strings) of networks where to request allocation the floating IP. :return: public ip address
381,649
def delete(self, force=False): self._halt_if_already_deleted() if force: if cset.deletions or cset.creations: self.connection._change_resource_record_sets(cset) retval = self.connection.delete_hosted_zone_by_id(self.id) self._is_deleted = True return retval
Deletes this hosted zone. After this method is ran, you won't be able to add records, or do anything else with the zone. You'd need to re-create it, as zones are read-only after creation. :keyword bool force: If ``True``, delete the :py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it means nuking all associated record sets. If ``False``, an exception is raised if this :py:class:`HostedZone <route53.hosted_zone.HostedZone>` has record sets. :rtype: dict :returns: A dict of change info, which contains some details about the request.
381,650
def com_google_fonts_check_metadata_valid_post_script_name_values(font_metadata, font_familynames): for font_familyname in font_familynames: psname = "".join(str(font_familyname).split()) if psname in "".join(font_metadata.post_script_name.split("-")): yield PASS, ("METADATA.pb postScriptName field" " contains font name in right format.") else: yield FAIL, ("METADATA.pb postScriptName (\"{}\")" " does not match correct font name format (\"{}\")." "").format(font_metadata.post_script_name, font_familyname)
METADATA.pb font.post_script_name field contains font name in right format?
381,651
def fromtif(path, ext=, start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False): from tifffile import TiffFile if nplanes is not None and nplanes <= 0: raise ValueError( % nplanes) def getarray(idx_buffer_filename): idx, buf, fname = idx_buffer_filename fbuf = BytesIO(buf) tfh = TiffFile(fbuf) ary = tfh.asarray() pageCount = ary.shape[0] if nplanes is not None: extra = pageCount % nplanes if extra: if discard_extra: pageCount = pageCount - extra logging.getLogger().warn( % (extra, fname)) else: raise ValueError("nplanes does not evenly divide " % (nplanes, pageCount, fname)) values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)] else: values = [ary] tfh.close() if ary.ndim == 3: values = [val.squeeze() for val in values] nvals = len(values) keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)] return zip(keys, values) recount = False if nplanes is None else True data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop, recursive=recursive, npartitions=npartitions, recount=recount, labels=labels, engine=engine, credentials=credentials) if engine is not None and npartitions is not None and data.npartitions() < npartitions: data = data.repartition(npartitions) return data
Loads images from single or multi-page TIF files. Parameters ---------- path : str Path to data files or directory, specified as either a local filesystem path or in a URI-like format, including scheme. May include a single '*' wildcard character. ext : string, optional, default = 'tif' Extension required on data files to be loaded. start, stop : nonnegative int, optional, default = None Indices of the first and last-plus-one file to load, relative to the sorted filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions. recursive : boolean, optional, default = False If true, will recursively descend directories from path, loading all files with an extension matching 'ext'. nplanes : positive integer, optional, default = None If passed, will cause single files to be subdivided into nplanes separate images. Otherwise, each file is taken to represent one image. npartitions : int, optional, default = None Number of partitions for computational engine, if None will use default for engine. labels : array, optional, default = None Labels for records. If provided, should be one-dimensional. discard_extra : boolean, optional, default = False If True and nplanes doesn't divide by the number of pages in a multi-page tiff, the remainder will be discarded and a warning will be shown. If False, it will raise an error
381,652
def add_environment(self, environment, sync=True): LOGGER.debug("OSInstance.add_environment") if not sync: self.environment_2_add.append(environment) else: if environment.id is None: environment.save() if self.id is not None and environment.id is not None: params = { : self.id, : environment.id } args = {: , : , : params} response = OSInstanceService.requester.call(args) if response.rc != 0: LOGGER.warning( + self.name + + str(response.response_content) + + str(response.error_message) + " (" + str(response.rc) + ")" ) else: self.environment_ids.append(environment.id) environment.osi_ids.append(self.id) else: LOGGER.warning( + self.name + + environment.name + )
add an environment to this OS instance. :param environment: the environment to add on this OS instance :param sync: If sync=True(default) synchronize with Ariane server. If sync=False, add the environment object on list to be added on next save(). :return:
381,653
def create_event(self, institute, case, user, link, category, verb, subject, level='specific', variant=None, content=None, panel=None): variant = variant or {} event = dict( institute=institute['_id'], case=case['_id'], user_id=user['_id'], user_name=user['name'], link=link, category=category, verb=verb, subject=subject, level=level, variant_id=variant.get('variant_id'), content=content, panel=panel, created_at=datetime.now(), updated_at=datetime.now(), ) LOG.debug("Saving Event") self.event_collection.insert_one(event) LOG.debug("Event Saved") return event
Create a Event with the parameters given. Arguments: institute (dict): A institute case (dict): A case user (dict): A User link (str): The url to be used in the event category (str): case or variant verb (str): What type of event subject (str): What is operated on level (str): 'specific' or 'global'. Default is 'specific' variant (dict): A variant content (str): The content of the comment Returns: event(dict): The inserted event
381,654
def random(self) -> np.ndarray: if isinstance(self.val, np.ndarray): return np.random.randn(*self.val.shape) * self.err + self.val else: return np.random.randn() * self.err + self.val
Sample a random number (or array) from the distribution defined by mean=`self.val` and variance=`self.err`^2.
381,655
def get_gae_versions(): r = requests.get(SDK_RELEASES_URL) r.raise_for_status() releases = r.json().get(, {}) return sorted(versions_and_urls, key=lambda x: x[0])
Gets a list of all of the available Python SDK versions, sorted with the newest last.
381,656
def parse_bytes(self, bytestr, isfinal=True): with self._context(): self.filename = None self.p.Parse(bytestr, isfinal) return self._root
Parse a byte string. If the string is very large, split it in chunks and parse each chunk with isfinal=False, then parse an empty chunk with isfinal=True.
381,657
def _start_keep_alive(self): keep_alive_thread = threading.Thread(target=self.keep_alive) keep_alive_thread.daemon = True keep_alive_thread.start()
Start the keep alive thread as a daemon
381,658
def get(self, name: str) -> Union[None, str, List[str]]: name = name.casefold() if name == "referer" or name == "referrer": if "referrer" in self._headers: return self._headers["referrer"] elif "referer" in self._headers: return self._headers["referer"] else: return None elif name in self._headers: return self._headers[name] else: return None
Get a header value (case-insensitive); "referer" and "referrer" are treated as aliases.
381,659
def onStart(self): curses.mousemask(0) self.paths.host_config() version = Version() if self.first_time[0] and self.first_time[1] != : system = System() thr = Thread(target=system.start, args=(), kwargs={}) thr.start() countdown = 60 while thr.is_alive(): npyscreen.notify_wait( + str(countdown), title=) time.sleep(1) countdown -= 1 thr.join() quit_s = *4 + tab_esc = *4 + self.addForm(, MainForm, name= + version + + quit_s + tab_esc, color=) self.addForm(, HelpForm, name= + quit_s, color=) self.addForm(, TutorialIntroForm, name= + quit_s, color=) self.addForm(, TutorialBackgroundForm, name= + quit_s, color=) self.addForm(, TutorialTerminologyForm, name= + quit_s, color=) self.addForm(, TutorialGettingSetupForm, name= + quit_s, color=) self.addForm(, TutorialStartingCoresForm, name= + quit_s, color=) self.addForm(, TutorialAddingPluginsForm, name= + quit_s, color=) self.addForm(, TutorialAddingFilesForm, name= + quit_s, color=) self.addForm(, TutorialTroubleshootingForm, name= + quit_s, color=)
Override onStart method for npyscreen
381,660
def generic_commit_and_try_merge2master_wf(git_action, file_content, doc_id, auth_info, parent_sha, commit_msg=, merged_sha=None, doctype_display_name="document"): merge_needed = False fc = tempfile.NamedTemporaryFile() try: if is_str_type(file_content): fc.write(file_content) else: write_as_json(file_content, fc) fc.flush() try: max_file_size = git_action.max_file_size except: max_file_size = None if max_file_size is not None: file_size = os.stat(fc.name).st_size if file_size > max_file_size: m = \ m = m.format(t=doctype_display_name, i=doc_id, a=file_size, b=max_file_size) raise GitWorkflowError(m) f = "Could not acquire lock to write to %s acquire_lock_raise(git_action, fail_msg=f) try: try: commit_resp = git_action.write_doc_from_tmpfile(doc_id, fc, parent_sha, auth_info, commit_msg, doctype_display_name) except Exception as e: _LOG.exception() raise GitWorkflowError("Could not write to %s (doctype_display_name, doc_id, e.message)) written_fp = git_action.path_for_doc(doc_id) branch_name = commit_resp[] new_sha = commit_resp[] _LOG.debug(.format(t=doctype_display_name, i=doc_id, p=parent_sha, c=str(commit_resp))) m_resp = _do_merge2master_commit(git_action, new_sha, branch_name, written_fp, merged_sha=merged_sha, prev_file_sha=commit_resp.get()) new_sha, branch_name, merge_needed = m_resp finally: git_action.release_lock() finally: fc.close() r = { "error": 0, "resource_id": doc_id, "branch_name": branch_name, "description": "Updated %s "sha": new_sha, "merge_needed": merge_needed, } _LOG.debug(.format(r=str(r))) return r
Actually make a local Git commit and push it to our remote
381,661
async def _build_state(self, request: Request, message: BaseMessage, responder: Responder) \ -> Tuple[ Optional[BaseState], Optional[BaseTrigger], Optional[bool], ]: trigger, state_class, dnr = await self._find_trigger(request) if trigger is None: if not message.should_confuse(): return None, None, None state_class = self._confused_state(request) logger.debug(, state_class.name()) else: logger.debug(, state_class.name()) state = state_class(request, responder, trigger, trigger) return state, trigger, dnr
Build the state for this request.
381,662
def RawBytesToScriptHash(raw): rawh = binascii.unhexlify(raw) rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf8')) return UInt160(data=rawhashstr)
Get a hash of the provided raw bytes using the ripemd160 algorithm. Args: raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC' Returns: UInt160:
381,663
def make_contiguous(im, keep_zeros=True): im = sp.copy(im) if keep_zeros: mask = (im == 0) im[mask] = im.min() - 1 im = im - im.min() im_flat = im.flatten() im_vals = sp.unique(im_flat) im_map = sp.zeros(shape=sp.amax(im_flat) + 1) im_map[im_vals] = sp.arange(0, sp.size(sp.unique(im_flat))) im_new = im_map[im_flat] im_new = sp.reshape(im_new, newshape=sp.shape(im)) im_new = sp.array(im_new, dtype=im_flat.dtype) return im_new
r""" Take an image with arbitrary greyscale values and adjust them to ensure all values fall in a contiguous range starting at 0. This function will handle negative numbers such that most negative number will become 0, *unless* ``keep_zeros`` is ``True`` in which case it will become 1, and all 0's in the original image remain 0. Parameters ---------- im : array_like An ND array containing greyscale values keep_zeros : Boolean If ``True`` (default) then 0 values remain 0, regardless of how the other numbers are adjusted. This is mostly relevant when the array contains negative numbers, and means that -1 will become +1, while 0 values remain 0. Returns ------- image : ND-array An ND-array the same size as ``im`` but with all values in contiguous orders. Example ------- >>> import porespy as ps >>> import scipy as sp >>> im = sp.array([[0, 2, 9], [6, 8, 3]]) >>> im = ps.tools.make_contiguous(im) >>> print(im) [[0 1 5] [3 4 2]]
381,664
def rgb_to_yiq(r, g=None, b=None): if type(r) in [list,tuple]: r, g, b = r y = (r * 0.29895808) + (g * 0.58660979) + (b *0.11443213) i = (r * 0.59590296) - (g * 0.27405705) - (b *0.32184591) q = (r * 0.21133576) - (g * 0.52263517) + (b *0.31129940) return (y, i, q)
Convert the color from RGB to YIQ. Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] Returns: The color as an (y, i, q) tuple in the range: y[0...1], i[0...1], q[0...1] >>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0) '(0.592263, 0.458874, -0.0499818)'
381,665
def get_nonparametric_sources(self): return [src for sm in self.source_models for src_group in sm.src_groups for src in src_group if hasattr(src, 'data')]
:returns: list of non parametric sources in the composite source model
381,666
def K_r2(self, r2): r = self._clipped_sqrt(r2) return self.K_r(r)
Returns the kernel evaluated on `r2`, which is the scaled squared distance. Will call self.K_r(r=sqrt(r2)), or can be overwritten directly (and should operate element-wise on r2).
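A hedged sketch of the hook in use, GPflow-style: a kernel defines `K_r` and `_clipped_sqrt`, and `K_r2` bridges from squared distances (the stub class is illustrative, not the library's):

import numpy as np

class ExpKernel:
    def _clipped_sqrt(self, r2):
        return np.sqrt(np.maximum(r2, 1e-36))   # keep gradients finite at r2=0
    def K_r(self, r):
        return np.exp(-0.5 * r)
    K_r2 = K_r2                                  # the function above as a method

print(ExpKernel().K_r2(np.array([0.0, 4.0])))    # [1. 0.36787944]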
381,667
def _db_pre_transform(self, train_tfm:List[Callable], valid_tfm:List[Callable]): "Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`" self.train_ds.x.after_open = compose(train_tfm) self.valid_ds.x.after_open = compose(valid_tfm) return self
Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`
381,668
def list(ctx, scenario_name, format): args = ctx.obj.get() subcommand = base._get_subcommand(__name__) command_args = { : subcommand, : format, } statuses = [] s = scenarios.Scenarios( base.get_configs(args, command_args), scenario_name) for scenario in s: statuses.extend(base.execute_subcommand(scenario.config, subcommand)) headers = [util.title(name) for name in status.get_status()._fields] if format == or format == : table_format = if format == : headers = [] table_format = format _print_tabulate_data(headers, statuses, table_format) else: _print_yaml_data(headers, statuses)
Lists status of instances.
381,669
def is_git_file(cls, path, name): os.chdir(path) p = subprocess.Popen(['git', 'ls-files', '--error-unmatch', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.wait() return p.returncode == 0
Determine if file is known by git.
381,670
def _kill(self, variable, code_loc): if variable in self._live_defs: for loc in self._live_defs.lookup_defs(variable): pv = ProgramVariable(variable, loc, arch=self.project.arch) self._data_graph_add_edge(pv, ProgramVariable(variable, code_loc, arch=self.project.arch), type='kill') self._live_defs.kill_def(variable, code_loc)
Kill previous defs of the given variable at the given code location.
381,671
def remove(name=None, pkgs=None, **kwargs): targets = pkgs if pkgs else [name] if pkgs: log.debug('Removing these packages instead of %s: %s', name, targets) old = list_pkgs() cmd = '/bin/pkg uninstall -v {0}'.format(' '.join(targets)) out = __salt__['cmd.run_all'](cmd, output_loglevel='trace') __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if out['retcode'] != 0: raise CommandExecutionError('Error occurred removing package(s)', info={'changes': ret, 'retcode': out['retcode'], 'errors': [out['stderr']]}) return ret
Remove specified package. Accepts full or partial FMRI. In case of multiple match, the command fails and won't modify the OS. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a list containing the removed packages. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove tcsh salt '*' pkg.remove pkg://solaris/shell/tcsh salt '*' pkg.remove pkgs='["foo", "bar"]'
381,672
def _gql(cls, query_string, *args, **kwds): from .query import gql return gql('SELECT * FROM %s %s' % (cls._class_name(), query_string), *args, **kwds)
Run a GQL query.
381,673
def identify_ibids(line): ibid_match_txt = {} for m_ibid in re_ibid.finditer(line): ibid_match_txt[m_ibid.start()] = m_ibid.group(0) line = line[0:m_ibid.start()] + \ "_" * len(m_ibid.group(0)) + \ line[m_ibid.end():] return ibid_match_txt, line
Find IBIDs within the line, record their position and length, and replace them with underscores. @param line: (string) the working reference line @return: (tuple) containing a dictionary and a string: Dictionary: matched IBID text: (Key: position of IBID in line; Value: matched IBID text) String: working line with matched IBIDs replaced by underscores
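A hedged standalone run; `re_ibid` is module state in the original, so a simple stand-in pattern is defined here (the real one is more elaborate):

import re
re_ibid = re.compile(r"\bIbid\.?", re.IGNORECASE)   # hypothetical stand-in

matches, line = identify_ibids("As shown in Ibid., p. 40")
print(matches)   # {12: 'Ibid.'}
print(line)      # 'As shown in _____, p. 40'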
381,674
def _format_name(self, name, surname, snake_case=False): if not name or not surname: sep = '' elif snake_case: sep = '_' else: sep = ' ' if snake_case: name = self._snakify_name(name) surname = self._snakify_name(surname) disp_name = '{}{}{}'.format(name, sep, surname) return disp_name
Format a first name and a surname into a cohesive string. Note that either name or surname can be empty strings, and formatting will still succeed. :param str name: A first name. :param str surname: A surname. :param bool snake_case: If True, format the name as "snake_case", also stripping diacritics if any. (default: False) :return str: The formatted name.
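The plain (non-snake_case) branch never touches `self`, so it can be exercised standalone by passing `None` (the snake_case branch would need the class's `_snakify_name` helper):

print(_format_name(None, "Ada", "Lovelace"))   # 'Ada Lovelace'
print(_format_name(None, "", "Lovelace"))      # 'Lovelace' - an empty part drops the separator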
381,675
def expr_stmt(self, lhs, rhs): if isinstance(rhs, ast.AugAssign): if isinstance(lhs, ast.Tuple) or isinstance(lhs, ast.List): error = diagnostic.Diagnostic( "fatal", "illegal expression for augmented assignment", {}, rhs.op.loc, [lhs.loc]) self.diagnostic_engine.process(error) else: rhs.target = self._assignable(lhs) rhs.loc = rhs.target.loc.join(rhs.value.loc) return rhs elif rhs is not None: rhs.targets = list(map(self._assignable, [lhs] + rhs.targets)) rhs.loc = lhs.loc.join(rhs.value.loc) return rhs else: return ast.Expr(value=lhs, loc=lhs.loc)
(2.6, 2.7, 3.0, 3.1) expr_stmt: testlist (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist))*) (3.2-) expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
381,676
def add(self, resource, replace=False): uri = resource.uri if (uri in self and not replace): raise ResourceListDupeError( "Attempt to add resource already in resource_list") self[uri] = resource
Add just a single resource.
381,677
def parse_mbox(filepath): mbox = _MBox(filepath, create=False) for msg in mbox: message = message_to_dict(msg) yield message
Parse a mbox file. This method parses a mbox file and returns an iterator of dictionaries. Each one of this contains an email message. :param filepath: path of the mbox to parse :returns : generator of messages; each message is stored in a dictionary of type `requests.structures.CaseInsensitiveDict`
381,678
def find_shadowed(self, extra=()): i = self.identifiers return (i.declared | i.outer_undeclared) & \ (i.declared_locally | i.declared_parameter) | \ set(x for x in extra if i.is_declared(x))
Find all the shadowed names. extra is an iterable of variables that may be defined with `add_special` which may occur scoped.
381,679
def addFileAnnot(self, point, buffer, filename, ufilename=None, desc=None): CheckParent(self) val = _fitz.Page_addFileAnnot(self, point, buffer, filename, ufilename, desc) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
Add a 'FileAttachment' annotation at location 'point'.
381,680
def is_filtered(self, require=None, ignore=None): ignore = ignore or ["PASS"] if "FT" not in self.data or not self.data["FT"]: return False for ft in self.data["FT"]: if ft in ignore: continue if not require: return True elif ft in require: return True return False
Return ``True`` for filtered calls :param iterable ignore: if set, the filters to ignore, make sure to include 'PASS', when setting, default is ``['PASS']`` :param iterable require: if set, the filters to require for returning ``True``
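A small sketch against a stub call object (the real class carries more, but only `data['FT']` matters here):

class _Call:
    def __init__(self, ft):
        self.data = {"FT": ft}
    is_filtered = is_filtered   # the function above as a method

print(_Call(["PASS"]).is_filtered())                 # False - PASS is ignored
print(_Call(["q10"]).is_filtered())                  # True
print(_Call(["q10"]).is_filtered(require=["s50"]))   # False - q10 is not required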
381,681
def td_tr(points, dist_threshold): if len(points) <= 2: return points else: max_dist_threshold = 0 found_index = 0 delta_e = time_dist(points[-1], points[0]) * I_3600 d_lat = points[-1].lat - points[0].lat d_lon = points[-1].lon - points[0].lon for i in range(1, len(points)-1): delta_i = time_dist(points[i], points[0]) * I_3600 di_de = delta_i / delta_e point = Point( points[0].lat + d_lat * di_de, points[0].lon + d_lon * di_de, None ) dist = loc_dist(points[i], point) if dist > max_dist_threshold: max_dist_threshold = dist found_index = i if max_dist_threshold > dist_threshold: one = td_tr(points[:found_index], dist_threshold) two = td_tr(points[found_index:], dist_threshold) one.extend(two) return one else: return [points[0], points[-1]]
Top-Down Time-Ratio Trajectory Compression Algorithm Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf Args: points (:obj:`list` of :obj:`Point`): trajectory or part of it dist_threshold (float): max distance error, in meters Returns: :obj:`list` of :obj:`Point`, compressed trajectory
381,682
def save(): results = {} cpu_number = 0 while True: try: _file = open( CPU_PREFIX + .format(cpu_number)) except: break governor = _file.read().strip() results.setdefault(cpu_number, {})[] = governor _file.close() try: _file = open( CPU_PREFIX + .format(cpu_number)) except: break results[cpu_number][] = _file.read().strip() _file.close() cpu_number += 1 return results
Read and return the current cpufreq settings (e.g. the scaling governor) for each CPU, as a dict keyed by CPU number.
381,683
def checkMultipleFiles(input): f,i,o,a=buildFileList(input) return len(f) > 1
Evaluates the input to determine whether there is 1 or more than 1 valid input file.
381,684
def get_activities(self): collection = JSONClientValidated('learning', collection='Activity', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.ActivityList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``Activities``. In plenary mode, the returned list contains all known activites or an error results. Otherwise, the returned list may contain only those activities that are accessible through this session. return: (osid.learning.ActivityList) - a ``ActivityList`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
381,685
def handle_nextPageTemplate(self, pt): has_left_template = self._has_template_for_name(pt + ) has_right_template = self._has_template_for_name(pt + ) if has_left_template and has_right_template: pt = [pt + , pt + ] if isinstance(pt, str): if hasattr(self, ): del self._nextPageTemplateCycle for t in self.pageTemplates: if t.id == pt: self._nextPageTemplateIndex = self.pageTemplates.index(t) return raise ValueError("can%s_nextPageTemplateCycle*': c._restart = len(c) continue for t in self.pageTemplates: if t.id == ptn.strip(): c.append(t) break if not c: raise ValueError("No valid page templates in cycle") elif c._restart > len(c): raise ValueError("Invalid cycle restart position") self._nextPageTemplateCycle = c.cyclicIterator() else: raise TypeError("Argument pt should be string or integer or list")
If pt also has templates for even and odd pages, convert it to a list.
381,686
def compute_fw_at_frac_max_1d_simple(Y, xc, X=None, f=0.5): yy = np.asarray(Y) if yy.ndim != 1: raise ValueError() if yy.size == 0: raise ValueError() if X is None: xx = np.arange(yy.shape[0]) else: xx = X xpix = coor_to_pix_1d(xc - xx[0]) try: peak = yy[xpix] except IndexError: raise ValueError() fwhm_x, _codex, _msgx = compute_fwhm_1d(xx, yy - f * peak, xc, xpix) return peak, fwhm_x
Compute the full width at fraction f of the maximum
381,687
def mapping_create(index, doc_type, body=None, hosts=None, profile=None, source=None):
    es = _get_instance(hosts, profile)

    if source and body:
        message = 'Either body or source should be specified but not both.'
        raise SaltInvocationError(message)

    if source:
        body = __salt__['cp.get_file_str'](
            source,
            saltenv=__opts__.get('saltenv', 'base'))

    try:
        result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body)
        return result.get('acknowledged', False)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError(
            "Cannot create mapping {0}, server returned code {1} with message {2}".format(
                index, e.status_code, e.error))
Create a mapping in a given index index Index for the mapping doc_type Name of the document type body Mapping definition as specified in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html source URL to file specifying mapping definition. Cannot be used in combination with ``body``. CLI example:: salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }'
381,688
@classmethod
def configure(cls, api_token,
              api_url="https://api.qubole.com/api/", version="v1.2",
              poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
    cls._auth = QuboleAuth(api_token)
    cls.api_token = api_token
    cls.version = version
    cls.baseurl = api_url
    if poll_interval < Qubole.MIN_POLL_INTERVAL:
        log.warning("Poll interval cannot be less than %s seconds. "
                    "Setting it to %s seconds.\n" %
                    (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
        cls.poll_interval = Qubole.MIN_POLL_INTERVAL
    else:
        cls.poll_interval = poll_interval
    cls.skip_ssl_cert_check = skip_ssl_cert_check
    cls.cloud_name = cloud_name.lower()
    cls.cached_agent = None
Set parameters governing interaction with QDS

Args:
    `api_token`: authorization token for QDS. required

    `api_url`: the base URL for QDS API. configurable for testing only

    `version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)

    `poll_interval`: interval in secs when polling QDS for events

    `skip_ssl_cert_check`: when True, bypass SSL certificate verification

    `cloud_name`: name of the cloud provider backing the QDS account (default "AWS")
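Typical module-level setup, sketched with a placeholder token:

from qds_sdk.qubole import Qubole

Qubole.configure(api_token='YOUR_QDS_API_TOKEN', poll_interval=10)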
381,689
def set_parent(self, new_parent, init=False):
    "Associate the header to the control (it could be recreated)"
    self._created = False
    SubComponent.set_parent(self, new_parent, init)
    # If no valid index was given, append the column at the last position:
    if self.index == -1 or self.index > self._parent.wx_obj.GetColumnCount():
        self.index = self._parent.wx_obj.GetColumnCount()
    self._parent.wx_obj.InsertColumn(self.index, self.text, self._align,
                                     self.width)
    self._created = True
Associate the header to the control (it could be recreated)
381,690
def get_order(membersuite_id, client=None):
    if not membersuite_id:
        return None

    client = client or get_new_client(request_session=True)
    if not client.session_id:
        client.request_session()

    object_query = ("SELECT Object() FROM ORDER WHERE ID = '{}'"
                    .format(membersuite_id))
    result = client.execute_object_query(object_query)

    msql_result = result["body"]["ExecuteMSQLResult"]

    if msql_result["Success"]:
        membersuite_object_data = (msql_result["ResultValue"]
                                   ["SingleObject"])
    else:
        raise ExecuteMSQLError(result=result)

    return Order(membersuite_object_data=membersuite_object_data)
Get an Order by ID.
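A hypothetical call; the ID is a placeholder in MemberSuite's GUID-like format, and the attribute read back is assumed from the constructor above:

order = get_order('6faf90e4-01af-4b4e-9e76-0123456789ab')
if order is not None:
    print(order.membersuite_object_data)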
381,691
def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False):
    job_id = manager.setup_job(job_id, tool_id, tool_version)
    if use_metadata:
        manager.enable_metadata_directory(job_id)
    return build_job_config(
        job_id=job_id,
        job_directory=manager.job_directory(job_id),
        system_properties=manager.system_properties(),
        tool_id=tool_id,
        tool_version=tool_version,
    )
Setup new job from these inputs and return dict summarizing state (used to configure command line).
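A hedged sketch of the call site; the manager instance and IDs are placeholders, and the dict shape assumes `build_job_config` simply echoes its keyword arguments:

config = setup_job(manager, job_id='123', tool_id='cat1',
                   tool_version='1.0.0', use_metadata=True)
print(config['job_directory'], config['tool_id'])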
381,692
def filter_conflicts(conflicts_list, fields):
    for field in fields:
        conflicts_list = filter_conflicts_by_path(conflicts_list, field)
    return conflicts_list
Use this function to automatically filter all the entries defined for a given rule. Params: conflicts_list(List[Conflict]): the list of conflicts to filter. fields(List[str]): fields to filter out, using an accessor syntax of the form ``field.subfield.subsubfield``. Return: List[Conflict]: the given list filtered by `fields`
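A hypothetical call; `conflicts` stands for the list of Conflict objects produced by the merger, and the field paths are illustrative:

fields_to_ignore = ['authors.affiliations', 'references']
cleaned = filter_conflicts(conflicts, fields_to_ignore)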
381,693
def get_config_variable(self, config_id, offset):
    config = self._config_variables.get(config_id)
    if config is None:
        return [b""]

    # Return at most 20 bytes starting at the requested offset.
    return [bytes(config.current_value[offset:offset + 20])]
Get a chunk (at most 20 bytes) of a config variable's value, starting at ``offset``. An unknown ``config_id`` yields ``[b""]``.
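Because each call returns at most one 20-byte chunk, longer values are reassembled in a loop; a hypothetical sketch, where `device` and the variable id 0x8000 are placeholders:

offset = 0
chunks = []
while True:
    [chunk] = device.get_config_variable(0x8000, offset)
    if not chunk:
        break
    chunks.append(chunk)
    offset += len(chunk)
value = b"".join(chunks)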
381,694
def build_reportnum_kb(fpath):
    def _add_institute_preprint_patterns(preprint_classifications,
                                         preprint_numeration_ptns,
                                         preprint_reference_search_regexp_patterns,
                                         standardised_preprint_reference_categories,
                                         kb_line_num):
        if preprint_classifications and preprint_numeration_ptns:
            # Longest numeration patterns first, so the most specific
            # pattern wins when several could match.
            ordered_patterns = \
                order_reportnum_patterns_bylen(preprint_numeration_ptns)
            numeration_regexp = \
                create_institute_numeration_group_regexp_pattern(
                    ordered_patterns)

            for classification in preprint_classifications:
                # The literal fragments anchoring the category before its
                # numeration were lost; '\b' and '\s*' are assumptions.
                search_pattern_str = ur'\b(?P<categ>' \
                    + classification[0].strip() + ur')\s*' \
                    + numeration_regexp
                re_search_pattern = re.compile(search_pattern_str,
                                               re.UNICODE)
                preprint_reference_search_regexp_patterns[(kb_line_num,
                                                           classification[0])] = \
                    re_search_pattern
                standardised_preprint_reference_categories[(kb_line_num,
                                                            classification[0])] = \
                    classification[1]

    preprint_reference_search_regexp_patterns = {}
    standardised_preprint_reference_categories = {}
    current_institute_preprint_classifications = []
    current_institute_numerations = []

    # The three line formats documented in the docstring. The original
    # regex literals were lost, so these are reconstructed from the
    # documented formats.
    re_institute_name = re.compile(ur'^\s*#{5}\s*(.+?)\s*#{5}\s*$',
                                   re.UNICODE)
    re_preprint_classification = \
        re.compile(ur'^\s*(\S.*?)\s*---\s*(\S.*?)\s*$', re.UNICODE)
    re_numeration_pattern = re.compile(ur'^\s*<(.+)>\s*$', re.UNICODE)

    kb_line_num = 0

    with file_resolving(fpath) as fh:
        for rawline in fh:
            # Assumed comment marker; the original literal was lost and
            # cannot be '#', which would swallow institute lines.
            if rawline.startswith(';'):
                continue
            kb_line_num += 1

            m_institute_name = re_institute_name.search(rawline)
            if m_institute_name:
                # A new institute begins: flush the classifications and
                # numeration patterns collected for the previous one.
                _add_institute_preprint_patterns(
                    current_institute_preprint_classifications,
                    current_institute_numerations,
                    preprint_reference_search_regexp_patterns,
                    standardised_preprint_reference_categories,
                    kb_line_num)
                current_institute_preprint_classifications = []
                current_institute_numerations = []
                continue

            m_preprint_classification = \
                re_preprint_classification.search(rawline)
            if m_preprint_classification:
                try:
                    current_institute_preprint_classifications.append(
                        (m_preprint_classification.group(1),
                         m_preprint_classification.group(2)))
                except (AttributeError, NameError):
                    pass
                continue

            m_numeration_pattern = re_numeration_pattern.search(rawline)
            if m_numeration_pattern:
                try:
                    current_institute_numerations.append(
                        m_numeration_pattern.group(1))
                except (AttributeError, NameError):
                    pass
                continue

    # Flush the patterns of the final institute in the file.
    _add_institute_preprint_patterns(current_institute_preprint_classifications,
                                     current_institute_numerations,
                                     preprint_reference_search_regexp_patterns,
                                     standardised_preprint_reference_categories,
                                     kb_line_num)
    return (preprint_reference_search_regexp_patterns,
            standardised_preprint_reference_categories)
Given the path to a knowledge base file containing the details of institutes and the patterns that their preprint report numbering schemes take, create a dictionary of regexp search patterns to recognise these preprint references in reference lines, and a dictionary of replacements for non-standard preprint categories in these references. The knowledge base file should consist only of lines that take one of the following 3 formats: #####Institute Name#### (the name of the institute to which the preprint reference patterns belong, e.g. '#####LANL#####', surrounded by 5 # on either side.) <pattern> (numeration patterns for an institute's preprints, surrounded by < and >.) seek-term --- replace-term (i.e. a seek phrase on the left hand side, a replace phrase on the right hand side, with the two phrases being separated by 3 hyphens.) E.g.: ASTRO PH ---astro-ph The left-hand side term is a non-standard version of the preprint reference category; the right-hand side term is the standard version. If the KB file cannot be read from, or an unexpected line is encountered in the KB, an error message is output to standard error and execution is halted with an error-code 0. @param fpath: (string) the path to the knowledge base file. @return: (tuple) containing 2 dictionaries. The first contains regexp search patterns used to identify preprint references in a line. This dictionary is keyed by a tuple containing the line number of the pattern in the KB and the non-standard category string. E.g.: (3, 'ASTRO PH'). The second dictionary contains the standardised category string, and is keyed by the non-standard category string. E.g.: 'astro-ph'.
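A hypothetical KB file in the documented format and the call that compiles it; the LANL numeration pattern shown is illustrative only, and the module's helper functions are assumed importable:

kb = (
    "#####LANL#####\n"
    "<yynn-nnn>\n"
    "ASTRO PH ---astro-ph\n"
    "HEP PH ---hep-ph\n"
)
with open('report-numbers.kb', 'w') as fh:
    fh.write(kb)

patterns, categories = build_reportnum_kb('report-numbers.kb')
print(sorted(categories.values()))  # ['astro-ph', 'hep-ph']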
381,695
def _setup_regex(self):
    self.RE_COMMENTS = cache.RE_COMMENTS
    self.RE_MODULE = cache.RE_MODULE
    self.RE_TYPE = cache.RE_TYPE
    self.RE_EXEC = cache.RE_EXEC
    self.RE_MEMBERS = cache.RE_MEMBERS
    self.RE_DEPEND = cache.RE_DEPEND
Sets up the constant regex strings etc. that can be used to parse the strings for determining context.
381,696
def _PopulateQuantilesHistogram(self, hist, nums):
    if not nums:
        return
    num_quantile_buckets = 10
    quantiles_to_get = [
        x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
    ]
    quantiles = np.percentile(nums, quantiles_to_get)
    hist.type = self.histogram_proto.QUANTILES
    quantiles_sample_count = float(len(nums)) / num_quantile_buckets
    for low, high in zip(quantiles, quantiles[1:]):
        hist.buckets.add(
            low_value=low, high_value=high, sample_count=quantiles_sample_count)
Fills in the histogram with quantile information from the provided array. Args: hist: A Histogram proto message to fill in. nums: A list of numbers to create a quantiles histogram from.
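A small numeric walk-through of the bucketing, sketched with plain numpy and leaving the histogram proto out:

import numpy as np

nums = list(range(1, 101))                              # 100 samples
quantiles = np.percentile(nums, list(range(0, 101, 10)))
print(quantiles[:3])                                    # [ 1.  10.9 20.8]
print(float(len(nums)) / 10)                            # 10.0 samples per bucket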
381,697
def is_watching(self, username):
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError(
            "Authentication through Authorization Code (Grant Type) is "
            "required in order to connect to this endpoint.")

    # The endpoint path and response key were lost; these follow the
    # public DeviantArt API and are assumptions here.
    response = self._req('/user/friends/watching/{}'.format(username))
    return response['watching']
Check whether the given user is watching you. :param username: the user whose watch status to check
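A hedged usage sketch; the client credentials are placeholders and the constructor keywords are assumptions, though the Authorization Code grant is required by the method itself:

import deviantart

da = deviantart.Api(CLIENT_ID, CLIENT_SECRET,
                    standard_grant_type='authorization_code')
if da.is_watching('someuser'):
    print('someuser is watching you')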
381,698
def with_connection(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        if kwargs.get('connection') is None:
            kwargs['connection'] = _choose_connection(
                host=kwargs.get('host'),
                port=kwargs.get('port'))
        try:
            return func(*args, **kwargs)
        except HTTPException:
            # Stale connection: reconnect once and retry.
            kwargs['connection'] = reconnect(kwargs['connection'])
            return func(*args, **kwargs)
    return wrapped
Decorate a function to open a new datafind connection if required This method will inspect the ``connection`` keyword, and if `None` (or missing), will use the ``host`` and ``port`` keywords to open a new connection and pass it as ``connection=<new>`` to ``func``.
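A hypothetical decorated function showing the calling pattern; the function name, observatory, frametype, and host are placeholders:

@with_connection
def find_urls(site, frametype, start, end,
              connection=None, host=None, port=None):
    # `connection` is injected by the decorator when the caller omits it.
    return connection.find_urls(site, frametype, start, end)

urls = find_urls('L', 'L1_HOFT_C00', 1187008880, 1187008884,
                 host='datafind.ligo.org')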
381,699
def sha256sum(filename):
    sha256 = hashlib.sha256()
    mem_view = memoryview(bytearray(128 * 1024))
    # Binary, unbuffered reads straight into the preallocated buffer.
    with open(filename, 'rb', buffering=0) as stream:
        for i in iter(lambda: stream.readinto(mem_view), 0):
            sha256.update(mem_view[:i])
    return sha256.hexdigest()
Return SHA256 hash of file.