Columns: Unnamed: 0 (int64, values 0–389k); code (string, lengths 26–79.6k); docstring (string, lengths 1–46.9k)
10,900
def _link_or_update_vars(self):
    for d, source in self.links.items():
        target = os.path.join(self.inventory_directory, d)
        source = os.path.join(self._config.scenario.directory, source)

        if not os.path.exists(source):
            msg = "The source path '{}' does not exist.".format(source)
            util.sysexit_with_message(msg)
        msg = "Inventory {} linked to {}".format(source, target)
        LOG.info(msg)
        os.symlink(source, target)
Creates or updates the symlink to group_vars and returns None. :returns: None
10,901
def align(*objects, **kwargs):
    join = kwargs.pop('join', 'inner')
    copy = kwargs.pop('copy', True)
    indexes = kwargs.pop('indexes', None)
    exclude = kwargs.pop('exclude', _DEFAULT_EXCLUDE)
    if indexes is None:
        indexes = {}
    if kwargs:
        raise TypeError('align() got unexpected keyword arguments %r'
                        % list(kwargs))

    if not indexes and len(objects) == 1:
        # fast path for the trivial single-object case
        obj, = objects
        return (obj.copy(deep=copy),)

    all_indexes = defaultdict(list)
    unlabeled_dim_sizes = defaultdict(set)
    for obj in objects:
        for dim in obj.dims:
            if dim not in exclude:
                try:
                    index = obj.indexes[dim]
                except KeyError:
                    unlabeled_dim_sizes[dim].add(obj.sizes[dim])
                else:
                    all_indexes[dim].append(index)

    joiner = _get_joiner(join)
    joined_indexes = {}
    for dim, matching_indexes in all_indexes.items():
        if dim in indexes:
            index = utils.safe_cast_to_index(indexes[dim])
            if (any(not index.equals(other) for other in matching_indexes) or
                    dim in unlabeled_dim_sizes):
                joined_indexes[dim] = index
        else:
            if (any(not matching_indexes[0].equals(other)
                    for other in matching_indexes[1:]) or
                    dim in unlabeled_dim_sizes):
                if join == 'exact':
                    raise ValueError(
                        'indexes along dimension {!r} are not equal'
                        .format(dim))
                index = joiner(matching_indexes)
                joined_indexes[dim] = index
            else:
                index = matching_indexes[0]

        if dim in unlabeled_dim_sizes:
            unlabeled_sizes = unlabeled_dim_sizes[dim]
            labeled_size = index.size
            if len(unlabeled_sizes | {labeled_size}) > 1:
                raise ValueError(
                    'arguments without labels along dimension %r cannot be '
                    'aligned because they have different dimension size(s) %r '
                    'than the size of the aligned dimension labels: %r'
                    % (dim, unlabeled_sizes, labeled_size))

    for dim in unlabeled_dim_sizes:
        if dim not in all_indexes:
            sizes = unlabeled_dim_sizes[dim]
            if len(sizes) > 1:
                raise ValueError(
                    'arguments without labels along dimension %r cannot be '
                    'aligned because they have different dimension sizes: %r'
                    % (dim, sizes))

    result = []
    for obj in objects:
        valid_indexers = {k: v for k, v in joined_indexes.items()
                          if k in obj.dims}
        if not valid_indexers:
            # fast path for no reindexing necessary
            new_obj = obj.copy(deep=copy)
        else:
            new_obj = obj.reindex(copy=copy, **valid_indexers)
        new_obj.encoding = obj.encoding
        result.append(new_obj)

    return tuple(result)
align(*objects, join='inner', copy=True, indexes=None, exclude=frozenset())

Given any number of Dataset and/or DataArray objects, returns new objects with aligned indexes and dimension sizes.

Arrays from the aligned objects are suitable as input to mathematical operators, because along each dimension they have the same index and size.

Missing values (if ``join != 'inner'``) are filled with NaN.

Parameters
----------
*objects : Dataset or DataArray
    Objects to align.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
    Method for joining the indexes of the passed objects along each dimension:

    - 'outer': use the union of object indexes
    - 'inner': use the intersection of object indexes
    - 'left': use indexes from the first object with each dimension
    - 'right': use indexes from the last object with each dimension
    - 'exact': instead of aligning, raise `ValueError` when indexes to be aligned are not equal
copy : bool, optional
    If ``copy=True``, data in the return values is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, new xarray objects are always returned.
exclude : sequence of str, optional
    Dimensions that must be excluded from alignment
indexes : dict-like, optional
    Any indexes explicitly provided with the `indexes` argument should be used in preference to the aligned indexes.

Returns
-------
aligned : same as *objects
    Tuple of objects with aligned coordinates.

Raises
------
ValueError
    If any dimensions without labels on the arguments have different sizes, or a different size than the size of the aligned dimension labels.
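A brief usage sketch (assumes numpy and xarray are installed; values are illustrative):

import numpy as np
import xarray as xr

a = xr.DataArray([10, 20, 30], dims='x', coords={'x': [0, 1, 2]})
b = xr.DataArray([1, 2, 3], dims='x', coords={'x': [1, 2, 3]})

# join='inner' keeps only the labels shared by both objects
a2, b2 = xr.align(a, b, join='inner')
print(a2.x.values)        # [1 2]
print((a2 + b2).values)   # [21 32]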
10,902
def bundles(ctx):
    bundles = _get_bundles(ctx.obj.data[])
    print_table((, ), [(bundle.name, f) for bundle in bundles])
List discovered bundles.
10,903
def parse_array(raw_array):
    array_strip_brackets = raw_array.replace(, ).replace(, )
    array_strip_spaces = array_strip_brackets.replace(, ).replace(, )
    return array_strip_spaces.split()
Parse a WMIC array.
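For context, a hedged sketch of the same idea with the lost literals filled in by assumption — WMIC typically renders arrays as `{"a", "b"}`, but the exact separators above were stripped:

def parse_wmic_array_sketch(raw_array):
    # '{"a", "b"}' -> '"a", "b"': strip the surrounding braces
    stripped = raw_array.replace('{', '').replace('}', '')
    # drop quotes and spaces, then split on commas
    stripped = stripped.replace('"', '').replace(' ', '')
    return stripped.split(',')

print(parse_wmic_array_sketch('{"a", "b"}'))  # ['a', 'b']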
10,904
def createModel(modelName, **kwargs):
    if modelName not in TemporalMemoryTypes.getTypes():
        raise RuntimeError("Unknown model type: " + modelName)

    return getattr(TemporalMemoryTypes, modelName)(**kwargs)
Return a classification model of the appropriate type. The model could be any supported subclass of ClassificationModel based on modelName.

@param modelName (str)  A supported temporal memory type

@param kwargs (dict)  Constructor arguments for the class that will be instantiated. Keyword parameters specific to each model type should be passed in here.
10,905
def run_next(self):
    while 1:
        (op, obj) = self.work_queue.get()
        if op is STOP_SIGNAL:
            return
        try:
            (job_id, command_line) = obj
            try:
                os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE))
            except Exception:
                log.exception("Running command but failed to delete - command may rerun on Pulsar boot.")
            self._run(job_id, command_line, background=False)
        except Exception:
            log.warn("Uncaught exception running job with job_id %s" % job_id)
            traceback.print_exc()
Run the next item in the queue (a job waiting to run).
10,906
def list_sources(embedding_name=None):
    text_embedding_reg = registry.get_registry(TokenEmbedding)
    if embedding_name is not None:
        embedding_name = embedding_name.lower()
        if embedding_name not in text_embedding_reg:
            raise KeyError('Cannot find `embedding_name` {}. Use '
                           '`list_sources(embedding_name=None).keys()` to get all the '
                           'valid embedding names.'.format(embedding_name))
        return list(text_embedding_reg[embedding_name].source_file_hash.keys())
    else:
        return {embedding_name: list(embedding_cls.source_file_hash.keys())
                for embedding_name, embedding_cls
                in registry.get_registry(TokenEmbedding).items()}
Get valid token embedding names and their pre-trained file names.

To load token embedding vectors from an externally hosted pre-trained token embedding file, such as those of GloVe and FastText, one should use `gluonnlp.embedding.create(embedding_name, source)`. This method returns all the valid names of `source` for the specified `embedding_name`. If `embedding_name` is set to None, this method returns all the valid names of `embedding_name` with their associated `source`.

Parameters
----------
embedding_name : str or None, default None
    The pre-trained token embedding name.

Returns
-------
dict or list:
    A list of all the valid pre-trained token embedding file names (`source`) for the specified token embedding name (`embedding_name`). If the text embedding name is set to None, returns a dict mapping each valid token embedding name to a list of valid pre-trained files (`source`). They can be plugged into `gluonnlp.embedding.create(embedding_name, source)`.
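A hedged usage sketch (assumes gluonnlp is installed; the available source names vary by release):

import gluonnlp

# all valid pre-trained sources for GloVe embeddings
print(gluonnlp.embedding.list_sources('glove')[:3])

# mapping of every embedding name to its valid sources
all_sources = gluonnlp.embedding.list_sources()
print(sorted(all_sources.keys()))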
10,907
def cumulative_sum(self):
    from .. import extensions
    agg_op = "__builtin__cum_sum__"
    return SArray(_proxy=self.__proxy__.builtin_cumulative_aggregate(agg_op))
Return the cumulative sum of the elements in the SArray.

Returns an SArray where each element in the output corresponds to the sum of all the elements preceding and including it. The SArray is expected to be of numeric type (int, float), or a numeric vector type.

Returns
-------
out : sarray[int, float, array.array]

Notes
-----
- Missing values are ignored while performing the cumulative aggregate operation.
- For SArray's of type array.array, all entries are expected to be of the same size.

Examples
--------
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa.cumulative_sum()
dtype: int
rows: 5
[1, 3, 6, 10, 15]
10,908
def _get_all_children(self):
    res = ''
    if self.child_nodes:
        for c in self.child_nodes:
            res += + str(c) +
            if c.child_nodes:
                for grandchild in c.child_nodes:
                    res += + str(grandchild) +
            else:
                res +=
    return res
return the list of children of a node
10,909
def get_context_file_name(pid_file):
    root = os.path.dirname(pid_file)
    port_file = os.path.join(root, "context.json")
    return port_file
When the daemon is started, write out the information about which port it was using.
10,910
def do_info(self, arg, arguments):
    if arguments["--all"]:
        Console.ok(70 * "-")
        Console.ok()
        Console.ok(70 * "-")
        for element in dir(self):
            Console.ok(str(element))
        Console.ok(70 * "-")
    self.print_info()
::

    Usage:
        info [--all]

    Options:
        --all -a   more extensive information

Prints some internal information about the shell
10,911
def go_to_step(self, step):
    self.stackedWidget.setCurrentWidget(step)
    self.pbnNext.setEnabled(step.is_ready_to_next_step())

    if step == self.step_fc_analysis:
        self.step_fc_analysis.setup_and_run_analysis()

    if step == self.step_kw_purpose and self.parent_step:
        if self.parent_step in [self.step_fc_hazlayer_from_canvas,
                                self.step_fc_hazlayer_from_browser]:
            text_label = category_question_hazard
        elif self.parent_step in [self.step_fc_explayer_from_canvas,
                                  self.step_fc_explayer_from_browser]:
            text_label = category_question_exposure
        else:
            text_label = category_question_aggregation
        self.step_kw_purpose.lblSelectCategory.setText(text_label)
Set the stacked widget to the given step, set up the buttons, and run all operations that should start immediately after entering the new step. :param step: The step widget to be moved to. :type step: WizardStep
10,912
def get_full_current_object(arn, current_model):
    LOG.debug(f)
    item = list(current_model.query(arn))
    if not item:
        return None
    return item[0]
Utility method to fetch items from the Current table if they are too big for SNS/SQS.

:param arn:
:param current_model:
:return:
10,913
def connect(self):
    self._connect()
    logger.debug()
    self._detect_deluge_version()
    logger.debug(.format(self.deluge_version))
    if self.deluge_version == 2:
        result = self.call('daemon.login', self.username, self.password,
                           client_version=)
    else:
        result = self.call('auth.login', self.username, self.password)
    logger.debug( % result)
    self.connected = True
Connects to the Deluge instance
10,914
def job_terminate(object_id, input_params={}, always_retry=True, **kwargs):
    return DXHTTPRequest('/%s/terminate' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /job-xxxx/terminate API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2Fterminate
10,915
def remove(self, path, recursive=True):
    if recursive:
        cmd = ["rm", "-r", path]
    else:
        cmd = ["rm", path]
    self.remote_context.check_output(cmd)
Remove file or directory at location `path`.
10,916
def make_command(tasks, *args, **kwargs):
    command = TaskCommand(tasks=tasks, *args, **kwargs)
    return command
Create a TaskCommand with defined tasks.

This is a helper function to avoid boilerplate when dealing with simple cases (e.g., all cli arguments can be handled by TaskCommand), with no special processing. In general, this means a command only needs to run established tasks.

Arguments:
    tasks - the tasks to execute
    args - extra arguments to pass to the TaskCommand constructor
    kwargs - extra keyword arguments to pass to the TaskCommand constructor
10,917
def serve_forever(self, poll_interval=0.5):
    self._serving_event.set()
    self._shutdown_request_event.clear()
    TCPServer.serve_forever(self, poll_interval=poll_interval)
Handle one request at a time until shutdown. Polls for shutdown every poll_interval seconds. Ignores self.timeout. If you need to do periodic tasks, do them in another thread.
10,918
def _get_nets_other(self, *args, **kwargs):
    from warnings import warn
    warn('IPWhois._get_nets_other() has been deprecated and will be removed. '
         'You should now use IPWhois.get_nets_other().')
    return self.get_nets_other(*args, **kwargs)
Deprecated. This will be removed in a future release.
10,919
def query_mongo_sort_decend(
        database_name,
        collection_name,
        query={},
        skip=0,
        limit=getattr(settings, , 200),
        return_keys=(),
        sortkey=None):
    l = []
    response_dict = {}
    try:
        mongodb_client_url = getattr(settings, , )
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]

        if return_keys:
            return_dict = {}
            for k in return_keys:
                return_dict[k] = 1
            mysearchresult = collection.find(
                query, return_dict).skip(skip).limit(limit).sort(
                sortkey, DESCENDING)
        else:
            mysearchresult = collection.find(query).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)

        response_dict['code'] = 200
        response_dict['type'] = "search-results"
        for d in mysearchresult:
            d['id'] = d['_id'].__str__()
            del d['_id']
            l.append(d)
        response_dict['results'] = l
    except:
        print("Error reading from Mongo")
        print(str(sys.exc_info()))
        response_dict['num_results'] = 0
        response_dict['code'] = 500
        response_dict['type'] = "Error"
        response_dict['results'] = []
        response_dict['message'] = str(sys.exc_info())
    return response_dict
return a response_dict with a list of search results in descending order based on a sort key
10,920
def setbit(self, key, offset, value):
    if not isinstance(offset, int):
        raise TypeError("offset argument must be int")
    if offset < 0:
        raise ValueError("offset must be greater equal 0")
    if value not in (0, 1):
        raise ValueError("value argument must be either 1 or 0")
    return self.execute(b'SETBIT', key, offset, value)
Sets or clears the bit at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less than 0 or value is not 0 or 1
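A brief usage sketch (assumes an aioredis-style async client `redis` created elsewhere; the key name is illustrative):

async def demo(redis):
    # bit offsets are zero-based: setting bit 7 of an empty key yields b'\x01'
    await redis.setbit('mykey', 7, 1)
    print(await redis.get('mykey'))  # b'\x01'
    # setbit('mykey', -1, 1) would raise ValueError per the checks above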
10,921
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break
    if not field:
        raise "No field found, check forgetter"
    whereList = ["%s = '%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
Return the children that link to me. That means that I have to be listed in their _userClasses somehow. If field is specified, that field in my children is used as the pointer to me. Use this if you have multiple fields referring to my class.
10,922
async def call_command(bot: NoneBot, ctx: Context_T,
                       name: Union[str, CommandName_T], *,
                       current_arg: str = '',
                       args: Optional[CommandArgs_T] = None,
                       check_perm: bool = True,
                       disable_interaction: bool = False) -> bool:
    cmd = _find_command(name)
    if not cmd:
        return False
    session = CommandSession(bot, ctx, cmd, current_arg=current_arg, args=args)
    return await _real_run_command(session, context_id(session.ctx),
                                   check_perm=check_perm,
                                   disable_interaction=disable_interaction)
Call a command internally.

This function is typically called by some other commands or "handle_natural_language" when handling NLPResult object.

Note: If disable_interaction is not True, after calling this function, any previous command session will be overridden, even if the command being called here does not need further interaction (a.k.a asking the user for more info).

:param bot: NoneBot instance
:param ctx: message context
:param name: command name
:param current_arg: command current argument string
:param args: command args
:param check_perm: should check permission before running command
:param disable_interaction: disable the command's further interaction
:return: the command is successfully called
10,923
def parse_extension_arg(arg, arg_dict):
    match = re.match(r, arg)
    if match is None:
        raise ValueError(
            "invalid extension argument '%s', must be in key=value form" % arg
        )

    name = match.group(1)
    value = match.group(4)
    arg_dict[name] = value
Converts argument strings in key=value or key.namespace=value form to dictionary entries

Parameters
----------
arg : str
    The argument string to parse, which must be in key=value or key.namespace=value form.
arg_dict : dict
    The dictionary into which the key/value pair will be added
10,924
def _parse_abbreviation(self, abbr):
    abbr = re.sub(r, , str(abbr))
    abbr = re.sub(r, , abbr)
    return abbr
Parse a team's abbreviation.

Given the team's HTML name tag, parse their abbreviation.

Parameters
----------
abbr : string
    A string of a team's HTML name tag.

Returns
-------
string
    Returns a ``string`` of the team's abbreviation.
10,925
def delete(self):
    r = self.jfs.post(url=self.path, params={:})
    return r
Delete this file and return the new, deleted JFSFile
10,926
def inputAnalyzeCallback(self, *args, **kwargs):
    b_status = False
    filesRead = 0
    filesAnalyzed = 0

    for k, v in kwargs.items():
        if k == : d_DCMRead = v
        if k == : str_path = v

    if len(args):
        at_data = args[0]
        str_path = at_data[0]
        d_read = at_data[1]

    b_status = True
    self.dp.qprint("analyzing:\n%s" % self.pp.pformat(d_read[]), level=5)
    if int(self.f_sleepLength):
        self.dp.qprint("sleeping for: %f" % self.f_sleepLength, level=5)
        time.sleep(self.f_sleepLength)
    filesAnalyzed = len(d_read[])

    return {
        : b_status,
        : filesAnalyzed,
        : d_read[]
    }
Test method for inputAnalyzeCallback. This method loops over the passed number of files, and optionally "delays" in each loop to simulate some analysis. The delay length is specified by the '--test <delay>' flag.
10,927
def _reproduce_stages(
    G,
    stages,
    node,
    force,
    dry,
    interactive,
    ignore_build_cache,
    no_commit,
    downstream,
):
    import networkx as nx

    if downstream:
        # NOTE: reversing the graph so that a pre-ordered search from the
        # given stage walks toward its ancestors (see the docstring below)
        pipeline = nx.dfs_preorder_nodes(G.reverse(), node)
    else:
        pipeline = nx.dfs_postorder_nodes(G, node)

    result = []
    for n in pipeline:
        try:
            ret = _reproduce_stage(stages, n, force, dry, interactive,
                                   no_commit)
            if len(ret) != 0 and ignore_build_cache:
                # if a stage was reproduced, force reproducing the stages
                # below it, even if their build cache didn't change
                force = True
            result += ret
        except Exception as ex:
            raise ReproductionError(stages[n].relpath, ex)
    return result
r"""Derive the evaluation of the given node for the given graph. When you _reproduce a stage_, you want to _evaluate the descendants_ to know if it make sense to _recompute_ it. A post-ordered search will give us an order list of the nodes we want. For example, let's say that we have the following pipeline: E / \ D F / \ \ B C G \ / A The derived evaluation of D would be: [A, B, C, D] In case that `downstream` option is specifed, the desired effect is to derive the evaluation starting from the given stage up to the ancestors. However, the `networkx.ancestors` returns a set, without any guarantee of any order, so we are going to reverse the graph and use a pre-ordered search using the given stage as a starting point. E A / \ / \ D F B C G / \ \ --- reverse --> \ / / B C G D F \ / \ / A E The derived evaluation of _downstream_ B would be: [B, D, E]
10,928
def _get_data(self, url, config, send_sc=True):
    if config.username and config.password:
        auth = (config.username, config.password)
    else:
        auth = None

    if isinstance(config.ssl_verify, bool) or isinstance(config.ssl_verify, str):
        verify = config.ssl_verify
    else:
        verify = None
    if config.ssl_cert:
        if config.ssl_key:
            cert = (config.ssl_cert, config.ssl_key)
        else:
            cert = config.ssl_cert
    else:
        cert = None

    resp = None
    try:
        resp = requests.get(
            url,
            timeout=config.timeout,
            headers=headers(self.agentConfig),
            auth=auth,
            verify=verify,
            cert=cert
        )
        resp.raise_for_status()
    except Exception as e:
        if resp and resp.status_code == 400:
            raise AuthenticationError("The ElasticSearch credentials are incorrect")

        if send_sc:
            self.service_check(
                self.SERVICE_CHECK_CONNECT_NAME,
                AgentCheck.CRITICAL,
                message="Error {} when hitting {}".format(e, url),
                tags=config.service_check_tags,
            )
        raise

    self.log.debug("request to url {} returned: {}".format(url, resp))

    return resp.json()
Hit a given URL and return the parsed json
10,929
async def restart(request):
    def wait_and_restart():
        log.info()
        sleep(1)
        os.system()

    Thread(target=wait_and_restart).start()
    return web.json_response({"message": "restarting"})
Returns OK, then waits approximately 1 second and restarts container
10,930
def _build_kreemer_cell(data, loc):
    temp_poly = np.empty([5, 2], dtype=float)
    for ival in range(1, 6):
        value = data[loc + ival].rstrip()
        value = value.lstrip()
        value = np.array((value.split(, 1))).astype(float)
        temp_poly[ival - 1, :] = value.flatten()
    return temp_poly
Constructs the "Kreemer Cell" from the input file. The Kreemer cell is simply a set of five lines describing the four nodes of the square (closed) :param list data: Strain data as list of text lines (input from linecache.getlines) :param int loc: Pointer to location in data :returns: temp_poly - 5 by 2 numpy array of cell longitudes and latitudes
10,931
def simulation_manager(self, thing=None, **kwargs):
    if thing is None:
        thing = [self.entry_state()]
    elif isinstance(thing, (list, tuple)):
        if any(not isinstance(val, SimState) for val in thing):
            raise AngrError("Bad type to initialize SimulationManager")
    elif isinstance(thing, SimState):
        thing = [thing]
    else:
        raise AngrError("Bad type to initialize SimulationManager: %s" % repr(thing))

    return SimulationManager(self.project, active_states=thing, **kwargs)
Constructs a new simulation manager.

:param thing: Optional - What to put in the new SimulationManager's active stash (either a SimState or a list of SimStates).
:param kwargs: Any additional keyword arguments will be passed to the SimulationManager constructor
:returns: The new SimulationManager
:rtype: angr.sim_manager.SimulationManager

Many different types can be passed to this method:

* If nothing is passed in, the SimulationManager is seeded with a state initialized for the program entry point, i.e. :meth:`entry_state()`.
* If a :class:`SimState` is passed in, the SimulationManager is seeded with that state.
* If a list is passed in, the list must contain only SimStates and the whole list will be used to seed the SimulationManager.
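A short usage sketch (assumes angr is installed; the binary path is illustrative):

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)

simgr = proj.factory.simulation_manager()  # seeded with entry_state()
simgr_blank = proj.factory.simulation_manager(proj.factory.blank_state())
simgr.explore()  # step until all active states terminate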
10,932
def _variant_levels(level, variant):
    return (level + variant, level + variant) \
        if variant != 0 else (variant, level)
Gets the level for the variant.

:param int level: the current variant level
:param int variant: the value for this level if variant
:returns: a level for the object and one for the function
:rtype: int * int
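A worked example of the rule above (values illustrative):

# variant == 0: the object keeps variant level 0 and the function keeps `level`
assert _variant_levels(2, 0) == (0, 2)
# variant != 0: both the object and the function get level + variant
assert _variant_levels(2, 1) == (3, 3)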
10,933
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
Private file object read method. Classes that inherit from this base class must implement this method.

The ``read()`` method that each file object inherits from this base class performs the processes common to all file read methods, after which it calls the file object's ``_read()`` method (the preceding underscore denotes that the method is a private method).

The purpose of the ``_read()`` method is to perform the file read operations that are specific to the file that the file object represents. This method should add any supporting SQLAlchemy objects to the session without committing. The common ``read()`` method handles the database commit for all file objects.

The ``read()`` method processes the user input and passes on the information through the many parameters of the ``_read()`` method. As the ``_read()`` method should never be called by the user directly, the arguments will be defined in terms of what they offer for the developer of a new file object needing to implement this method.

Args:
    directory (str): Directory containing the file to be read. Same as given by user in ``read()``.
    filename (str): Name of the file which will be read (e.g.: 'example.prj'). Same as given by user in ``read()``.
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Same as given by user in ``read()``.
    path (str): Directory and filename combined into the path to the file. This is a convenience parameter.
    name (str): Name of the file without extension. This is a convenience parameter.
    extension (str): Extension of the file without the name. This is a convenience parameter.
    spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects. Defaults to False. Same as given by user in ``read()``.
    spatialReferenceID (int, optional): Integer id of spatial reference system for the model. Required if spatial is True. Same as given by user in ``read()``.
    replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): Handle the case when replacement parameters are used in place of normal variables. If this is not None, then the user expects there to be replacement variables in the file. Use the gsshapy.lib.parsetools.valueReadPreprocessor() to handle these.
10,934
def escape(u):
    if not isinstance(u, unicode_type):
        raise ValueError('Only unicode objects are escapable.')
    return quote(u.encode('utf-8'), safe=b'~')
Escape a string in an OAuth-compatible fashion. TODO: verify whether this can in fact be used for OAuth 2
10,935
def _constexpr_transform(fn):
    code_string = inspect.getsource(fn)
    while _s.match(code_string):
        code_string = textwrap.dedent(code_string)
    module = ast.parse(code_string)
    fn_ast = module.body[0]

    fn: types.FunctionType
    fn_name = fn.__name__
    closure = fn.__closure__
    closure_dict = {v: c.cell_contents
                    for v, c in zip(fn.__code__.co_freevars,
                                    closure if closure else ())}
    ctx = CompilingTimeMapping(fn.__globals__, closure_dict)
    ce = ConstExpr(ctx, [], OrderedDict(), {}, [], fn.__code__.co_filename)
    body = fn_ast.body

    macro_def = new_transformer(ce, MazcroDef)
    macro_invoke = new_transformer(ce, MacroInvoke)
    const_def = new_transformer(ce, ConstExprConstDef)
    const_if = new_transformer(ce, ConstExprIf)
    name_fold = new_transformer(ce, ConstExprNameFold)

    body = _visit_suite(macro_def.visit, body)
    body = _visit_suite(macro_invoke.visit, body)
    body = _visit_suite(const_def.visit, body)
    body = _visit_suite(const_if.visit, body)
    body = _visit_suite(name_fold.visit, body)

    fn_ast.body = body
    module.body = [fn_ast]
    code = compile(module, "<const-optimize>", "exec")
    fn_code: types.CodeType = next(
        each for each in code.co_consts
        if isinstance(each, types.CodeType) and each.co_name == fn_name)
    fn_code = const_link(fn_code, ce.constant_symbols, ce.additional_consts,
                         fn.__code__, ce.nonlocal_names)
    new_fn = types.FunctionType(fn_code, fn.__globals__, fn.__name__,
                                fn.__defaults__, fn.__closure__)
    new_fn.__annotations__ = fn.__annotations__
    new_fn.__doc__ = fn.__doc__
    new_fn.__kwdefaults__ = fn.__kwdefaults__
    new_fn.__module__ = fn.__module__
    return new_fn
>>> from Redy.Opt.ConstExpr import constexpr, const, optimize, macro
>>> import dis
>>> a = 1; b = ""; c = object()
>>> x = 1
>>> @optimize
>>> def f(y):
>>>     val1: const[int] = a
>>>     val2: const = b
>>>     if constexpr[x is c]:
>>>         return val1, y
>>>     elif constexpr[x is 1]:
>>>         return None, y
>>>     else:
>>>         return val2, y
>>> assert f(1) == (None, 1)
>>> dis.dis(f)
>>> @optimize
>>> def f(x):
>>>     d: const = 1
>>>     return x + d + constexpr[2]
>>> dis.dis(f)
>>> print('result:', f(1))
>>> @optimize
>>> def f(z):
>>>     @macro
>>>     def g(a):
>>>         x = a + 1
>>>     g(z)
>>>     return x
>>> dis.dis(f)
>>> print('result:', f(1))
>>> c = 10
>>> @optimize
>>> def f(x):
>>>     if constexpr[1 + c < 10]:
>>>         return x + 1
>>>     else:
>>>         return x - 1
>>> print(dis.dis(f))
>>> print(f(5))
>>> @optimize
>>> def f(x):
>>>     return (x + constexpr[c * 20]) if constexpr[c > 10] else constexpr[c - 2]
>>> dis.dis(f)
>>> print(f(20))
>>> def g(lst: list):
>>>     k = 1
>>>     @optimize
>>>     def _():
>>>         nonlocal k
>>>         f: const = lst.append
>>>         for i in range(1000):
>>>             f(i)
>>>             k += 1
>>>         f(k)
>>>     _()
>>>     return lst
>>> # dis.dis(g)
>>> print(g([]))
10,936
def CreateBiddingStrategy(client):
    # Initialize appropriate service.
    bidding_strategy_service = client.GetService(
        'BiddingStrategyService', version='v201809')

    # Create a shared bidding strategy.
    shared_bidding_strategy = {
        'name': 'Maximize Clicks %s' % uuid.uuid4(),
        'biddingScheme': {
            'xsi_type': 'TargetSpendBiddingScheme',
            'bidCeiling': {
                'microAmount': '2000000'
            }
        }
    }

    # Create operation.
    operation = {
        'operator': 'ADD',
        'operand': shared_bidding_strategy
    }

    response = bidding_strategy_service.mutate([operation])
    new_bidding_strategy = response['value'][0]

    print('Shared bidding strategy with name "%s" and ID "%s" of type "%s" '
          'was created.' % (new_bidding_strategy['name'],
                            new_bidding_strategy['id'],
                            new_bidding_strategy['biddingScheme']
                            ['BiddingScheme.Type']))

    return new_bidding_strategy
Creates a bidding strategy object. Args: client: AdWordsClient the client to run the example with. Returns: dict An object representing a bidding strategy.
10,937
def load(filename: str, format: str = None):
    path = Path(filename).resolve()
    with path.open() as file:
        data = file.read()

    if format is None:
        loader, error_class = _load_autodetect, InvalidMofileFormat
    else:
        try:
            loader, error_class = formats[format]
        except KeyError:
            raise InvalidMofileFormat(f)

    try:
        config = loader(data)
    except error_class as e:
        raise InvalidMofileFormat(f)

    return Project(config, path.parent)
Load a task file and get a ``Project`` back.
10,938
def dump(self, output, close_after_write=True):
    try:
        output.write
        self.stream = output
    except AttributeError:
        self.stream = io.open(output, "w", encoding="utf-8")

    try:
        self.write_table()
    finally:
        if close_after_write:
            self.stream.close()
            self.stream = sys.stdout
Write data to the output with tabular format. Args: output (file descriptor or str): file descriptor or path to the output file. close_after_write (bool, optional): Close the output after write. Defaults to |True|.
10,939
def divide(self, other, out=None):
    return self.space.divide(self, other, out=out)
Return ``out = self / other``. If ``out`` is provided, the result is written to it. See Also -------- LinearSpace.divide
10,940
def flat_images(images, grid=None, bfill=1.0, bsz=(1, 1)):
    if images.ndim == 4 and images.shape[-1] == 1:
        images = images.squeeze(axis=-1)
    grid = grid or grid_recommend(len(images), sorted(images[0].shape[:2]))
    if not isinstance(bsz, (tuple, list)):
        bsz = (bsz, bsz)

    # add border to each patch
    imshape = list(images.shape)
    imshape[0] = grid[0] * grid[1]
    imshape[1] += bsz[0]
    imshape[2] += bsz[1]

    data = np.empty(imshape, dtype=images.dtype)
    data.fill(bfill)

    bslice0 = slice(0, -bsz[0]) if bsz[0] else slice(None, None)
    bslice1 = slice(0, -bsz[1]) if bsz[1] else slice(None, None)
    data[:len(images), bslice0, bslice1] = images

    imshape = list(grid) + imshape[1:]
    data = data.reshape(imshape)
    if len(imshape) == 5:
        data = data.transpose(0, 2, 1, 3, 4)
        imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3], imshape[4]]
    else:
        data = data.transpose(0, 2, 1, 3)
        imshape = [imshape[0]*imshape[2], imshape[1]*imshape[3]]
    data = data.reshape(imshape)

    # trim the trailing border
    data = data[bslice0, bslice1]

    return data
Convert a batch of images to one flat image with margins inserted: [B,h,w,c] => [H,W,c]

:param images:
:param grid: patch grid cell size of (Row, Col)
:param bfill: border filling value
:param bsz: int or (int, int) border size
:return: flattened image
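A quick shape check (assumes numpy; the shape arithmetic follows the border logic above):

import numpy as np

batch = np.random.rand(6, 32, 32, 3)      # six RGB patches
flat = flat_images(batch, grid=(2, 3))    # 2 rows x 3 cols, 1-pixel borders
# each cell grows to 33x33 and the trailing border is trimmed:
# (2*33 - 1, 3*33 - 1, 3)
print(flat.shape)                         # (65, 98, 3)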
10,941
def _get_exchange_key_ntlm_v1(negotiate_flags, session_base_key,
                              server_challenge, lm_challenge_response, lm_hash):
    if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
        key_exchange_key = hmac.new(
            session_base_key, server_challenge + lm_challenge_response[:8]).digest()
    elif negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_LM_KEY:
        des_handler = des.DES(lm_hash[:7])
        first_des = des_handler.encrypt(lm_challenge_response[:8])
        # per MS-NLMP, the second DES key is LMOWF[7] padded with 0xBD bytes
        des_handler = des.DES(lm_hash[7:8] + binascii.unhexlify('bdbdbdbdbdbd'))
        second_des = des_handler.encrypt(lm_challenge_response[:8])
        key_exchange_key = first_des + second_des
    elif negotiate_flags & NegotiateFlags.NTLMSSP_REQUEST_NON_NT_SESSION_KEY:
        key_exchange_key = lm_hash[:8] + b'\x00' * 8
    else:
        key_exchange_key = session_base_key

    return key_exchange_key
[MS-NLMP] v28.0 2016-07-14

4.3.5.1 KXKEY

Calculates the Key Exchange Key for NTLMv1 authentication. Used for signing and sealing messages.

@param negotiate_flags:
@param session_base_key: A session key calculated from the user password challenge
@param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE
@param lm_challenge_response: The LmChallengeResponse value computed in ComputeResponse
@param lm_hash: The LMOWF computed in Compute Response
@return key_exchange_key: The Key Exchange Key (KXKEY) used to sign and seal messages and compute the ExportedSessionKey
10,942
def estimate_tau_exp(chains, **kwargs):
    rho = np.nan * np.ones(chains.shape[1:])
    for i in range(chains.shape[2]):
        try:
            rho[:, i] = autocorr.function(np.mean(chains[:, :, i], axis=0),
                                          **kwargs)
        except:
            continue

    # fit an exponential decay to the maximum auto-correlation profile
    rho_max = np.max(rho, axis=1)
    x = np.arange(rho_max.size)
    func = lambda tau_exp: np.exp(-x/tau_exp)
    chi = lambda tau_exp: func(tau_exp[0]) - rho_max
    tau_exp, ier = leastsq(chi, [chains.shape[1]/2.])
    return (tau_exp, rho, func(tau_exp))
Estimate the exponential auto-correlation time for all parameters in a chain.
10,943
async def setup_watchdog(self, cb, timeout):
    self._watchdog_timeout = timeout
    self._watchdog_cb = cb
    self._watchdog_task = self.loop.create_task(self._watchdog(timeout))
Trigger a reconnect after @timeout seconds of inactivity.
10,944
def unzoom_all(self, event=None, panel=None):
    if panel is None:
        panel = self.current_panel
    self.panels[panel].unzoom_all(event=event)
Zoom out to the full data range.
10,945
def RandomShuffle(a, seed):
    if seed:
        np.random.seed(seed)
    r = a.copy()
    np.random.shuffle(r)
    return r,
Random shuffle op.
10,946
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_buffer = utils.BytearrayStream()

    if self._object_type:
        self._object_type.write(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the object type "
            "field."
        )

    if self._unique_identifiers:
        for unique_identifier in self._unique_identifiers:
            unique_identifier.write(
                local_buffer,
                kmip_version=kmip_version
            )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the unique "
            "identifiers field."
        )

    if self._derivation_method:
        self._derivation_method.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the derivation "
            "method field."
        )

    if self._derivation_parameters:
        self._derivation_parameters.write(
            local_buffer,
            kmip_version=kmip_version
        )
    else:
        raise exceptions.InvalidField(
            "The DeriveKey request payload is missing the derivation "
            "parameters field."
        )

    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        if self._template_attribute:
            self._template_attribute.write(
                local_buffer,
                kmip_version=kmip_version
            )
        else:
            raise exceptions.InvalidField(
                "The DeriveKey request payload is missing the template "
                "attribute field."
            )
    else:
        # KMIP 2.0 replaces template attributes with plain attributes
        if self._template_attribute:
            attrs = objects.convert_template_attribute_to_attributes(
                self._template_attribute
            )
            attrs.write(local_buffer, kmip_version=kmip_version)
        else:
            raise exceptions.InvalidField(
                "The DeriveKey request payload is missing the template "
                "attribute field."
            )

    self.length = local_buffer.length()
    super(DeriveKeyRequestPayload, self).write(
        output_buffer,
        kmip_version=kmip_version
    )
    output_buffer.write(local_buffer.buffer)
Write the data encoding the DeriveKey request payload to a stream.

Args:
    output_buffer (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object.
    kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.

Raises:
    ValueError: Raised if the data attribute is not defined.
10,947
def _report_self(self):
    key = "stats:kafka-monitor:self:{m}:{u}".format(
        m=socket.gethostname(),
        u=self.my_uuid)
    self.redis_conn.set(key, time.time())
    self.redis_conn.expire(key, self.settings[])
Reports the kafka monitor uuid to redis
10,948
def _filter_pb(field_or_unary):
    if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):
        return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)
    elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):
        return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)
    else:
        raise ValueError("Unexpected filter type",
                         type(field_or_unary), field_or_unary)
Convert a specific protobuf filter to the generic filter type.

Args:
    field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\
        query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\
        firestore.v1beta1.query_pb2.StructuredQuery.UnaryFilter]):
        A field or unary filter to convert to a generic filter.

Returns:
    google.cloud.firestore_v1beta1.types.StructuredQuery.Filter: A "generic" filter.

Raises:
    ValueError: If ``field_or_unary`` is not a field or unary filter.
10,949
def charge_balance(self):
    return sum([zi*ci for zi, ci in zip(self.zs, self.charges)])
Charge imbalance of the mixture, in units of [faraday]. Mixtures meeting the electroneutrality condition will have an imbalance of 0.

Examples
--------
>>> Mixture(['Na+', 'Cl-', 'water'], zs=[.01, .01, .98]).charge_balance
0.0
10,950
def read_message_from_pipe(pipe_handle):
    data = yield From(read_message_bytes_from_pipe(pipe_handle))
    assert isinstance(data, bytes)
    raise Return(data.decode('utf-8', 'replace'))
(coroutine) Read message from this pipe. Return text.
10,951
def set_content_type(self):
    if self.object_name and not self.content_type:
        self.content_type, encoding = mimetypes.guess_type(
            self.object_name, strict=False)
Set the content type based on the file extension used in the object name.
10,952
def hide_busy(self):
    self.progress_bar.hide()
    self.parent.pbnNext.setEnabled(True)
    self.parent.pbnBack.setEnabled(True)
    self.parent.pbnCancel.setEnabled(True)
    self.parent.repaint()
    disable_busy_cursor()
Unlock buttons. A helper function to indicate processing is done.
10,953
def execute_on_key_owner(self, key, task):
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    partition_id = self._client.partition_service.get_partition_id(key_data)
    uuid = self._get_uuid()
    return self._encode_invoke_on_partition(
        executor_service_submit_to_partition_codec, partition_id,
        uuid=uuid, callable=self._to_data(task), partition_id=partition_id)
Executes a task on the owner of the specified key.

:param key: (object), the specified key.
:param task: (Task), a task executed on the owner of the specified key.
:return: (:class:`~hazelcast.future.Future`), future representing pending completion of the task.
10,954
def truncate(sequence):
    if len(sequence) > LIST_SLICE:
        return ", ".join(sequence[:LIST_SLICE] + ["..."])
    else:
        return ", ".join(sequence)
Create a potentially shortened text display of a list.

Parameters
----------
sequence : list
    An indexable sequence of elements.

Returns
-------
str
    The list as a formatted string.
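Illustrative behaviour, assuming LIST_SLICE is a small module constant such as 5:

LIST_SLICE = 5
print(truncate(["a", "b", "c"]))              # a, b, c
print(truncate([str(i) for i in range(9)]))   # 0, 1, 2, 3, 4, ...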
10,955
def run_server(self, port):
    try:
        self.server = MultiThreadedHTTPServer(('0.0.0.0', port), Handler)
    except socket.error, e:
        logger.error(str(e))
        sys.exit(1)

    logger.info("HTTP serve at http://0.0.0.0:%d (ctrl-c to stop) ..." % port)

    try:
        self.server.serve_forever()
    except KeyboardInterrupt:
        logger.info("^C received, shutting down server")
        self.shutdown_server()
run a server binding to port
10,956
def filter_taxa(fasta_path: ,
                taxids: ,
                unclassified: = False,
                discard: = False,
                warnings: = False):
    configure_warnings(warnings)
    records = SeqIO.parse(fasta_path, 'fasta')
    filtered_records = tictax.filter_taxa(records, map(int, taxids.split()),
                                          unclassified, discard)
    SeqIO.write(filtered_records, sys.stdout, 'fasta')
Customisable filtering of tictax flavoured fasta files
10,957
def register_id(self, id, module):
    assert isinstance(id, basestring)
    assert isinstance(module, basestring)
    self.id2module[id] = module
Associate the given id with the given project module.
10,958
def delete(self, key):
    path = self.object_path(key)
    if os.path.exists(path):
        os.remove(path)
Removes the object named by `key`. Args: key: Key naming the object to remove.
10,959
def clear(self, *args):
    for field in self.fields_to_clear + list(args):
        setattr(self, field, None)
Set default values to **self.fields_to_clear**. In addition, it is possible to pass extra fields to clear. :param args: extra fields to clear.
10,960
def exists(self, symbol):
    if isinstance(symbol, str):
        sym = symbol
    elif isinstance(symbol, Symbol):
        sym = symbol.name

    syms = self.ses.query(Symbol).filter(Symbol.name == sym).all()
    if len(syms) == 0:
        return False
    else:
        return True
Checks if a symbol exists, by name.

Parameters
----------
symbol : str or Symbol

Returns
-------
bool
10,961
def ParseFileObject(self, parser_mediator, file_object):
    file_object.seek(0, os.SEEK_SET)
    header = file_object.read(2)
    if not self.BENCODE_RE.match(header):
        raise errors.UnableToParseFile()

    file_object.seek(0, os.SEEK_SET)
    try:
        data_object = bencode.bdecode(file_object.read())
    except (IOError, bencode.BTFailure) as exception:
        raise errors.UnableToParseFile(
            .format(self.NAME, parser_mediator.GetDisplayName(), exception))

    if not data_object:
        raise errors.UnableToParseFile(
            .format(self.NAME, parser_mediator.GetDisplayName()))

    for plugin in self._plugins:
        try:
            plugin.UpdateChainAndProcess(parser_mediator, data=data_object)
        except errors.WrongBencodePlugin as exception:
            logger.debug(.format(self.NAME, exception))
Parses a bencoded file-like object.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): a file-like object.

Raises:
    UnableToParseFile: when the file cannot be parsed.
10,962
def LDREX(cpu, dest, src, offset=None):
    cpu._LDR(dest, src, 32, False, offset)
LDREX loads data from memory.

* If the physical address has the shared TLB attribute, LDREX tags the physical address as exclusive access for the current processor, and clears any exclusive access tag for this processor for any other physical address.
* Otherwise, it tags the fact that the executing processor has an outstanding tagged physical address.

:param Armv7Operand dest: the destination register; register
:param Armv7Operand src: the source operand: register
10,963
def _create_field_vectors(self):
    field_vectors = {}
    term_idf_cache = {}

    for field_ref, term_frequencies in self.field_term_frequencies.items():
        _field_ref = FieldRef.from_string(field_ref)
        field_name = _field_ref.field_name
        field_length = self.field_lengths[field_ref]
        field_vector = Vector()
        field_boost = self._fields[field_name].boost
        doc_boost = self._documents[_field_ref.doc_ref].get("boost", 1)

        for term, tf in term_frequencies.items():
            term_index = self.inverted_index[term]["_index"]

            if term not in term_idf_cache:
                idf = Idf(self.inverted_index[term], self.document_count)
                term_idf_cache[term] = idf
            else:
                idf = term_idf_cache[term]

            # BM25 scoring with field and document boosts
            score = (
                idf * ((self._k1 + 1) * tf)
                / (self._k1
                   * (1 - self._b
                      + self._b
                      * (field_length / self.average_field_length[field_name]))
                   + tf))
            score *= field_boost
            score *= doc_boost
            score_with_precision = round(score, 3)

            field_vector.insert(term_index, score_with_precision)

        field_vectors[field_ref] = field_vector

    self.field_vectors = field_vectors
Builds a vector space model of every document using lunr.Vector.
10,964
def p_systemcall_signed(self, p):
    p[0] = SystemCall(p[2], p[4], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
systemcall : DOLLER SIGNED LPAREN sysargs RPAREN
10,965
def setup_conf(conf_globals):
    project_path = abspath(join(dirname(conf_globals["__file__"]), ".."))
    chdir(project_path)
    sys.path.insert(0, project_path)

    authors_file = "AUTHORS"
    version = None
    author = None
    setup = "setup.py"
    setup_path = join(project_path, setup)
    ignore = (setup,)

    if exists(setup_path):
        try:
            import setuptools
        except ImportError:
            pass
        else:
            version = get_setup_attribute("version", setup_path)
            if version == "0.0.0":
                version = None
            author = get_setup_attribute("author", setup_path)
            if author == "UNKNOWN":
                author = None

    for name in listdir(project_path):
        path = join(project_path, name)
        if name.upper() == authors_file:
            with open(path, "r") as f:
                for line in f.readlines():
                    line = line.strip("*- \n\r\t")
                    if line:
                        author = decode_utf8(line)
                        break
        elif name not in ignore and (isdir(path) or splitext(name)[1] == ".py"):
            try:
                module = __import__(name)
            except (ImportError, ValueError):
                continue
            if not version:
                version = get_version(module)
            if version and not author:
                try:
                    author = decode_utf8(getattr(module, "__author__"))
                except AttributeError:
                    pass

    settings = {
        "version": version,
        "release": version,
        "project": project_path.rstrip(sep).split(sep)[-1],
        "master_doc": "index",
        "copyright": "%s, %s" % (datetime.now().year, author),
    }

    pad = max([len(k) for k in settings.keys()]) + 3
    print()
    print("sphinx-me using the following values:")
    print()
    print("\n".join([(k + ":").ljust(pad) + v for k, v in settings.items()]))
    print()

    conf_globals.update(settings)
Setup function that is called from within the project's docs/conf.py module that takes the conf module's globals() and assigns the values that can be automatically determined from the current project, such as project name, package name, version and author.
10,966
def lookup_rdap(self, hr=True, show_name=False, colorize=True, **kwargs):
    # Perform the RDAP lookup
    ret = self.obj.lookup_rdap(**kwargs)

    if script_args.json:
        output = json.dumps(ret)
    else:
        # Generate the formatted CLI output
        output = self.generate_output_header(query_type='RDAP')
        output += self.generate_output_asn(
            json_data=ret, hr=hr, show_name=show_name, colorize=colorize
        )
        output += self.generate_output_newline(colorize=colorize)
        output += self.generate_output_entities(
            json_data=ret, hr=hr, show_name=show_name, colorize=colorize
        )
        output += self.generate_output_newline(colorize=colorize)
        output += self.generate_output_network(
            json_data=ret, hr=hr, show_name=show_name, colorize=colorize
        )
        output += self.generate_output_newline(colorize=colorize)
        output += self.generate_output_objects(
            json_data=ret, hr=hr, show_name=show_name, colorize=colorize
        )
        output += self.generate_output_newline(colorize=colorize)
        if 'nir' in ret:
            output += self.generate_output_nir(
                json_data=ret, hr=hr, show_name=show_name, colorize=colorize
            )
            output += self.generate_output_newline(colorize=colorize)

    return output
The function for wrapping IPWhois.lookup_rdap() and generating formatted CLI output.

Args:
    hr (:obj:`bool`): Enable human readable key translations. Defaults to True.
    show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False.
    colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True.
    kwargs: Arguments to pass to IPWhois.lookup_rdap().

Returns:
    str: The generated output.
10,967
def update(self):
    stats = self.get_init_value()

    if self.input_method == 'local':
        stats['os_name'] = platform.system()
        stats['hostname'] = platform.node()
        stats['platform'] = platform.architecture()[0]
        if stats['os_name'] == "Linux":
            try:
                linux_distro = platform.linux_distribution()
            except AttributeError:
                stats['linux_distro'] = _linux_os_release()
            else:
                if linux_distro[0] == '':
                    stats['linux_distro'] = _linux_os_release()
                else:
                    stats['linux_distro'] = ' '.join(linux_distro[:2])
            stats['os_version'] = platform.release()
        elif (stats['os_name'].endswith('BSD') or stats['os_name'] == 'SunOS'):
            stats['os_version'] = platform.release()
        elif stats['os_name'] == "Darwin":
            stats['os_version'] = platform.mac_ver()[0]
        elif stats['os_name'] == "Windows":
            os_version = platform.win32_ver()
            stats['os_version'] = ' '.join(os_version[::2])
            if stats['os_version'] == and in os.environ:
                stats['os_version'] =
        else:
            stats['os_version'] = ""
        # Add human readable name
        if stats['os_name'] == "Linux":
            stats['hr_name'] = stats['linux_distro']
        else:
            stats['hr_name'] = '{} {}'.format(stats['os_name'],
                                              stats['os_version'])
        stats['hr_name'] += ' {}'.format(stats['platform'])
    elif self.input_method == 'snmp':
        try:
            stats = self.get_stats_snmp(
                snmp_oid=snmp_oid[self.short_system_name])
        except KeyError:
            stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
        stats['os_name'] = stats['system_name']
        if self.short_system_name == 'windows':
            for r, v in iteritems(snmp_to_human['windows']):
                if re.search(r, stats['system_name']):
                    stats['os_name'] = v
                    break
        stats['hr_name'] = stats['os_name']

    self.stats = stats
    return self.stats
Update the host/system info using the input method. Return the stats (dict)
10,968
def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):
    if isinstance(value, (int, float)):
        return unitized(value, default_unit, base)

    if value is None:
        return None

    try:
        if value[-1].lower() == "b":
            # accept notations such as "4kb": drop the trailing 'b'
            value = value[:-1]

        unit = value[-1:].lower()
        if unit.isdigit():
            unit = default_unit
        else:
            value = value[:-1]

        return unitized(to_number(float, value), unit, base)

    except (IndexError, TypeError, ValueError):
        return None
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes

Args:
    value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS
    default_unit (str | unicode | None): Default unit to use for unqualified values
    base (int): Base to use (usually 1024)

Returns:
    (int | None): Deduced bytesize value, if possible
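A few illustrative calls (assuming base 1024 and a `unitized` helper that maps 'k' to one power of the base):

print(to_bytesize("4k"))        # 4096
print(to_bytesize("4kb"))       # 4096 -- the trailing 'b' is dropped first
print(to_bytesize("10", "k"))   # 10240 -- bare numbers use default_unit
print(to_bytesize("oops"))      # None -- unparseable input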
10,969
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'available') and self.available is not None:
        _dict['available'] = self.available
    if hasattr(self, 'processing') and self.processing is not None:
        _dict['processing'] = self.processing
    if hasattr(self, 'failed') and self.failed is not None:
        _dict['failed'] = self.failed
    if hasattr(self, 'pending') and self.pending is not None:
        _dict['pending'] = self.pending
    return _dict
Return a json dictionary representing this model.
10,970
def dist_eudex(src, tar, weights='exponential', max_length=8):
    return Eudex().dist(src, tar, weights, max_length)
Return normalized Hamming distance between Eudex hashes of two terms.

This is a wrapper for :py:meth:`Eudex.dist`.

Parameters
----------
src : str
    Source string for comparison
tar : str
    Target string for comparison
weights : str, iterable, or generator function
    The weights or weights generator function
max_length : int
    The number of characters to encode as a eudex hash

Returns
-------
int
    The normalized Eudex Hamming distance

Examples
--------
>>> round(dist_eudex('cat', 'hat'), 12)
0.062745098039
>>> round(dist_eudex('Niall', 'Neil'), 12)
0.000980392157
>>> round(dist_eudex('Colin', 'Cuilen'), 12)
0.004901960784
>>> round(dist_eudex('ATCG', 'TAGC'), 12)
0.197549019608
10,971
def loadfile(args):
    mestate.console.write("Loading %s...\n" % args)
    t0 = time.time()
    mlog = mavutil.mavlink_connection(args, notimestamps=False,
                                      zero_time_base=False,
                                      progress_callback=progress_bar)
    mestate.filename = args
    mestate.mlog = mlog
    mestate.status.msgs = mlog.messages
    t1 = time.time()
    mestate.console.write("\ndone (%u messages in %.1fs)\n"
                          % (mestate.mlog._count, t1-t0))
    global flightmodes
    flightmodes = mlog.flightmode_list()
    load_graphs()
    setup_menus()
load a log file (path given by arg)
10,972
def fastcc_consistent_subset(model, epsilon, solver):
    reaction_set = set(model.reactions)
    return reaction_set.difference(fastcc(model, epsilon, solver))
Return consistent subset of model.

The largest consistent subset is returned as a set of reaction names.

Args:
    model: :class:`MetabolicModel` to solve.
    epsilon: Flux threshold value.
    solver: LP solver instance to use.

Returns:
    Set of reaction IDs in the consistent reaction subset.
10,973
def update(self):
    con = self.subpars.pars.control
    self(con.tt + con.dttm)
Update |TTM| based on :math:`TTM = TT+DTTM`.

>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(1)
>>> zonetype(FIELD)
>>> tt(1.0)
>>> dttm(-2.0)
>>> derived.ttm.update()
>>> derived.ttm
ttm(-1.0)
10,974
def sync(self):
    self.log.info('Starting a sync...')

    def log_success(result):
        self.log.info('Sync completed successfully')
        return result

    def log_failure(failure):
        self.log.failure('Sync failed', failure, LogLevel.error)
        return failure

    return (self.marathon_client.get_apps()
            .addCallback(self._apps_acme_domains)
            .addCallback(self._filter_new_domains)
            .addCallback(self._issue_certs)
            .addCallbacks(log_success, log_failure))
Fetch the list of apps from Marathon, find the domains that require certificates, and issue certificates for any domains that don't already have a certificate.
10,975
def _from_any_pb(pb_type, any_pb):
    msg = pb_type()
    if not any_pb.Unpack(msg):
        raise TypeError(
            "Could not convert {} to {}".format(
                any_pb.__class__.__name__, pb_type.__name__
            )
        )
    return msg
Converts an Any protobuf to the specified message type

Args:
    pb_type (type): the type of the message that any_pb stores an instance of.
    any_pb (google.protobuf.any_pb2.Any): the object to be converted.

Returns:
    pb_type: An instance of the pb_type message.

Raises:
    TypeError: if the message could not be converted.
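A short usage sketch (assumes the protobuf package; Timestamp is just a convenient message type):

from google.protobuf import any_pb2, timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
any_msg = any_pb2.Any()
any_msg.Pack(ts)

unpacked = _from_any_pb(timestamp_pb2.Timestamp, any_msg)
print(unpacked.seconds)  # 1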
10,976
def replace_all(expression: Expression, rules: Iterable[ReplacementRule],
                max_count: int = math.inf) \
        -> Union[Expression, Sequence[Expression]]:
    rules = [ReplacementRule(pattern, replacement)
             for pattern, replacement in rules]
    replaced = True
    replace_count = 0
    while replaced and replace_count < max_count:
        replaced = False
        for subexpr, pos in preorder_iter_with_position(expression):
            for pattern, replacement in rules:
                try:
                    subst = next(match(subexpr, pattern))
                    result = replacement(**subst)
                    expression = replace(expression, pos, result)
                    replaced = True
                    break
                except StopIteration:
                    pass
            if replaced:
                break
        replace_count += 1
    return expression
Replace all occurrences of the patterns according to the replacement rules.

A replacement rule consists of a *pattern*, that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression.

Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched.

Args:
    expression: The expression to which the replacement rules are applied.
    rules: A collection of replacement rules that are applied to the expression.
    max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set.

Returns:
    The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
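A hedged usage sketch in matchpy's style (the operation `f` and the symbols are illustrative):

from matchpy import (Arity, Operation, Pattern, ReplacementRule,
                     Symbol, Wildcard, replace_all)

f = Operation.new('f', Arity.unary)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

# rewrite every f(<anything>) into the symbol b
rule = ReplacementRule(Pattern(f(x_)), lambda x: b)
print(replace_all(f(a), [rule]))  # b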
10,977
def _get_py_dictionary(self, var, names=None, used___dict__=False):
    filter_private = False
    filter_special = True
    filter_function = True
    filter_builtin = True

    if not names:
        names, used___dict__ = self.get_names(var)
    d = {}

    if filter_builtin or filter_function:
        for name in names:
            try:
                name_as_str = name
                if name_as_str.__class__ != str:
                    name_as_str = '%r' % (name_as_str,)

                if filter_special:
                    if name_as_str.startswith('__') and name_as_str.endswith('__'):
                        continue

                if filter_private:
                    if name_as_str.startswith('_') or name_as_str.endswith('__'):
                        continue
                if not used___dict__:
                    attr = getattr(var, name)
                else:
                    attr = var.__dict__[name]

                if filter_builtin:
                    if inspect.isbuiltin(attr):
                        continue

                if filter_function:
                    if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
                        continue
            except:
                # if some error occurs getting it, let's put it to the user
                strIO = StringIO.StringIO()
                traceback.print_exc(file=strIO)
                attr = strIO.getvalue()

            d[name_as_str] = attr

    return d, used___dict__
:return tuple(names, used___dict__), where used___dict__ means we have to access using obj.__dict__[name] instead of getattr(obj, name)
10,978
def batch_to_ids(batch: List[List[str]]) -> torch.Tensor:
    instances = []
    indexer = ELMoTokenCharactersIndexer()
    for sentence in batch:
        tokens = [Token(token) for token in sentence]
        field = TextField(tokens, {'character_ids': indexer})
        instance = Instance({"elmo": field})
        instances.append(instance)

    dataset = Batch(instances)
    vocab = Vocabulary()
    dataset.index_instances(vocab)
    return dataset.as_tensor_dict()['elmo']['character_ids']
Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters (len(batch), max sentence length, max word length).

Parameters
----------
batch : ``List[List[str]]``, required
    A list of tokenized sentences.

Returns
-------
A tensor of padded character ids.
10,979
def show_tree(model=None):
    if model is None:
        model = mx.cur_model()
    view = get_modeltree(model)
    app = QApplication.instance()
    if not app:
        raise RuntimeError("QApplication does not exist.")
    view.show()
    app.exec_()
Display the model tree window.

Args:
    model: :class:`Model <modelx.core.model.Model>` object. Defaults to the current model.

Warnings:
    For this function to work with Spyder, *Graphics backend* option of Spyder must be set to *inline*.
10,980
def parse_phones(self):
    phones = []
    for i in self.phone_intervals:
        start = float(i[i.index()+7: i.index()+12].strip().strip())
        end = float(i[i.index()+7: i.index()+12].strip().strip())
        phone = i[i.index()+1:i.index("$")]
        phones.append(Phone(phone, start, end))
    return phones
Parse TextGrid phone intervals. This method parses the phone intervals in a TextGrid to extract each phone and each phone's start and end times in the audio recording. For each phone, it instantiates the class Phone(), with the phone and its start and end times as attributes of that class instance.
10,981
def resize(self, width: int, height: int):
    self.width = width // self.widget.devicePixelRatio()
    self.height = height // self.widget.devicePixelRatio()
    self.buffer_width = width
    self.buffer_height = height

    if self.ctx:
        self.set_default_viewport()

    super().resize(self.buffer_width, self.buffer_height)
Replacement for Qt's resizeGL method.
10,982
def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
    return do_ams_get_url(endpoint, access_token, flag)
Get Media Services Final Endpoint URL.

Args:
    access_token (str): A valid Azure authentication token.
    endpoint (str): Azure Media Services Initial Endpoint.
    flag (bool): flag.

Returns:
    HTTP response. JSON body.
10,983
def CA_code_header(fname_out, Nca):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    ca = loadtxt(dir_path + , dtype=int16, usecols=(Nca - 1,), unpack=True)

    M = 1023  # code period in chips
    N = 23    # code values per output line
    Sca = + str(Nca)
    f = open(fname_out, )
    f.write()
    f.write()
    f.write()
    f.write( % M)
    f.write()
    f.write()
    f.write( % Nca)
    f.write( % Nca)

    kk = 0
    for k in range(M):
        if (kk < N - 1) and (k < M - 1):
            f.write( % ca[k])
            kk += 1
        elif (kk == N - 1) & (k < M - 1):
            f.write( % ca[k])
            if k < M:
                if Nca < 10:
                    f.write()
                else:
                    f.write()
            kk = 0
        else:
            f.write( % ca[k])
    f.write()
    f.write()
    f.close()
Write 1023 bit CA (Gold) Code Header Files

Mark Wickert February 2015
10,984
def environment_as(**kwargs):
    new_environment = kwargs
    old_environment = {}

    def setenv(key, val):
        if val is not None:
            os.environ[key] = val if PY3 else _os_encode(val)
        else:
            if key in os.environ:
                del os.environ[key]

    for key, val in new_environment.items():
        old_environment[key] = os.environ.get(key)
        setenv(key, val)
    try:
        yield
    finally:
        for key, val in old_environment.items():
            setenv(key, val)
Update the environment to the supplied values, for example:

with environment_as(PYTHONPATH='foo:bar:baz',
                    PYTHON='/usr/bin/python2.7'):
    subprocess.Popen(foo).wait()
10,985
def shout(self, group, msg_p):
    return lib.zyre_shout(self._as_parameter_, group,
                          byref(czmq.zmsg_p.from_param(msg_p)))
Send message to a named group Destroys message after sending
10,986
def action(callback=None, name=None, path=None, methods=Method.GET, resource=None,
           tags=None, summary=None, middleware=None):
    # The single-quoted default path literal was lost in extraction;
    # Ellipsis marks the gap.
    path = path or ...
    if name:
        path += name

    def inner(c):
        return Operation(c, path, methods, resource, tags, summary, middleware)

    return inner(callback) if callback else inner
Decorator to apply an action to a resource. An action is applied to a `detail` operation.
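The `return inner(callback) if callback else inner` tail lets the decorator work both bare and with arguments; a self-contained sketch of that pattern with invented names:

# Minimal sketch of the optional-callback decorator pattern; the
# attribute name 'action_name' is invented for illustration.
def action(callback=None, *, name=None):
    def inner(func):
        func.action_name = name or func.__name__
        return func
    # Called as @action, callback is the function itself; called as
    # @action(...), callback is None and inner is the real decorator.
    return inner(callback) if callback else inner

@action
def ping():
    return "pong"

@action(name="say-hello")
def hello():
    return "hi"

print(ping.action_name, hello.action_name)   # ping say-hello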
10,987
def fully_correlated_conditional(Kmn, Kmm, Knn, f, *, full_cov=False,
                                 full_output_cov=False, q_sqrt=None, white=False):
    m, v = fully_correlated_conditional_repeat(Kmn, Kmm, Knn, f,
                                               full_cov=full_cov,
                                               full_output_cov=full_output_cov,
                                               q_sqrt=q_sqrt, white=white)
    return m[0, ...], v[0, ...]
This function handles conditioning of multi-output GPs in the case where
the conditioning points are all fully correlated, in both the prior and
posterior.

:param Kmn: LM x N x P
:param Kmm: LM x LM
:param Knn: N x P or N x P x N x P
:param f: data matrix, LM x 1
:param q_sqrt: 1 x LM x LM or 1 x ML
:param full_cov: calculate covariance between inputs
:param full_output_cov: calculate covariance between outputs
:param white: use whitened representation
:return:
    - mean: N x P
    - variance: N x P, N x P x P, P x N x N, N x P x N x P
10,988
def encode_basestring(s):
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')

    def replace(match):
        return ESCAPE_DCT[match.group(0)]

    return u'"' + ESCAPE.sub(replace, s) + u'"'
Return a JSON representation of a Python string
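The snippet above is the Python 2 variant; on Python 3 the stdlib exposes the same operation, which makes the behavior easy to check:

import json

# Quote characters and control characters are escaped, and the result
# is wrapped in double quotes.
print(json.encoder.encode_basestring('he said "hi"\n'))
# -> "he said \"hi\"\n"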
10,989
def _construct_axes_dict(self, axes=None, **kwargs):
    d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
    d.update(kwargs)
    return d
Return an axes dictionary for myself.
10,990
def recipe_weinreb17(adata, log=True, mean_threshold=0.01, cv_threshold=2,
                     n_pcs=50, svd_solver='randomized', random_state=0,
                     copy=False):
    from scipy.sparse import issparse
    if issparse(adata.X):
        # The original single-quoted error message was lost in extraction;
        # this wording is a reconstruction.
        raise ValueError('recipe_weinreb17 does not support sparse matrices')
    if copy:
        adata = adata.copy()
    if log:
        pp.log1p(adata)
    adata.X = pp.normalize_per_cell_weinreb16_deprecated(adata.X,
                                                         max_fraction=0.05,
                                                         mult_with_mean=True)
    gene_subset = filter_genes_cv_deprecated(adata.X, mean_threshold, cv_threshold)
    adata._inplace_subset_var(gene_subset)
    X_pca = pp.pca(pp.zscore_deprecated(adata.X), n_comps=n_pcs,
                   svd_solver=svd_solver, random_state=random_state)
    adata.obsm['X_pca'] = X_pca
    return adata if copy else None
Normalization and filtering as of [Weinreb17]_.

Expects non-logarithmized data. If using logarithmized data, pass `log=False`.

Parameters
----------
adata : :class:`~anndata.AnnData`
    Annotated data matrix.
copy : bool (default: False)
    Return a copy if true.
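A usage sketch, assuming this is scanpy's `sc.pp.recipe_weinreb17` (the input file name is a placeholder):

# Hypothetical usage; 'counts.h5ad' stands in for any dense,
# non-logarithmized AnnData file.
import scanpy as sc

adata = sc.read('counts.h5ad')
sc.pp.recipe_weinreb17(adata, log=True, n_pcs=50)
print(adata.obsm['X_pca'].shape)    # (n_cells, 50)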
10,991
def create_mssql_pymssql(username, password, host, port, database, **kwargs):
    return create_engine(
        _create_mssql_pymssql(username, password, host, port, database),
        **kwargs
    )
create an engine connected to a mssql database using pymssql.
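Presumably the helper builds SQLAlchemy's `mssql+pymssql` URL; the equivalent hand-written form (credentials and host are placeholders):

from sqlalchemy import create_engine

# Equivalent hand-written connection string for the pymssql dialect.
engine = create_engine("mssql+pymssql://user:password@host:1433/dbname")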
10,992
def _init_read_gz(self):
    self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
    self.dbuf = b""

    if self.__read(2) != b"\037\213":
        raise ReadError("not a gzip file")
    if self.__read(1) != b"\010":
        raise CompressionError("unsupported compression method")

    flag = ord(self.__read(1))
    self.__read(6)

    if flag & 4:   # FEXTRA: skip the extra field
        xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
        self.read(xlen)
    if flag & 8:   # FNAME: skip the NUL-terminated file name
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 16:  # FCOMMENT: skip the NUL-terminated comment
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 2:   # FHCRC: skip the header CRC
        self.__read(2)
Initialize for reading a gzip compressed fileobj.
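The magic bytes checked above are the standard gzip header fields; a self-contained sketch that verifies them against real gzip output:

import gzip

blob = gzip.compress(b"hello")
assert blob[0:2] == b"\x1f\x8b"    # magic number (b"\037\213" in octal)
assert blob[2:3] == b"\x08"        # deflate method (b"\010" in octal)
flag = blob[3]
# Same flag bits the method branches on: FHCRC=2, FEXTRA=4, FNAME=8,
# FCOMMENT=16.
print(flag & 2, flag & 4, flag & 8, flag & 16)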
10,993
def save_parameters(path, params=None):
    # The single-quoted literals below ('.h5', 'ignore', 'w', 'need_grad',
    # 'index', '.protobuf', and the critical-log message) were lost in
    # extraction and are reconstructed from NNabla's documented behavior.
    _, ext = os.path.splitext(path)
    params = get_parameters(grad_only=False) if params is None else params
    if ext == '.h5':
        import warnings
        warnings.simplefilter('ignore', category=FutureWarning)
        import h5py
        with h5py.File(path, 'w') as hd:
            for i, (k, v) in enumerate(iteritems(params)):
                hd[k] = v.d
                hd[k].attrs['need_grad'] = v.need_grad
                # To preserve order of parameters
                hd[k].attrs['index'] = i
    elif ext == '.protobuf':
        proto = nnabla_pb2.NNablaProtoBuf()
        for variable_name, variable in params.items():
            parameter = proto.parameter.add()
            parameter.variable_name = variable_name
            parameter.shape.dim.extend(variable.shape)
            parameter.data.extend(numpy.array(variable.d).flatten().tolist())
            parameter.need_grad = variable.need_grad
        with open(path, "wb") as f:
            f.write(proto.SerializeToString())
    else:
        logger.critical('Unsupported parameter file extension: {}'.format(ext))
        assert False
    logger.info("Parameter save ({}): {}".format(ext, path))
Save all parameters into a file with the specified format.

Currently hdf5 and protobuf formats are supported.

Args:
    path : path or file object
    params (dict, optional): Parameters to be saved. Dictionary is of a
        parameter name (:obj:`str`) to :obj:`~nnabla.Variable`.
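A usage sketch, assuming this is NNabla's `nnabla.save_parameters` (the file name is a placeholder):

# Hypothetical usage; registers two parameters, then saves them as hdf5.
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((1, 3))
y = PF.affine(x, 2)              # creates 'affine/W' and 'affine/b'
nn.save_parameters('params.h5')  # the '.h5' extension selects the hdf5 branch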
10,994
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
    threshold = TypeConverters.toFloat(threshold)
    return self._call_java("approxSimilarityJoin", datasetA, datasetB,
                           threshold, distCol)
Join two datasets to approximately find all pairs of rows whose distance
are smaller than the threshold. If the :py:attr:`outputCol` is missing,
the method will transform the data; if the :py:attr:`outputCol` exists,
it will use that. This allows caching of the transformed data when
necessary.

:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair
    of rows. Use "distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are
    in columns "datasetA" and "datasetB", and a column "distCol" is added
    to show the distance between each pair.
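An illustrative call with MinHashLSH, adapted from the PySpark documentation (an active SparkSession named `spark` is assumed):

from pyspark.ml.feature import MinHashLSH
from pyspark.ml.linalg import Vectors

dfA = spark.createDataFrame(
    [(0, Vectors.sparse(6, [0, 1], [1.0, 1.0])),
     (1, Vectors.sparse(6, [2, 3], [1.0, 1.0]))], ["id", "features"])
dfB = spark.createDataFrame(
    [(2, Vectors.sparse(6, [0, 2], [1.0, 1.0]))], ["id", "features"])

model = MinHashLSH(inputCol="features", outputCol="hashes", seed=12345).fit(dfA)
# Pairs whose Jaccard distance is below 0.9, with the distance column named.
model.approxSimilarityJoin(dfA, dfB, 0.9, distCol="JaccardDistance").show()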
10,995
def reboot(name, **kwargs):
    conn = __get_conn(**kwargs)
    ret = _get_domain(conn, name).reboot(libvirt.VIR_DOMAIN_REBOOT_DEFAULT) == 0
    conn.close()
    return ret
Reboot a domain via ACPI request

:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults

    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults

    .. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

CLI Example:

.. code-block:: bash

    salt '*' virt.reboot <domain>
10,996
def setup(name, path='log', enable_debug=False):
    # Most single-quoted literals in this function (the default path, the
    # path template, level names, encoding, date format, mail template and
    # subject) were lost in extraction; the values below are plausible
    # reconstructions, not verified against the source.
    path_tmpl = os.path.join(path, '{name}_{level}.log')   # assumed pattern
    info = path_tmpl.format(name=name, level='info')
    warn = path_tmpl.format(name=name, level='warn')
    err = path_tmpl.format(name=name, level='err')
    crit = path_tmpl.format(name=name, level='crit')
    setup = [
        # never bubble up to the stderr handler
        NullHandler(),
        TimedRotatingFileHandler(info, level='INFO', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        TimedRotatingFileHandler(warn, level='WARNING', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        TimedRotatingFileHandler(err, level='ERROR', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
        TimedRotatingFileHandler(crit, level='CRITICAL', encoding='utf-8',
                                 date_format='%Y-%m-%d'),
    ]
    if enable_debug:
        debug = path_tmpl.format(name=name, level='debug')
        setup.insert(1, TimedRotatingFileHandler(debug, level='DEBUG',
                                                 encoding='utf-8',
                                                 date_format='%Y-%m-%d'))
    if src_server is not None and smtp_server is not None \
            and smtp_port != 0 and len(dest_mails) != 0:
        mail_tmpl = '{name}@{src}'                        # assumed template
        from_mail = mail_tmpl.format(name=name, src=src_server)
        subject = '[{}] Critical error'.format(name)      # assumed subject
        # Send a mail on critical messages
        setup.append(MailHandler(from_mail, dest_mails, subject,
                                 level='CRITICAL', bubble=True,
                                 server_addr=(smtp_server, smtp_port)))
    return NestedSetup(setup)
Prepare a NestedSetup.

:param name: the channel name
:param path: the path where the logs will be written
:param enable_debug: do we want to save the message at the DEBUG level
:return: a NestedSetup
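A usage sketch, assuming the Logbook handlers above (the channel name and path are placeholders):

# Hypothetical usage; NestedSetup supports Logbook's context helpers.
from logbook import Logger

log = Logger('myservice')
with setup('myservice', path='logs').applicationbound():
    log.info('service started')   # routed to the INFO rotating file handler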
10,997
def get(self, request, bot_id, id, format=None):
    return super(KikBotDetail, self).get(request, bot_id, id, format)
Get KikBot by id
---
serializer: KikBotSerializer
responseMessages:
    - code: 401
      message: Not authenticated
10,998
def extract_python_dict_from_x509(x509):
    result = {
        "subject": (
            (("commonName", x509.get_subject().commonName),),
        )
    }

    for ext_idx in range(x509.get_extension_count()):
        ext = x509.get_extension(ext_idx)
        sn = ext.get_short_name()
        if sn != b"subjectAltName":
            continue

        data = pyasn1.codec.der.decoder.decode(
            ext.get_data(),
            asn1Spec=pyasn1_modules.rfc2459.SubjectAltName())[0]
        for name in data:
            dNSName = name.getComponentByPosition(2)
            if dNSName is None:
                continue
            if hasattr(dNSName, "isValue") and not dNSName.isValue:
                continue
            result.setdefault("subjectAltName", []).append(
                ("DNS", str(dNSName))
            )

    return result
Extract a python dictionary similar to the return value of :meth:`ssl.SSLSocket.getpeercert` from the given :class:`OpenSSL.crypto.X509` `x509` object. Note that by far not all attributes are included; only those required to use :func:`ssl.match_hostname` are extracted and put in the result. In the future, more attributes may be added.
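A usage sketch showing the stated purpose, pairing the result with :func:`ssl.match_hostname` (the certificate object is assumed to come from an established OpenSSL connection):

import ssl

# x509 is an OpenSSL.crypto.X509, e.g. from conn.get_peer_certificate().
cert_dict = extract_python_dict_from_x509(x509)
ssl.match_hostname(cert_dict, "example.com")
# Raises ssl.CertificateError if neither subjectAltName nor commonName
# matches; returns None on success.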
10,999
@property  # restored: the docstring describes attribute-style access
def ax(self):
    if not hasattr(self, "_ax") or self._ax is None:
        self._ax = plt.gca()
    return self._ax
The matplotlib axes that the visualizer draws upon (can also be a grid of multiple axes objects). The visualizer automatically creates an axes for the user if one has not been specified.
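A self-contained sketch of the same lazy-default pattern (the class here is a stand-in, not the library's Visualizer):

# Minimal stand-in demonstrating the lazy plt.gca() fallback.
import matplotlib.pyplot as plt

class Visualizer:
    def __init__(self, ax=None):
        self._ax = ax

    @property
    def ax(self):
        if self._ax is None:
            self._ax = plt.gca()   # create/fetch the current axes on demand
        return self._ax

v = Visualizer()
v.ax.plot([0, 1], [0, 1])          # axes created on first access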