def get_introspection_data(cls, tax_benefit_system):
    """
    Get introspection data about the code of the variable.

    :returns: (comments, source file path, source code, start line number)
    :rtype: tuple
    """
    comments = inspect.getcomments(cls)

    # Handle dynamically generated variable classes or Jupyter Notebooks, which have no source.
    try:
        absolute_file_path = inspect.getsourcefile(cls)
    except TypeError:
        source_file_path = None
    else:
        source_file_path = absolute_file_path.replace(tax_benefit_system.get_package_metadata()['location'], '')
    try:
        source_lines, start_line_number = inspect.getsourcelines(cls)
        source_code = textwrap.dedent(''.join(source_lines))
    except (IOError, TypeError):
        source_code, start_line_number = None, None

    return comments, source_file_path, source_code, start_line_number
def _extract_object_params(self, name):
    """
    Extract object params, return as dict
    """
    params = self.request.query_params.lists()
    params_map = {}
    prefix = name[:-1]
    offset = len(prefix)
    for name, value in params:
        if name.startswith(prefix):
            if name.endswith('}'):
                name = name[offset:-1]
            elif name.endswith('}[]'):
                # strip off trailing []
                # this fixes an Ember queryparams issue
                name = name[offset:-3]
            else:
                # malformed argument like:
                # filter{foo=bar
                raise exceptions.ParseError(
                    '"%s" is not a well-formed filter key.' % name
                )
        else:
            continue
        params_map[name] = value

    return params_map
def required_from_env(key):
    """
    Retrieve a required variable from the current environment variables.

    Raises a ValueError if the env variable is not found or has no value.
    """
    val = os.environ.get(key)
    if not val:
        raise ValueError(
            "Required argument '{}' not supplied and not found in environment variables".format(key))
    return val
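A quick usage sketch (the variable names are illustrative, not from the original project):

import os

os.environ['DATABASE_URL'] = 'postgres://localhost/app'
assert required_from_env('DATABASE_URL') == 'postgres://localhost/app'

# A missing or empty variable raises ValueError:
os.environ['EMPTY'] = ''
try:
    required_from_env('EMPTY')
except ValueError as e:
    print(e)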
def system_qos_qos_service_policy_direction(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    system_qos = ET.SubElement(config, "system-qos", xmlns="urn:brocade.com:mgmt:brocade-policer")
    qos = ET.SubElement(system_qos, "qos")
    service_policy = ET.SubElement(qos, "service-policy")
    policy_map_name_key = ET.SubElement(service_policy, "policy-map-name")
    policy_map_name_key.text = kwargs.pop('policy_map_name')
    direction = ET.SubElement(service_policy, "direction")
    direction.text = kwargs.pop('direction')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def clique(graph, id):
    """ Returns a clique for the node with given id, grown greedily
    (so it is not guaranteed to be the maximum clique).
    """
    clique = [id]
    for n in graph.nodes:
        friend = True
        # Renamed the inner loop variable from `id`, which shadowed the parameter.
        for member_id in clique:
            if n.id == member_id or graph.edge(n.id, member_id) == None:
                friend = False
                break
        if friend:
            clique.append(n.id)
    return clique
def status(cwd, opts=None, user=None):
    '''
    Show changed files of the given repository

    cwd
        The path to the Mercurial repository

    opts : None
        Any additional options to add to the command line

    user : None
        Run hg as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' hg.status /path/to/repo
    '''
    def _status(cwd):
        cmd = ['hg', 'status']
        if opts:
            for opt in opts.split():
                cmd.append('{0}'.format(opt))
        out = __salt__['cmd.run_stdout'](
            cmd, cwd=cwd, runas=user, python_shell=False)
        types = {
            'M': 'modified',
            'A': 'added',
            'R': 'removed',
            'C': 'clean',
            '!': 'missing',
            '?': 'not tracked',
            'I': 'ignored',
            ' ': 'origin of the previous file',
        }
        ret = {}
        for line in out.splitlines():
            t, f = types[line[0]], line[2:]
            if t not in ret:
                ret[t] = []
            ret[t].append(f)
        return ret

    if salt.utils.data.is_iter(cwd):
        return dict((cwd, _status(cwd)) for cwd in cwd)
    else:
        return _status(cwd)
def getitem_es(self, key):
    """ Override to support ACL filtering.

    To do so: passes `self.request` to `get_item` and uses `ACLFilterES`.
    """
    from nefertari_guards.elasticsearch import ACLFilterES
    es = ACLFilterES(self.item_model.__name__)
    params = {
        'id': key,
        'request': self.request,
    }
    obj = es.get_item(**params)
    obj.__acl__ = self.item_acl(obj)
    obj.__parent__ = self
    obj.__name__ = key
    return obj
def set(self, safe_len=False, **kwds):
    '''Set one or more attributes.
    '''
    if kwds:  # double check
        d = self.kwds()
        d.update(kwds)
        self.reset(**d)
    if safe_len and self.item:
        self.leng = _len
def real(self):
    ''' Get realtime data

    :rtype: dict
    :returns: for the meaning of the codes, see: http://goristock.appspot.com/API#apiweight
    '''
    result = self.__raw['1'].copy()
    result['c'] = self.__raw['1']['value']
    result['value'] = self.__raw['200']['v2']
    result['date'] = self.__raw['0']['time']
    return result
def get_single(decl_matcher, decls, recursive=True):
    """
    Returns a reference to the declaration that matches the criteria defined
    by `decl_matcher`.

    If a unique declaration could not be found, an appropriate exception
    will be raised.

    :param decl_matcher: Python callable object, that takes one argument -
        reference to a declaration
    :param decls: the search scope, :class:declaration_t object or
        :class:declaration_t objects list
    :param recursive: boolean, if True, the method will run `decl_matcher`
        on the internal declarations too
    """
    answer = matcher.find(decl_matcher, decls, recursive)
    if len(answer) == 1:
        return answer[0]
    elif not answer:
        raise runtime_errors.declaration_not_found_t(decl_matcher)
    else:
        raise runtime_errors.multiple_declarations_found_t(decl_matcher)
def location(self, tag=None, fromdate=None, todate=None):
    """Gets an overview of which part of the email links were clicked from
    (HTML or Text). This is only recorded when Link Tracking is enabled for
    that email.
    """
    return self.call("GET", "/stats/outbound/clicks/location", tag=tag, fromdate=fromdate, todate=todate)
def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
    """
    Decode incoming response

    Args:
        status: Response status
        headers: Response headers
        body: Response body

    Returns:
        Response data
    """
    data = decode_body(headers, body)
    raise_for_status(status, headers, data)
    raise_for_api_error(headers, data)
    return data
def update_sql(table, filter, updates):
    '''
    >>> update_sql('tbl', {'foo': 'a', 'bar': 1}, {'bar': 2, 'baz': 'b'})
    ('UPDATE tbl SET bar=$1, baz=$2 WHERE bar=$3 AND foo=$4', [2, 'b', 1, 'a'])
    '''
    where_keys, where_vals = _split_dict(filter)
    up_keys, up_vals = _split_dict(updates)
    changes = _pairs(up_keys, sep=', ')
    where = _pairs(where_keys, start=len(up_keys) + 1)
    sql = 'UPDATE {} SET {} WHERE {}'.format(
        table, changes, where)
    return sql, up_vals + where_vals
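The helpers `_split_dict` and `_pairs` are not shown in this snippet; a minimal sketch that reproduces the doctest above (an assumption about the real helpers, not the library's actual code) could look like:

def _split_dict(d):
    # Sort keys so placeholder numbering is deterministic, as in the doctest.
    keys = sorted(d)
    return keys, [d[k] for k in keys]

def _pairs(keys, sep=' AND ', start=1):
    # Render "key=$n" pairs with numbered placeholders starting at `start`.
    return sep.join('{}=${}'.format(k, i) for i, k in enumerate(keys, start))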
def add_factuality(self, my_fact):
    """
    Adds a factuality to the factuality layer
    @type my_fact: L{Cfactuality}
    @param my_fact: factuality object
    """
    if self.factuality_layer is None:
        self.factuality_layer = Cfactualities()
        self.root.append(self.factuality_layer.get_node())
    self.factuality_layer.add_factuality(my_fact)
def convert_identifiers(self, identifiers: Union[Identifier, List[Identifier]]):
    """
    Convert an individual :class:`Identifier` to a model instance, or a list
    of Identifiers to a list of model instances.
    """
    if not identifiers:
        return identifiers

    def _create_or_update(identifier):
        data = self._cache[identifier.key]
        return self.factory.create_or_update(identifier, data)[0]

    if isinstance(identifiers, Identifier):
        return _create_or_update(identifiers)
    elif isinstance(identifiers, list) and isinstance(identifiers[0], Identifier):
        return [_create_or_update(identifier) for identifier in identifiers]
    else:
        raise TypeError('`identifiers` must be an Identifier or list of Identifiers.')
def dump(self):
    """Output all recorded metrics"""
    with self.lock:
        atexit.unregister(self.dump)
        self.fh.close()
def eval_constraints(self, constraints):
    """Returns whether the constraints are satisfied trivially by using the
    last model."""
    # eval_ast is concretizing symbols and evaluating them, this can raise
    # exceptions.
    try:
        return all(self.eval_ast(c) for c in constraints)
    except errors.ClaripyZeroDivisionError:
        return False
def description(self):
    """ Cursor description, see
    http://legacy.python.org/dev/peps/pep-0249/#description """
    if self._session is None:
        return None
    res = self._session.res_info
    if res:
        return res.description
    else:
        return None
def regular_to_sparse_from_sparse_mappings(regular_to_unmasked_sparse, unmasked_sparse_to_sparse):
    """Using the mapping between the regular-grid and unmasked pixelization grid, compute the mapping between each
    regular pixel and the masked pixelization grid.

    Parameters
    ----------
    regular_to_unmasked_sparse : ndarray
        The index mapping between every regular-pixel and masked pixelization pixel.
    unmasked_sparse_to_sparse : ndarray
        The index mapping between every masked pixelization pixel and unmasked pixelization pixel.
    """
    total_regular_pixels = regular_to_unmasked_sparse.shape[0]

    # Use an integer dtype: the values stored here are array indices, not floats.
    regular_to_sparse = np.zeros(total_regular_pixels, dtype=int)

    for regular_index in range(total_regular_pixels):
        regular_to_sparse[regular_index] = unmasked_sparse_to_sparse[regular_to_unmasked_sparse[regular_index]]

    return regular_to_sparse
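Since the loop is a pure gather, NumPy's fancy indexing computes the same mapping in one step; a small illustrative check:

import numpy as np

regular_to_unmasked_sparse = np.array([0, 2, 1])
unmasked_sparse_to_sparse = np.array([0, 0, 1])

# Equivalent to the loop above, as a single vectorized gather:
regular_to_sparse = unmasked_sparse_to_sparse[regular_to_unmasked_sparse]
# -> array([0, 1, 0])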
def get_oembed(self, url):
    """Return an oEmbed format json dictionary

    :param url: Image page URL (ex. http://gyazo.com/xxxxx)
    """
    api_url = self.api_url + '/api/oembed'
    parameters = {
        'url': url
    }
    response = self._request_url(api_url, 'get', params=parameters)
    headers, result = self._parse_and_check(response)
    return result
def disease(self, identifier=None, ref_id=None, ref_type=None, name=None, acronym=None, description=None,
            entry_name=None, limit=None, as_df=False):
    """Method to query :class:`.models.Disease` objects in database

    :param identifier: disease UniProt identifier(s)
    :type identifier: str or tuple(str) or None

    :param ref_id: identifier(s) of referenced database
    :type ref_id: str or tuple(str) or None

    :param ref_type: database name(s)
    :type ref_type: str or tuple(str) or None

    :param name: disease name(s)
    :type name: str or tuple(str) or None

    :param acronym: disease acronym(s)
    :type acronym: str or tuple(str) or None

    :param description: disease description(s)
    :type description: str or tuple(str) or None

    :param entry_name: name(s) in :class:`.models.Entry`
    :type entry_name: str or tuple(str) or None

    :param limit:
        - if `isinstance(limit, int) == True` -> limit
        - if `isinstance(limit, tuple) == True` -> format := tuple(page_number, results_per_page)
        - if limit == None -> all results
    :type limit: int or tuple(int) or None

    :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`

    :return:
        - if `as_df == False` -> list(:class:`.models.Disease`)
        - if `as_df == True`  -> :class:`pandas.DataFrame`
    :rtype: list(:class:`.models.Disease`) or :class:`pandas.DataFrame`
    """
    q = self.session.query(models.Disease)

    model_queries_config = (
        (identifier, models.Disease.identifier),
        (ref_id, models.Disease.ref_id),
        (ref_type, models.Disease.ref_type),
        (name, models.Disease.name),
        (acronym, models.Disease.acronym),
        (description, models.Disease.description)
    )
    q = self.get_model_queries(q, model_queries_config)

    if entry_name:
        q = q.session.query(models.Disease).join(models.DiseaseComment).join(models.Entry)
        if isinstance(entry_name, str):
            q = q.filter(models.Entry.name == entry_name)
        elif isinstance(entry_name, Iterable):
            q = q.filter(models.Entry.name.in_(entry_name))

    return self._limit_and_df(q, limit, as_df)
def del_option_by_number(self, number):
    """
    Delete an option from the message by number

    :type number: Integer
    :param number: option number
    """
    for o in list(self._options):
        assert isinstance(o, Option)
        if o.number == number:
            self._options.remove(o)
def get_form_label(self, request=None, obj=None, model=None, form=None):
    """Returns a customized form label, if condition is met,
    otherwise returns the default form label.

    * condition is an instance of CustomLabelCondition.
    """
    label = form.base_fields[self.field].label
    condition = self.condition_cls(request=request, obj=obj, model=model)
    if condition.check():
        additional_opts = condition.get_additional_options(
            request=request, obj=obj, model=model
        )
        visit_datetime = ""
        if obj:
            visit_datetime = getattr(
                obj, obj.visit_model_attr()
            ).report_datetime.strftime("%B %Y")
        try:
            label = self.custom_label.format(
                appointment=condition.appointment,
                previous_appointment=condition.previous_appointment,
                previous_obj=condition.previous_obj,
                previous_visit=condition.previous_visit,
                visit_datetime=visit_datetime,
                **additional_opts,
            )
        except KeyError as e:
            raise CustomFormLabelError(
                f"Custom label template has invalid keys. See {label}. Got {e}."
            )
    return label
def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
    """Continuous wavelet transform
    Note the full results can use a huge amount of memory at 64-bit precision

    Args:
      ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
      freqs: list of frequencies (in Hz) to use for the transform.
        (default is 50 frequency bins logarithmic from 1Hz to 100Hz)
      wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
      plot: whether to plot time-resolved power spectrum

    Returns:
      coefs: Continuous wavelet transform output array, shape (n, len(freqs), m)
    """
    orig_ndim = ts.ndim
    if ts.ndim == 1:  # compare integers with ==, not `is`
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    fs = (len(ts) - 1.0) / (1.0 * ts.tspan[-1] - ts.tspan[0])
    x = signal.detrend(ts, axis=0)
    dtype = wavelet(fs / freqs[0], fs / freqs[0]).dtype
    coefs = np.zeros((len(ts), len(freqs), channels), dtype)
    for i in range(channels):
        # pass the `wavelet` argument through instead of hard-coding cwtmorlet,
        # so the parameter actually takes effect
        coefs[:, :, i] = roughcwt(x[:, i], wavelet, fs / freqs).T
    if plot:
        _plot_cwt(ts, coefs, freqs)
    if orig_ndim == 1:
        coefs = coefs[:, :, 0]
    return coefs
def perimeter(patch, world_size=(60, 60), neighbor_func=get_rook_neighbors_toroidal):
    """
    Count cell faces in patch that do not connect to part of patch.

    This preserves various square geometry features that would not be
    preserved by merely counting the number of cells that touch an edge.
    """
    edge = 0
    patch = set([tuple(i) for i in patch])
    for cell in patch:
        neighbors = neighbor_func(cell, world_size)
        neighbors = [n for n in neighbors if n not in patch]
        edge += len(neighbors)
    return edge
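A sanity check with a minimal neighbor function; the toroidal rook implementation below is an assumption inferred from the function's name and call signature:

def get_rook_neighbors_toroidal(cell, world_size):
    # Four orthogonal neighbors, wrapping around the world edges.
    x, y = cell
    w, h = world_size
    return [((x + 1) % w, y), ((x - 1) % w, y),
            (x, (y + 1) % h), (x, (y - 1) % h)]

# A single isolated cell exposes all four faces:
assert perimeter([(0, 0)], neighbor_func=get_rook_neighbors_toroidal) == 4
# Two adjacent cells share one face each, leaving 6 exposed faces:
assert perimeter([(0, 0), (0, 1)], neighbor_func=get_rook_neighbors_toroidal) == 6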
def unplug(self):
    '''Remove the actor's methods from the callback registry.'''
    if not self.__plugged:
        return
    members = set([method for _, method in
                   inspect.getmembers(self, predicate=inspect.ismethod)])
    for message in global_callbacks:
        global_callbacks[message] -= members
    self.__plugged = False
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False):
    """Return a list of patterns compiled from the RDF/SKOS ontology.

    Uses cache if it exists and if the taxonomy hasn't changed.
    """
    # Translate the ontology name into a local path. Check if the name
    # relates to an existing ontology.
    onto_name, onto_path, onto_url = _get_ontology(taxonomy_name)

    if not onto_path:
        raise TaxonomyError("Unable to locate the taxonomy: '%s'."
                            % taxonomy_name)

    cache_path = _get_cache_path(onto_name)
    current_app.logger.debug(
        'Taxonomy discovered, now we load it '
        '(from cache: %s, onto_path: %s, cache_path: %s)'
        % (not no_cache, onto_path, cache_path)
    )

    if os.access(cache_path, os.R_OK):
        if os.access(onto_path, os.R_OK):
            if rebuild or no_cache:
                current_app.logger.debug(
                    "Cache generation was manually forced.")
                return _build_cache(onto_path, skip_cache=no_cache)
        else:
            # ontology file not found. Use the cache instead.
            current_app.logger.warning(
                "The ontology couldn't be located. However "
                "a cached version of it is available. Using it as a "
                "reference."
            )
            return _get_cache(cache_path, source_file=onto_path)

        if os.path.getmtime(cache_path) > os.path.getmtime(onto_path):
            # Cache is more recent than the ontology: use cache.
            current_app.logger.debug(
                "Normal situation, cache is older than ontology,"
                " so we load it from cache"
            )
            return _get_cache(cache_path, source_file=onto_path)
        else:
            # Ontology is more recent than the cache: rebuild cache.
            current_app.logger.warning(
                "Cache '%s' is older than '%s'. "
                "We will rebuild the cache" % (cache_path, onto_path)
            )
            return _build_cache(onto_path, skip_cache=no_cache)

    elif os.access(onto_path, os.R_OK):
        if not no_cache and \
                os.path.exists(cache_path) and \
                not os.access(cache_path, os.W_OK):
            raise TaxonomyError('We cannot read/write into: %s. '
                                'Aborting!' % cache_path)
        elif not no_cache and os.path.exists(cache_path):
            current_app.logger.warning(
                'Cache %s exists, but is not readable!' % cache_path)
        current_app.logger.info(
            "Cache not available. Building it now: %s" % onto_path)
        return _build_cache(onto_path, skip_cache=no_cache)

    else:
        raise TaxonomyError("We miss both source and cache"
                            " of the taxonomy: %s" % taxonomy_name)
def run_segment_operation(outdoc, filenames, segments, use_segment_table, operation, result_name='RESULT', preserve=True):
    """
    Performs an operation (intersect or union) across a set of segments.
    That is, given a set of files each with segment definers DMT-FLAG1,
    DMT-FLAG2 etc and a list of segments DMT-FLAG1,DMT-FLAG1 this returns

    RESULT = (table 1's DMT-FLAG1 union table 2's DMT-FLAG1 union ...)
             operation
             (table 1's DMT-FLAG2 union table 2's DMT-FLAG2 union ...)
             operation
             etc
    """
    proc_id = table.get_table(outdoc, lsctables.ProcessTable.tableName)[0].process_id

    if preserve:
        indoc = ligolw_add.ligolw_add(outdoc, filenames)
    else:
        indoc = ligolw_add.ligolw_add(ligolw.Document(), filenames)

    # Start with a segment covering all of time, then
    # intersect with each of the fields of interest
    keys = segments.split(',')

    if operation == INTERSECT:
        sgmntlist = pycbc_glue.segments.segmentlist(
            [pycbc_glue.segments.segment(-pycbc_glue.segments.infinity(),
                                         pycbc_glue.segments.infinity())])
        for key in keys:
            sgmntlist &= find_segments(indoc, key, use_segment_table)
    elif operation == UNION:
        sgmntlist = pycbc_glue.segments.segmentlist([])
        for key in keys:
            sgmntlist |= find_segments(indoc, key, use_segment_table)
    elif operation == DIFF:
        sgmntlist = find_segments(indoc, keys[0], use_segment_table)
        for key in keys[1:]:
            sgmntlist -= find_segments(indoc, key, use_segment_table)
    else:
        raise NameError("%s is not a known operation (intersect, union or diff)" % operation)

    # Add a segment definer and segments
    seg_def_id = add_to_segment_definer(outdoc, proc_id, '', result_name, 1)

    if use_segment_table:
        add_to_segment(outdoc, proc_id, seg_def_id, sgmntlist)
    else:
        add_to_segment_summary(outdoc, proc_id, seg_def_id, sgmntlist)

    return outdoc, abs(sgmntlist)
def addVariant(self, variant):
    '''Appends one Variant to variants
    '''
    if isinstance(variant, Variant):
        self.variants.append(variant)
    else:
        # `raise (ExcClass, 'msg')` raises a tuple, not an exception;
        # construct the exception properly instead.
        raise VariantError('variant Type should be Variant, not %s'
                           % type(variant))
def _throat_props(self):
    r'''
    Helper Function to calculate the throat normal vectors
    '''
    network = self.network
    net_Ts = network.throats(self.name)
    conns = network['throat.conns'][net_Ts]
    p1 = conns[:, 0]
    p2 = conns[:, 1]
    coords = network['pore.coords']
    normals = tr.unit_vector(coords[p2] - coords[p1])
    self['throat.normal'] = normals
    self['throat.centroid'] = (coords[p1] + coords[p2]) / 2
    self['throat.incenter'] = self['throat.centroid']
def translations_lists(self):
    '''Iterator over lists of content translations'''
    return (getattr(self.generator, name) for name in
            self.info.get('translations_lists', []))
def new_process(self, consumer_name):
    """Create a new consumer instance

    :param str consumer_name: The name of the consumer
    :return tuple: (str, process.Process)
    """
    process_name = '%s-%s' % (consumer_name,
                              self.new_process_number(consumer_name))
    kwargs = {
        'config': self.config.application,
        'consumer_name': consumer_name,
        'profile': self.profile,
        'daemon': False,
        'stats_queue': self.stats_queue,
        'logging_config': self.config.logging
    }
    return process_name, process.Process(name=process_name, kwargs=kwargs)
def init(*, threshold_lvl=1, quiet_stdout=False, log_file):
    """
    Initiate the log module

    :param threshold_lvl: messages under this level won't be issued/logged
    :param quiet_stdout: when True, suppress the stdout log stream
    :param log_file: path of the file to log to
    """
    global _logger, _log_lvl
    # translate lvl to those used by 'logging' module
    _log_lvl = _set_lvl(threshold_lvl)

    # logger creation
    _logger = logging.getLogger(PKG_NAME)
    _logger.setLevel(_log_lvl)

    # create file handler and set level to info
    log_h = logging.FileHandler(log_file)
    # base message format
    base_fmt = '%(asctime)s - %(name)s - [%(levelname)s] - %(message)s'
    # set formatter
    log_fmt = logging.Formatter(base_fmt)
    log_h.setFormatter(log_fmt)
    # add handler
    _logger.addHandler(log_h)

    # create stdout handler
    if not quiet_stdout:
        global _stdout
        _stdout = True
def spawn_shell(self, context_file, tmpdir, rcfile=None, norc=False,
                stdin=False, command=None, env=None, quiet=False,
                pre_command=None, add_rez=True,
                package_commands_sourced_first=None, **Popen_args):
    """Spawn a possibly interactive subshell.

    Args:
        context_file: File that must be sourced in the new shell, this
            configures the Rez environment.
        tmpdir: Tempfiles, if needed, should be created within this path.
        rcfile: Custom startup script.
        norc: Don't run startup scripts. Overrides rcfile.
        stdin: If True, read commands from stdin in a non-interactive shell.
            If a different non-False value, such as subprocess.PIPE, the same
            occurs, but stdin is also passed to the resulting subprocess.Popen
            object.
        command: If not None, execute this command in a non-interactive shell.
            If an empty string, don't run a command, but don't open an
            interactive shell either.
        env: Environ dict to execute the shell within; uses the current
            environment if None.
        quiet: If True, don't show the configuration summary, and suppress any
            stdout from startup scripts.
        pre_command: Command to inject before the shell command itself. This
            is for internal use.
        add_rez: If True, assume this shell is being used with rez, and do
            things such as set the prompt etc.
        package_commands_sourced_first: If True, source the context file
            before sourcing startup scripts (such as .bashrc). If False,
            source the context file AFTER. If None, use the configured
            setting.
        Popen_args: args to pass to the shell process object constructor.

    Returns:
        A subprocess.Popen object representing the shell process.
    """
    raise NotImplementedError
def _deserialize(self, value, attr, data, **kwargs):
    """Deserialize an ISO8601-formatted time to a :class:`datetime.time` object."""
    if not value:  # falsy values are invalid
        self.fail('invalid')
    try:
        return utils.from_iso_time(value)
    except (AttributeError, TypeError, ValueError):
        self.fail('invalid')
def evaluate(self, script):
    """
    Evaluate script in page frame.

    :param script: The script to evaluate.
    """
    if WEBENGINE:
        return self.dom.runJavaScript("{}".format(script))
    else:
        return self.dom.evaluateJavaScript("{}".format(script))
def camel(theta):
    """Three-hump camel function"""
    x, y = theta
    obj = 2 * x ** 2 - 1.05 * x ** 4 + x ** 6 / 6 + x * y + y ** 2
    grad = np.array([
        4 * x - 4.2 * x ** 3 + x ** 5 + y,
        x + 2 * y
    ])
    return obj, grad
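A quick numerical check of the analytic gradient against central differences (a verification sketch, not part of the original module):

import numpy as np

theta = np.array([0.5, -0.3])
obj, grad = camel(theta)

eps = 1e-6
num_grad = np.array([
    (camel(theta + eps * e)[0] - camel(theta - eps * e)[0]) / (2 * eps)
    for e in np.eye(2)
])
assert np.allclose(grad, num_grad, atol=1e-5)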
def path_exists_or_creatable(pathname: str) -> bool:
    """Checks whether the given path exists or is creatable.

    This function is guaranteed to _never_ raise exceptions.

    Returns
    -------
    `True` if the passed pathname is a valid pathname for the current OS _and_
    either currently exists or is hypothetically creatable; `False` otherwise.
    """
    try:
        # To prevent "os" module calls from raising undesirable exceptions on
        # invalid pathnames, is_pathname_valid() is explicitly called first.
        return is_pathname_valid(pathname) and (
            os.path.exists(pathname) or is_path_creatable(pathname))
    # Report failure on non-fatal filesystem complaints (e.g., connection
    # timeouts, permissions issues) implying this path to be inaccessible. All
    # other exceptions are unrelated fatal issues and should not be caught
    # here.
    except OSError:
        return False
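A hypothetical call site; it assumes `is_pathname_valid` and `is_path_creatable` are defined in the same module, as the body implies:

for candidate in ('/tmp/report.csv', '\0bad\0name'):
    if path_exists_or_creatable(candidate):
        print('writable target:', candidate)
    else:
        print('rejecting:', candidate)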
def return_error(self, status, payload=None):
    """Error handler called by request handlers when an error occurs
    and the request should be aborted.

    Usage::

        def handle_post_request(self, *args, **kwargs):
            self.request_handler = self.get_request_handler()
            try:
                self.request_handler.process(self.get_data())
            except SomeException as e:
                self.return_error(400, payload=self.request_handler.errors)
            return self.return_create_response()
    """
    resp = None
    if payload is not None:
        payload = json.dumps(payload)
        resp = self.make_response(payload, status=status)
    if status in [405]:
        abort(status)
    else:
        abort(status, response=resp)
def from_file(proto_file):
    '''
    Take a filename |proto_file|, compile it via the Protobuf
    compiler, and import the module.

    Return the module if successfully compiled, otherwise raise either
    a ProtocNotFound or BadProtobuf exception.
    '''
    if not proto_file.endswith('.proto'):
        raise BadProtobuf()

    dest = tempfile.mkdtemp()
    full_path = os.path.abspath(proto_file)
    _compile_proto(full_path, dest)

    filename = os.path.split(full_path)[-1]
    name = re.search(r'^(.*)\.proto$', filename).group(1)
    target = os.path.join(dest, name + '_pb2.py')

    return _load_module(target)
def mkp(*args, **kwargs):
    """
    Generate a directory path, and create it if requested.

    .. code-block:: Python

        filepath = mkp('base', 'folder', 'file')
        dirpath = mkp('root', 'path', 'folder', mk=True)

    Args:
        \*args: File or directory path segments to be concatenated
        mk (bool): Make the directory (if it doesn't exist)

    Returns:
        path (str): File or directory path
    """
    mk = kwargs.pop('mk', False)
    path = os.sep.join(list(args))
    if mk:
        while sep2 in path:
            path = path.replace(sep2, os.sep)
        try:
            os.makedirs(path)
        except FileExistsError:
            pass
    return path
def flush_cache(self):
    '''
    Use a cache to save state changes to avoid opening a session for every
    change. The cache will be flushed at the end of the simulation, and when
    history is accessed.
    '''
    logger.debug('Flushing cache {}'.format(self.db_path))
    with self.db:
        for rec in self._tups:
            self.db.execute("replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)",
                            (rec.agent_id, rec.t_step, rec.key, rec.value))
    self._tups = list()
def peep_hash(argv):
    """Return the peep hash of one or more files, returning a shell status
    code or raising a PipException.

    :arg argv: The commandline args, starting after the subcommand
    """
    parser = OptionParser(
        usage='usage: %prog hash file [file ...]',
        description='Print a peep hash line for one or more files: for '
                    'example, "# sha256: '
                    'oz42dZy6Gowxw8AelDtO4gRgTW_xPdooH484k7I5EOY".')
    _, paths = parser.parse_args(args=argv)
    if paths:
        for path in paths:
            print('# sha256:', hash_of_file(path))
        return ITS_FINE_ITS_FINE
    else:
        parser.print_usage()
        return COMMAND_LINE_ERROR
def parse_report(self, lines, table_names):
    """ Parse a GATK report https://software.broadinstitute.org/gatk/documentation/article.php?id=1244

    Only GATKTable entries are parsed. Tables are returned as a dict of
    tables. Each table is a dict of arrays, where names correspond to column
    names, and arrays correspond to column values.

    Args:
        lines (file handle): an iterable over the lines of a GATK report.
        table_names (dict): a dict with keys that are GATK report table names
            (e.g. "#:GATKTable:Quantized:Quality quantization map"), and
            values that are the keys in the returned dict.

    Returns:
        {
            table_1:
                {
                    col_1: [ val_1, val_2, ... ]
                    col_2: [ val_1, val_2, ... ]
                    ...
                }
            table_2:
                ...
        }
    """
    report = dict()
    lines = (l for l in lines)
    for line in lines:
        line = line.rstrip()
        if line in table_names.keys():
            report[table_names[line]] = self.parse_gatk_report_table(lines)
    return report
def get_partition(url, headers, source_id, container, partition):
    """Serializable function for fetching a data source partition

    Parameters
    ----------
    url: str
        Server address
    headers: dict
        HTTP header parameters
    source_id: str
        ID of the source in the server's cache (unique per user)
    container: str
        Type of data, like "dataframe" one of ``intake.container.container_map``
    partition: serializable
        Part of data to fetch, e.g., an integer for a dataframe.
    """
    accepted_formats = list(serializer.format_registry.keys())
    accepted_compression = list(serializer.compression_registry.keys())

    payload = dict(action='read',
                   source_id=source_id,
                   accepted_formats=accepted_formats,
                   accepted_compression=accepted_compression)

    if partition is not None:
        payload['partition'] = partition

    resp = None  # initialize so the finally-clause is safe if the POST itself raises
    try:
        resp = requests.post(urljoin(url, '/v1/source'),
                             data=msgpack.packb(payload, use_bin_type=True),
                             **headers)
        if resp.status_code != 200:
            raise Exception('Error reading data')

        msg = msgpack.unpackb(resp.content, **unpack_kwargs)
        format = msg['format']
        compression = msg['compression']
        compressor = serializer.compression_registry[compression]
        encoder = serializer.format_registry[format]
        chunk = encoder.decode(compressor.decompress(msg['data']), container)
        return chunk
    finally:
        if resp is not None:
            resp.close()
def _set_interface_vlan_ospf_conf(self, v, load=False):
    """
    Setter method for interface_vlan_ospf_conf, mapped from YANG variable /routing_system/interface/ve/ip/interface_vlan_ospf_conf (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_vlan_ospf_conf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_vlan_ospf_conf() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=interface_vlan_ospf_conf.interface_vlan_ospf_conf, is_container='container', presence=False, yang_name="interface-vlan-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFVlanInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_vlan_ospf_conf must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=interface_vlan_ospf_conf.interface_vlan_ospf_conf, is_container='container', presence=False, yang_name="interface-vlan-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFVlanInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__interface_vlan_ospf_conf = t
    if hasattr(self, '_set'):
        self._set()
def write(self, data):
    """Writes json data to the output directory."""
    cnpj, data = data
    path = os.path.join(self.output, '%s.json' % cnpj)
    with open(path, 'w') as f:
        # note: the `encoding` kwarg to json.dump exists in Python 2 / simplejson only
        json.dump(data, f, encoding='utf-8')
def _get_unique_ch(text, all_common_encodes):
    """
    text : encode sample strings
    returns unique word / characters from input text encode strings.
    """
    unique_chars = ''
    if isinstance(text, str):
        text = text.split("\n")
    elif isinstance(text, (list, tuple)):
        pass

    special_chars = ['.', ',', ';', ':', '', ' ', '\r', '\t', '=', '\n']
    for line in text:
        for word in line.split(' '):
            if not PYTHON3:
                word = word.decode('utf-8')
            for ch in all_common_encodes:
                if ch in word:
                    word = word.replace(ch, '')
            # end of for ch in all_common_encodes:

            # if len of word is zero, then go for another word
            if not word:
                continue

            for ch in word:
                if ch.isdigit() or ch in special_chars:
                    # remove special common chars
                    word = word.replace(ch, '')
                    continue
                # end of if ch.isdigit() or ...:
                # Voila, got unique chars from user passed text
                return word
            # end of for ch in word:
        # end of for word in line.split(' '):
    # end of for line in text:
    return ''
def update_alarm(self, alarm, criteria=None, disabled=False,
                 label=None, name=None, metadata=None):
    """
    Updates an existing alarm on this entity.
    """
    return self._alarm_manager.update(alarm, criteria=criteria,
                                      disabled=disabled, label=label,
                                      name=name, metadata=metadata)
def get_paths(self, key):
    """
    Same as `ConfigParser.get_path` for a list of paths.

    Args:
        key: str, the key to lookup the paths with

    Returns:
        list: The paths.
    """
    final_paths = []

    if key in self.__cli:
        paths = self.__cli[key] or []
        from_conf = False
    else:
        paths = self.__config.get(key) or []
        from_conf = True

    for path in flatten_list(paths):
        final_path = self.__abspath(path, from_conf)
        if final_path:
            final_paths.append(final_path)

    return final_paths
def is_alive(self):
    """Returns a flag with the state of the SSH connection."""
    null = chr(0)
    try:
        if self.device is None:
            return {"is_alive": False}
        else:
            # Try sending ASCII null byte to maintain the connection alive
            self._send_command(null)
    except (socket.error, EOFError):
        # If unable to send, we can tell for sure that the connection is unusable,
        # hence return False.
        return {"is_alive": False}
    return {"is_alive": self.device.remote_conn.transport.is_active()}
def _create_prefix_notification(outgoing_msg, rpc_session):
    """Constructs prefix notification with data from given outgoing message.

    Given RPC session is used to create RPC notification message.
    """
    assert outgoing_msg
    path = outgoing_msg.path
    assert path
    vpn_nlri = path.nlri

    assert path.source is not None
    if path.source != VRF_TABLE:
        # Extract relevant info for update-add/update-delete.
        params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
                   PREFIX: vpn_nlri.prefix,
                   NEXT_HOP: path.nexthop,
                   VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family)}]
        if path.nlri.ROUTE_FAMILY.safi not in (subaddr_family.IP_FLOWSPEC,
                                               subaddr_family.VPN_FLOWSPEC):
            # `params` is a list of dicts, so the label belongs in the first
            # dict; indexing the list with a string key would raise TypeError.
            params[0][VPN_LABEL] = path.label_list[0]

        if not path.is_withdraw:
            # Create notification to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_ADD_REMOTE_PREFIX, params)
        else:
            # Create update-delete request to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_DELETE_REMOTE_PREFIX, params)
    else:
        # Extract relevant info for update-add/update-delete.
        params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
                   PREFIX: vpn_nlri.prefix,
                   NEXT_HOP: path.nexthop,
                   VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family),
                   ORIGIN_RD: path.origin_rd}]
        if not path.is_withdraw:
            # Create notification to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_ADD_LOCAL_PREFIX, params)
        else:
            # Create update-delete request to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_DELETE_LOCAL_PREFIX, params)

    return rpc_msg
def to_match(self):
    """Return a unicode object with the MATCH representation of this expression."""
    self.validate()

    mark_name, _ = self.fold_scope_location.get_location_name()
    validate_safe_string(mark_name)

    template = u'$%(mark_name)s.size()'
    template_data = {
        'mark_name': mark_name,
    }
    return template % template_data
def flush(self):
    """Write all data from buffer to socket and reset write buffer."""
    if self._wbuf:
        buffer = ''.join(self._wbuf)
        self._wbuf = []
        self.write(buffer)
def createModel(self, model, context, owner='', includeReferences=True):
    """
    Creates a new table in the database based off the inputted
    schema information.  If the dryRun flag is specified, then
    the SQLConnection will only be logged to the current logger,
    and not actually executed in the database.

    :param      model    | <orb.Model>
                context  | <orb.Context>

    :return     <bool> success
    """
    CREATE = self.statement('CREATE')
    sql, data = CREATE(model, includeReferences=includeReferences, owner=owner)
    if not sql:
        log.error('Failed to create {0}'.format(model.schema().dbname()))
        return False
    else:
        if context.dryRun:
            print(sql % data)
        else:
            self.execute(sql, data, writeAccess=True)

        log.info('Created {0}'.format(model.schema().dbname()))
        return True
def find_pareto_front(metrics, metadata, columns, depth=1, epsilon=None, progress=None):
    """
    Return the subset of the given metrics that are Pareto optimal with
    respect to the given columns.

    Arguments
    =========
    metrics: DataFrame
        A dataframe where each row is a different model or design and each
        column is a different score metric.

    metadata: dict
        Extra information about each score metric, in particular whether or
        not bigger values are considered better or worse.  You can get this
        data structure from structures.load().

    columns: list
        The score metrics to consider when calculating the Pareto front.

    depth: int
        The number of Pareto fronts to return.  In other words, if depth=2,
        the Pareto front will be calculated, then those points (and any within
        epsilon of them) will be set aside, then the Pareto front of the
        remaining points will be calculated, then the union of both fronts
        will be returned.

    epsilon: float
        How close two points can be (in all the dimensions considered) before
        they are considered the same and one is excluded from the Pareto front
        (even if it is non-dominated).  This is roughly in units of percent of
        the range of the points.  By default this is small enough that you can
        basically assume that no two points will be considered the same.

    progress: func
        A function that will be called in the innermost loop as follows:
        `progress(curr_depth, tot_depth, curr_hit, tot_hits)`.  This is
        primarily intended to allow the caller to present a progress bar,
        since increasing the depth can dramatically increase the amount of
        time this function takes.

    Returns
    =======
    front: DataFrame
        The subset of the given metrics that is Pareto optimal with respect to
        the given score metrics.

    There are several ways to tune the number of models that are returned by
    this function.  These are important to know, because this function is used
    to filter models between rounds of design, and there are always practical
    constraints on the number of models that can be carried on:

    - Columns: This is only mentioned for completeness, because you should
      pick your score metrics based on which scores you think are informative,
      not based on how many models you need.  But increasing the number of
      score metrics increases the number of models that are selected,
      sometimes dramatically.

    - Depth: Increasing the depth increases the number of models that are
      selected by including models that are just slightly behind the Pareto
      front.

    - Epsilon: Increasing the epsilon decreases the number of models that are
      selected by discarding the models in the Pareto front that are too
      similar to each other.

    In short, tune depth to get more models and epsilon to get fewer.  You can
    also tune both at once to get a large but diverse set of models.
    """
    # Bail out if the data frame is empty, because otherwise the Pareto front
    # calculation will choke on something.
    if len(metrics) == 0:
        return metrics

    # https://github.com/matthewjwoodruff/pareto.py
    import pareto

    indices_from_cols = lambda xs: [metrics.columns.get_loc(x) for x in xs]
    percentile = lambda x, q: metrics[x].quantile(q/100)

    epsilons = [
        (epsilon or 1e-7) * abs(percentile(x, 90) - percentile(x, 10)) / (90 - 10)
        for x in columns
    ]
    maximize = [x for x in columns if metadata[x].direction == '+']
    maximize_indices = indices_from_cols(maximize)
    column_indices = indices_from_cols(columns)

    def boxify(df):
        # Bin each score into epsilon-sized boxes, so that near-duplicate
        # points land in the same box.
        boxed_df = pd.DataFrame()
        for col, eps in zip(columns, epsilons):
            boxed_df[col] = np.floor(df[col] / eps)
        return boxed_df

    mask = np.zeros(len(metrics), dtype='bool')
    too_close = np.zeros(len(metrics), dtype='bool')
    all_boxes = boxify(metrics)

    labeled_metrics = metrics.copy()
    labeled_metrics['_pip_index'] = range(len(metrics))
    id = labeled_metrics.columns.get_loc('_pip_index')

    for i in range(depth):
        # Figure out which points are within epsilon of points that are
        # already in the front, so they can be excluded from the search.
        # Without this, points that are rejected for being too similar at one
        # depth will be included in the next depth.
        #
        # This check is unfortunately very expensive, so we skip it for the
        # default value of epsilon, which is so small (1e-7) that we assume no
        # points will be rejected for being too similar.
        if epsilon is None:
            candidates = [labeled_metrics[~mask]]
        else:
            front_boxes = boxify(metrics[mask])
            for j, (_, row) in enumerate(front_boxes.iterrows()):
                if progress:
                    progress(i+1, depth, j+1, len(front_boxes))
                too_close |= all_boxes.apply(
                    lambda x: (x == row).all(), axis='columns')
            candidates = [labeled_metrics[too_close == False]]

        front = pareto.eps_sort(
            candidates, column_indices, epsilons, maximize=maximize_indices)

        for row in front:
            assert not mask[row[id]]
            mask[row[id]] = True

    return metrics[mask]
def import_profile(self):
    """ Import minimum needs from an existing json file.

    The minimum needs are loaded from a file into the table. This state
    is only saved if the form is accepted.
    """
    # noinspection PyCallByClass,PyTypeChecker
    file_name_dialog = QFileDialog(self)
    file_name_dialog.setAcceptMode(QFileDialog.AcceptOpen)
    file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
    file_name_dialog.setDefaultSuffix('json')
    path_name = resources_path('minimum_needs')
    file_name_dialog.setDirectory(path_name)
    if file_name_dialog.exec_():
        file_name = file_name_dialog.selectedFiles()[0]
    else:
        return -1
    if self.minimum_needs.read_from_file(file_name) == -1:
        return -1
    self.clear_resource_list()
    self.populate_resource_list()
    self.switch_context(self.profile_edit_page)
def iter_shortcuts():
    """Iterate over keyboard shortcuts."""
    for context_name, keystr in CONF.items('shortcuts'):
        context, name = context_name.split("/", 1)
        yield context, name, keystr
def _init(self):
    """Read resource information into self._cache, for cached access.

    See DAVResource._init()
    """
    # TODO: recalc self.path from <self._file_path>, to fix correct file system case
    # On windows this would lead to correct URLs
    self.provider._count_get_resource_inst_init += 1
    tableName, primKey = self.provider._split_path(self.path)

    display_type = "Unknown"
    displayTypeComment = ""
    contentType = "text/html"

    # _logger.debug("getInfoDict(%s), nc=%s" % (path, self.connectCount))
    if tableName is None:
        display_type = "Database"
    elif primKey is None:  # "database" and table name
        display_type = "Database Table"
    else:
        contentType = "text/csv"
        if primKey == "_ENTIRE_CONTENTS":
            display_type = "Database Table Contents"
            displayTypeComment = "CSV Representation of Table Contents"
        else:
            display_type = "Database Record"
            displayTypeComment = "Attributes available as properties"

    # Avoid calling is_collection, since it would call isExisting -> _init_connection
    is_collection = primKey is None

    self._cache = {
        "content_length": None,
        "contentType": contentType,
        "created": time.time(),
        "display_name": self.name,
        # md5().update() returns None, so the digest must be taken from the
        # hash object constructed with the data:
        "etag": hashlib.md5(self.path.encode("utf-8")).hexdigest(),
        # "etag": md5.new(self.path).hexdigest(),
        "modified": None,
        "support_ranges": False,
        "display_info": {"type": display_type, "typeComment": displayTypeComment},
    }

    # Some resource-only infos:
    if not is_collection:
        self._cache["modified"] = time.time()
    _logger.debug("---> _init, nc=%s" % self.provider._count_initConnection)
def inline(self) -> str:
    """
    Return inline string format of the instance

    :return:
    """
    return "{0}:{1}".format(self.index, ' '.join([str(p) for p in self.parameters]))
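For example, attaching the method to a minimal carrier class (hypothetical, for illustration only):

class Instruction:
    def __init__(self, index, parameters):
        self.index = index
        self.parameters = parameters

Instruction.inline = inline  # attach the function above as a method

print(Instruction(3, [0.5, 2]).inline())  # prints "3:0.5 2"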
def plot(message, duration=1, ax=None):
    """
    Plot a message

    Returns:
        ax: a Matplotlib Axes
    """
    lst_bin = _encode_binary(message)
    x, y = _create_x_y(lst_bin, duration)
    ax = _create_ax(ax)
    ax.plot(x, y, linewidth=2.0)
    delta_y = 0.1
    ax.set_ylim(-delta_y, 1 + delta_y)
    ax.set_yticks([0, 1])
    delta_x = 0.5 * duration
    ax.set_xlim(-delta_x, len(lst_bin) * duration + delta_x)
    return ax
Plot a message Returns: ax a Matplotlib Axes
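A short usage sketch, assuming `plot` is importable from this module and that `_encode_binary` accepts plain ASCII text:

import matplotlib.pyplot as plt

ax = plot("SOS", duration=0.5)  # each encoded bit spans 0.5 time units on x
plt.show()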
def _set_get_vnetwork_dvs(self, v, load=False): """ Setter method for get_vnetwork_dvs, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_dvs (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_vnetwork_dvs is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_vnetwork_dvs() directly. YANG Description: Shows discovered Distributed Virtual Switches """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_vnetwork_dvs.get_vnetwork_dvs, is_leaf=True, yang_name="get-vnetwork-dvs", rest_name="get-vnetwork-dvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dvs-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_vnetwork_dvs must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_vnetwork_dvs.get_vnetwork_dvs, is_leaf=True, yang_name="get-vnetwork-dvs", rest_name="get-vnetwork-dvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'dvs-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""", }) self.__get_vnetwork_dvs = t if hasattr(self, '_set'): self._set()
Setter method for get_vnetwork_dvs, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_dvs (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_vnetwork_dvs is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_vnetwork_dvs() directly. YANG Description: Shows discovered Distributed Virtual Switches
def driver_for_path(path, drivers=None): """Returns the gdal.Driver for a path or None based on the file extension. Arguments: path -- file path as str with a GDAL supported file extension """ ext = (os.path.splitext(path)[1][1:] or path).lower() drivers = drivers or ImageDriver.registry if ext else {} for name, meta in drivers.items(): if ext == meta.get('DMD_EXTENSION', '').lower(): return ImageDriver(name) return None
Returns the gdal.Driver for a path or None based on the file extension. Arguments: path -- file path as str with a GDAL supported file extension
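For illustration, assuming GDAL's registry maps the 'tif' extension to the GTiff driver:

driver = driver_for_path('scene.tif')   # -> ImageDriver('GTiff'), typically
missing = driver_for_path('notes.abc')  # -> None when no driver claims the extension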
def transit_generate_hmac(self, name, hmac_input, key_version=None, algorithm=None, mount_point='transit'): """POST /<mount_point>/hmac/<name>(/<algorithm>) :param name: name of the transit key to HMAC against :type name: str :param hmac_input: base64-encoded input data :type hmac_input: str :param key_version: version of the key to use; defaults to the latest version :type key_version: int :param algorithm: hash algorithm to use (e.g. 'sha2-256') :type algorithm: str :param mount_point: the transit secret backend mount point :type mount_point: str :return: JSON response of the request :rtype: dict """ if algorithm is not None: url = '/v1/{0}/hmac/{1}/{2}'.format(mount_point, name, algorithm) else: url = '/v1/{0}/hmac/{1}'.format(mount_point, name) params = { 'input': hmac_input } if key_version is not None: params['key_version'] = key_version return self._adapter.post(url, json=params).json()
POST /<mount_point>/hmac/<name>(/<algorithm>) :param name: name of the transit key to HMAC against :type name: str :param hmac_input: base64-encoded input data :type hmac_input: str :param key_version: version of the key to use; defaults to the latest version :type key_version: int :param algorithm: hash algorithm to use (e.g. 'sha2-256') :type algorithm: str :param mount_point: the transit secret backend mount point :type mount_point: str :return: JSON response of the request :rtype: dict
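A hedged usage sketch; Vault's transit backend expects base64-encoded input, and the response layout read at the end is an assumption based on Vault's documented format:

import base64

# `client` is assumed to be an authenticated client exposing this method.
result = client.transit_generate_hmac(
    name='my-key',
    hmac_input=base64.b64encode(b'payload').decode('ascii'),
    algorithm='sha2-256',
)
hmac_value = result['data']['hmac']  # assumed response structure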
def calculate_columns(self): """Assuming the number of rows is constant, work out the best number of columns to use.""" self.cols = int(math.ceil(len(self.elements) / float(self.rows)))
Assuming the number of rows is constant, work out the best number of columns to use.
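For example, laying out 10 elements in a fixed 3 rows requires ceil(10 / 3) = 4 columns:

import math

int(math.ceil(10 / float(3)))  # -> 4 columns for 10 elements in 3 rows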
def attachment(attachment: Attachment, text: str = None, speak: str = None, input_hint: Union[InputHints, str] = None): """ Returns a single message activity containing an attachment. :Example: message = MessageFactory.attachment(CardFactory.hero_card(HeroCard(title='White T-Shirt', images=[CardImage(url='https://example.com/whiteShirt.jpg')], buttons=[CardAction(title='buy')]))) await context.send_activity(message) :param attachment: :param text: :param speak: :param input_hint: :return: """ return attachment_activity(AttachmentLayoutTypes.list, [attachment], text, speak, input_hint)
Returns a single message activity containing an attachment. :Example: message = MessageFactory.attachment(CardFactory.hero_card(HeroCard(title='White T-Shirt', images=[CardImage(url='https://example.com/whiteShirt.jpg')], buttons=[CardAction(title='buy')]))) await context.send_activity(message) :param attachment: :param text: :param speak: :param input_hint: :return:
def _load_source_model(self): """ Loads and gets the source model of the FieldTranslation as a dynamic attribute. It is used only when deleting orphan translations (translations without a parent object associated). """ # If source_model exists, return it if hasattr(self, "source_model"): return self.source_model # Getting the source model module = self.get_python_module() # Check whether the module contains the model we are looking for if hasattr(module, self.model): # Setting of source model self.source_model = getattr(module, self.model) # Setting of verbose name and its plural for later use self.source_model.meta__verbose_name = self.source_model._meta.verbose_name self.source_model.meta__verbose_name_plural = self.source_model._meta.verbose_name_plural return self.source_model # The format string now consumes both arguments (the original had only one placeholder) raise ValueError(u"Model {0} does not exist in module {1}".format(self.model, self.module))
Loads and gets the source model of the FieldTranslation as a dynamic attribute. It is used only when deleting orphan translations (translations without a parent object associated).
def from_e164(text, origin=public_enum_domain): """Convert an E.164 number in textual form into a Name object whose value is the ENUM domain name for that number. @param text: an E.164 number in textual form. @type text: str @param origin: The domain in which the number should be constructed. The default is e164.arpa. @type: dns.name.Name object or None @rtype: dns.name.Name object """ parts = [d for d in text if d.isdigit()] parts.reverse() return dns.name.from_text('.'.join(parts), origin=origin)
Convert an E.164 number in textual form into a Name object whose value is the ENUM domain name for that number. @param text: an E.164 number in textual form. @type text: str @param origin: The domain in which the number should be constructed. The default is e164.arpa. @type: dns.name.Name object or None @rtype: dns.name.Name object
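A worked example of the digit reversal described above, using the default e164.arpa origin; the trailing dot reflects an absolute DNS name:

name = from_e164("+1 650 555 1212")
str(name)  # -> "2.1.2.1.5.5.5.0.5.6.1.e164.arpa."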
def init_sentry(self): """Install Sentry handler if config defines 'SENTRY_DSN'.""" dsn = self.config.get("SENTRY_DSN") if not dsn: return try: import sentry_sdk except ImportError: logger.error( 'SENTRY_DSN is defined in config but package "sentry-sdk"' " is not installed." ) return from sentry_sdk.integrations.flask import FlaskIntegration sentry_sdk.init(dsn=dsn, integrations=[FlaskIntegration()])
Install Sentry handler if config defines 'SENTRY_DSN'.
def get_collection_rules_gpg(self, collection_rules): """ Download the collection rules gpg signature """ sig_text = self.fetch_gpg() sig_response = NamedTemporaryFile(suffix=".asc") sig_response.write(sig_text.encode('utf-8')) sig_response.file.flush() self.validate_gpg_sig(collection_rules.name, sig_response.name) self.write_collection_data(self.collection_rules_file + ".asc", sig_text)
Download the collection rules gpg signature
def _find_multiple(self, match_class, **keywds): """implementation details""" self._logger.debug('find all query execution - started') start_time = timeit.default_timer() norm_keywds = self.__normalize_args(**keywds) decl_matcher = self.__create_matcher(match_class, **norm_keywds) dtype = self.__findout_decl_type(match_class, **norm_keywds) recursive_ = self.__findout_recursive(**norm_keywds) allow_empty = self.__findout_allow_empty(**norm_keywds) decls = self.__findout_range(norm_keywds['name'], dtype, recursive_) found = matcher.find(decl_matcher, decls, False) mfound = mdecl_wrapper.mdecl_wrapper_t(found) self._logger.debug('%d declaration(s) that match query', len(mfound)) self._logger.debug( 'find all query execution - done( %f seconds )', (timeit.default_timer() - start_time)) if not mfound and not allow_empty: raise RuntimeError( "Multi declaration query returned 0 declarations.") return mfound
implementation details
def map(lst, serialize_func): """ Applies serialize_func to every element in lst """ if not isinstance(lst, list): return lst return [serialize_func(e) for e in lst]
Applies serialize_func to every element in lst
def create_identity(self, user, sp_mapping, **extra_config): """ Generate an identity dictionary of the user based on the given mapping of desired user attributes by the SP """ return { out_attr: getattr(user, user_attr) for user_attr, out_attr in sp_mapping.items() if hasattr(user, user_attr) }
Generate an identity dictionary of the user based on the given mapping of desired user attributes by the SP
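An illustrative mapping (the attribute and SAML names are made up):

sp_mapping = {'username': 'uid', 'email': 'mail'}  # user attr -> SAML attr
identity = self.create_identity(request.user, sp_mapping)
# e.g. {'uid': 'alice', 'mail': 'alice@example.com'}; attributes the user
# object lacks are silently skipped by the hasattr() guard.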
def flexifunction_directory_ack_encode(self, target_system, target_component, directory_type, start_index, count, result): ''' Acknowledge success or failure of a flexifunction command target_system : System ID (uint8_t) target_component : Component ID (uint8_t) directory_type : 0=inputs, 1=outputs (uint8_t) start_index : index of first directory entry to write (uint8_t) count : count of directory entries to write (uint8_t) result : result of acknowledge, 0=fail, 1=good (uint16_t) ''' return MAVLink_flexifunction_directory_ack_message(target_system, target_component, directory_type, start_index, count, result)
Acknowledge success or failure of a flexifunction command target_system : System ID (uint8_t) target_component : Component ID (uint8_t) directory_type : 0=inputs, 1=outputs (uint8_t) start_index : index of first directory entry to write (uint8_t) count : count of directory entries to write (uint8_t) result : result of acknowledge, 0=fail, 1=good (uint16_t)
def _convert_xml_to_queues(response): ''' <?xml version="1.0" encoding="utf-8"?> <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/"> <Prefix>string-value</Prefix> <Marker>string-value</Marker> <MaxResults>int-value</MaxResults> <Queues> <Queue> <Name>string-value</Name> <Metadata> <metadata-name>value</metadata-name> </Metadata> </Queue> </Queues> <NextMarker /> </EnumerationResults> ''' if response is None or response.body is None: return None queues = _list() list_element = ETree.fromstring(response.body) # Set next marker next_marker = list_element.findtext('NextMarker') or None setattr(queues, 'next_marker', next_marker) queues_element = list_element.find('Queues') for queue_element in queues_element.findall('Queue'): # Name element queue = Queue() queue.name = queue_element.findtext('Name') # Metadata metadata_root_element = queue_element.find('Metadata') if metadata_root_element is not None: queue.metadata = dict() for metadata_element in metadata_root_element: queue.metadata[metadata_element.tag] = metadata_element.text # Add queue to list queues.append(queue) return queues
<?xml version="1.0" encoding="utf-8"?> <EnumerationResults ServiceEndpoint="https://myaccount.queue.core.windows.net/"> <Prefix>string-value</Prefix> <Marker>string-value</Marker> <MaxResults>int-value</MaxResults> <Queues> <Queue> <Name>string-value</Name> <Metadata> <metadata-name>value</metadata-name> </Metadata> </Queue> <NextMarker /> </EnumerationResults>
def run_solr_on(solrInstance, category, id, fields): """ Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id """ query = solrInstance.value + "select?q=*:*&fq=document_category:\"" + category.value + "\"&fq=id:\"" + id + "\"&fl=" + fields + "&wt=json&indent=on" response = requests.get(query) return response.json()['response']['docs'][0]
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
def _encode_json(obj): ''' Encode object as json str. ''' def _dump_obj(obj): if isinstance(obj, dict): return obj d = dict() for k in dir(obj): if not k.startswith('_'): d[k] = getattr(obj, k) return d return json.dumps(obj, default=_dump_obj)
Encode object as json str.
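A small sketch of the fallback behaviour for non-dict objects (the Point class is hypothetical):

class Point(object):
    def __init__(self):
        self.x, self.y = 1, 2

_encode_json({'a': 1})  # -> '{"a": 1}' (dicts pass straight through to json.dumps)
_encode_json(Point())   # -> '{"x": 1, "y": 2}': _dump_obj keeps only
                        # attribute names that do not start with '_'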
def warning(self, message, print_location=True): """Displays warning message. Uses exshared for current location of parsing""" msg = "Warning" if print_location and (exshared.location is not None): wline = lineno(exshared.location, exshared.text) wcol = col(exshared.location, exshared.text) wtext = line(exshared.location, exshared.text) msg += " at line %d, col %d" % (wline, wcol) msg += ": %s" % message if print_location and (exshared.location is not None): msg += "\n%s" % wtext print(msg)
Displays warning message. Uses exshared for current location of parsing
def rand_crop(*args, padding_mode='reflection', p:float=1.): "Randomized version of `crop_pad`." return crop_pad(*args, **rand_pos, padding_mode=padding_mode, p=p)
Randomized version of `crop_pad`.
def burst_run(self): """ Run CPU as fast as Python can... """ # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots... get_and_call_next_op = self.get_and_call_next_op for __ in range(self.outer_burst_op_count): for __ in range(self.inner_burst_op_count): get_and_call_next_op() self.call_sync_callbacks()
Run CPU as fast as Python can...
def remove_header(self, name): """Remove a field from the header""" if name in self.info_dict: self.info_dict.pop(name) logger.info("Removed '{0}' from INFO".format(name)) if name in self.filter_dict: self.filter_dict.pop(name) logger.info("Removed '{0}' from FILTER".format(name)) if name in self.format_dict: self.format_dict.pop(name) logger.info("Removed '{0}' from FORMAT".format(name)) if name in self.contig_dict: self.contig_dict.pop(name) logger.info("Removed '{0}' from CONTIG".format(name)) if name in self.alt_dict: self.alt_dict.pop(name) logger.info("Removed '{0}' from ALT".format(name)) if name in self.other_dict: self.other_dict.pop(name) logger.info("Removed '{0}' from OTHER".format(name)) return
Remove a field from the header
def name(self): # pylint: disable=no-self-use """ Name returns user's name or user's email or user_id :return: best guess of name to use to greet user """ if 'lis_person_sourcedid' in self.session: return self.session['lis_person_sourcedid'] elif 'lis_person_contact_email_primary' in self.session: return self.session['lis_person_contact_email_primary'] elif 'user_id' in self.session: return self.session['user_id'] else: return ''
Name returns user's name or user's email or user_id :return: best guess of name to use to greet user
def start(self): """ Start running the interactive session (blocking) """ self.running = True while self.running: self.update_prompt() try: self.cmdloop() except KeyboardInterrupt: print() except botocore.exceptions.BotoCoreError as e: print(e) except ParseException as e: print(self.engine.pformat_exc(e)) except Exception: traceback.print_exc() self.engine.reset()
Start running the interactive session (blocking)
def _maybe_replace_path(self, match): """ Regex replacement method that will sub paths when needed """ path = match.group(0) if self._should_replace(path): return self._replace_path(path) else: return path
Regex replacement method that will sub paths when needed
def _collect_output(self, process, result, writer=None, stdin=None): """Drain the subprocess's output streams, writing the collected output to the result. If a writer thread (writing to the subprocess) is given, make sure it's joined before returning. If a stdin stream is given, close it before returning. """ stderr = codecs.getreader(self._encoding)(process.stderr) rr = threading.Thread(target=self._read_response, args=(stderr, result)) rr.setDaemon(True) log.debug('stderr reader: %r', rr) rr.start() stdout = process.stdout dr = threading.Thread(target=self._read_data, args=(stdout, result)) dr.setDaemon(True) log.debug('stdout reader: %r', dr) dr.start() dr.join() rr.join() if writer is not None: writer.join() process.wait() if stdin is not None: try: stdin.close() except IOError: pass stderr.close() stdout.close()
Drain the subprocess's output streams, writing the collected output to the result. If a writer thread (writing to the subprocess) is given, make sure it's joined before returning. If a stdin stream is given, close it before returning.
def expand_path(s): """Expand $VARS and ~names in a string, like a shell :Examples: In [2]: os.environ['FOO']='test' In [3]: expand_path('variable FOO is $FOO') Out[3]: 'variable FOO is test' """ # This is a pretty subtle hack. When expand user is given a UNC path # on Windows (\\server\share$\%username%), os.path.expandvars, removes # the $ to get (\\server\share\%username%). I think it considered $ # alone an empty var. But, we need the $ to remains there (it indicates # a hidden share). if os.name=='nt': s = s.replace('$\\', 'IPYTHON_TEMP') s = os.path.expandvars(os.path.expanduser(s)) if os.name=='nt': s = s.replace('IPYTHON_TEMP', '$\\') return s
Expand $VARS and ~names in a string, like a shell :Examples: In [2]: os.environ['FOO']='test' In [3]: expand_path('variable FOO is $FOO') Out[3]: 'variable FOO is test'
def list_hosts_by_cluster(kwargs=None, call=None): ''' List hosts for each cluster; or hosts for a specified cluster in this VMware environment To list hosts for each cluster: CLI Example: .. code-block:: bash salt-cloud -f list_hosts_by_cluster my-vmware-config To list hosts for a specified cluster: CLI Example: .. code-block:: bash salt-cloud -f list_hosts_by_cluster my-vmware-config cluster="clusterName" ''' if call != 'function': raise SaltCloudSystemExit( 'The list_hosts_by_cluster function must be called with ' '-f or --function.' ) ret = {} cluster_name = kwargs.get('cluster') if kwargs and 'cluster' in kwargs else None cluster_properties = ["name"] cluster_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.ClusterComputeResource, cluster_properties) for cluster in cluster_list: ret[cluster['name']] = [] for host in cluster['object'].host: if isinstance(host, vim.HostSystem): ret[cluster['name']].append(host.name) if cluster_name and cluster_name == cluster['name']: return {'Hosts by Cluster': {cluster_name: ret[cluster_name]}} return {'Hosts by Cluster': ret}
List hosts for each cluster; or hosts for a specified cluster in this VMware environment To list hosts for each cluster: CLI Example: .. code-block:: bash salt-cloud -f list_hosts_by_cluster my-vmware-config To list hosts for a specified cluster: CLI Example: .. code-block:: bash salt-cloud -f list_hosts_by_cluster my-vmware-config cluster="clusterName"
def start(self, on_done): """ Starts the genesis block creation process. Will call the given `on_done` callback on successful completion. Args: on_done (function): a function called on completion Raises: InvalidGenesisStateError: raises this error if a genesis block is unable to be produced, or the resulting block-chain-id saved. """ genesis_file = os.path.join(self._data_dir, 'genesis.batch') try: with open(genesis_file, 'rb') as batch_file: genesis_data = genesis_pb2.GenesisData() genesis_data.ParseFromString(batch_file.read()) LOGGER.info('Producing genesis block from %s', genesis_file) except IOError: raise InvalidGenesisStateError( "Genesis File {} specified, but unreadable".format( genesis_file)) initial_state_root = self._context_manager.get_first_root() genesis_batches = [batch for batch in genesis_data.batches] if genesis_batches: scheduler = SerialScheduler( self._context_manager.get_squash_handler(), initial_state_root, always_persist=True) LOGGER.debug('Adding %s batches', len(genesis_data.batches)) for batch in genesis_data.batches: scheduler.add_batch(batch) self._transaction_executor.execute(scheduler) scheduler.finalize() scheduler.complete(block=True) txn_receipts = [] state_hash = initial_state_root for batch in genesis_batches: result = scheduler.get_batch_execution_result( batch.header_signature) if result is None or not result.is_valid: raise InvalidGenesisStateError( 'Unable to create genesis block, due to batch {}' .format(batch.header_signature)) if result.state_hash is not None: state_hash = result.state_hash txn_results = scheduler.get_transaction_execution_results( batch.header_signature) txn_receipts += self._make_receipts(txn_results) settings_view = SettingsView( self._state_view_factory.create_view(state_hash)) name = settings_view.get_setting('sawtooth.consensus.algorithm.name') version = settings_view.get_setting( 'sawtooth.consensus.algorithm.version') if name is None or version is None: raise LocalConfigurationError( 'Unable to start validator; sawtooth.consensus.algorithm.name ' 'and sawtooth.consensus.algorithm.version must be set in the ' 'genesis block.') LOGGER.debug('Produced state hash %s for genesis block.', state_hash) block_builder = self._generate_genesis_block() block_builder.add_batches(genesis_batches) block_builder.set_state_hash(state_hash) block_publisher = self._get_block_publisher(initial_state_root) if not block_publisher.initialize_block(block_builder.block_header): LOGGER.error('Consensus refused to initialize consensus block.') raise InvalidGenesisConsensusError( 'Consensus refused to initialize genesis block.') if not block_publisher.finalize_block(block_builder.block_header): LOGGER.error('Consensus refused to finalize genesis block.') raise InvalidGenesisConsensusError( 'Consensus refused to finalize genesis block.') self._sign_block(block_builder) block = block_builder.build_block() blkw = BlockWrapper(block=block) LOGGER.info('Genesis block created: %s', blkw) self._block_manager.put([blkw.block]) self._block_manager.persist(blkw.identifier, "commit_store") self._txn_receipt_store.chain_update(block, txn_receipts) self._chain_id_manager.save_block_chain_id(block.header_signature) LOGGER.debug('Deleting genesis data.') os.remove(genesis_file) if on_done is not None: on_done()
Starts the genesis block creation process. Will call the given `on_done` callback on successful completion. Args: on_done (function): a function called on completion Raises: InvalidGenesisStateError: raises this error if a genesis block is unable to be produced, or the resulting block-chain-id saved.
def _path_hash(path, transform, kwargs): """ Generate a hash of source file path + transform + args """ sortedargs = ["%s:%r:%s" % (key, value, type(value)) for key, value in sorted(iteritems(kwargs))] srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path), transform=transform, kwargs=",".join(sortedargs)) return digest_string(srcinfo)
Generate a hash of source file path + transform + args
def fit(self, features, classes): """Constructs the MDR ensemble from the provided training data Parameters ---------- features: array-like {n_samples, n_features} Feature matrix classes: array-like {n_samples} List of class labels for prediction Returns ------- None """ self.ensemble.fit(features, classes) # Construct the feature map from the ensemble predictions unique_rows = list(set([tuple(row) for row in features])) for row in unique_rows: self.feature_map[row] = self.ensemble.predict([row])[0]
Constructs the MDR ensemble from the provided training data Parameters ---------- features: array-like {n_samples, n_features} Feature matrix classes: array-like {n_samples} List of class labels for prediction Returns ------- None
def connect(receiver, signal=Any, sender=Any, weak=True): """Connect receiver to sender for signal receiver -- a callable Python object which is to receive messages/signals/events. Receivers must be hashable objects. if weak is True, then receiver must be weak-referencable (more precisely saferef.safeRef() must be able to create a reference to the receiver). Receivers are fairly flexible in their specification, as the machinery in the robustApply module takes care of most of the details regarding figuring out appropriate subsets of the sent arguments to apply to a given receiver. Note: if receiver is itself a weak reference (a callable), it will be de-referenced by the system's machinery, so *generally* weak references are not suitable as receivers, though some use might be found for the facility whereby a higher-level library passes in pre-weakrefed receiver references. signal -- the signal to which the receiver should respond if Any, receiver will receive any signal from the indicated sender (which might also be Any, but is not necessarily Any). Otherwise must be a hashable Python object other than None (DispatcherError raised on None). sender -- the sender to which the receiver should respond if Any, receiver will receive the indicated signals from any sender. if Anonymous, receiver will only receive indicated signals from send/sendExact which do not specify a sender, or specify Anonymous explicitly as the sender. Otherwise can be any python object. weak -- whether to use weak references to the receiver By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. returns None, may raise DispatcherTypeError """ if signal is None: raise errors.DispatcherTypeError( 'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender) ) if weak: receiver = saferef.safeRef(receiver, onDelete=_removeReceiver) senderkey = id(sender) if senderkey in connections: signals = connections[senderkey] else: connections[senderkey] = signals = {} # Keep track of senders for cleanup. # Is Anonymous something we want to clean up? if sender not in (None, Anonymous, Any): def remove(object, senderkey=senderkey): _removeSender(senderkey=senderkey) # Skip objects that can not be weakly referenced, which means # they won't be automatically cleaned up, but that's too bad. try: weakSender = weakref.ref(sender, remove) senders[senderkey] = weakSender except: pass receiverID = id(receiver) # get current set, remove any current references to # this receiver in the set, including back-references if signal in signals: receivers = signals[signal] _removeOldBackRefs(senderkey, signal, receiver, receivers) else: receivers = signals[signal] = [] try: current = sendersBack.get( receiverID ) if current is None: sendersBack[ receiverID ] = current = [] if senderkey not in current: current.append(senderkey) except: pass receivers.append(receiver)
Connect receiver to sender for signal receiver -- a callable Python object which is to receive messages/signals/events. Receivers must be hashable objects. if weak is True, then receiver must be weak-referencable (more precisely saferef.safeRef() must be able to create a reference to the receiver). Receivers are fairly flexible in their specification, as the machinery in the robustApply module takes care of most of the details regarding figuring out appropriate subsets of the sent arguments to apply to a given receiver. Note: if receiver is itself a weak reference (a callable), it will be de-referenced by the system's machinery, so *generally* weak references are not suitable as receivers, though some use might be found for the facility whereby a higher-level library passes in pre-weakrefed receiver references. signal -- the signal to which the receiver should respond if Any, receiver will receive any signal from the indicated sender (which might also be Any, but is not necessarily Any). Otherwise must be a hashable Python object other than None (DispatcherError raised on None). sender -- the sender to which the receiver should respond if Any, receiver will receive the indicated signals from any sender. if Anonymous, receiver will only receive indicated signals from send/sendExact which do not specify a sender, or specify Anonymous explicitly as the sender. Otherwise can be any python object. weak -- whether to use weak references to the receiver By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. returns None, may raise DispatcherTypeError
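A minimal sketch of the calling convention described above; the signal name is arbitrary and `send` is the dispatcher's companion function (assumed to live in the same module):

def on_saved(sender, **named):
    print("saved by", sender)

connect(on_saved, signal="saved", sender=Any)  # weak references by default
# A later send("saved", sender=some_object) would then invoke on_saved.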
def reuse_variables(method):
  """Wraps an arbitrary method so it does variable sharing.

  This decorator creates variables the first time it calls `method`, and reuses
  them for subsequent calls. The object that calls `method` provides a
  `tf.VariableScope`, either as a `variable_scope` attribute or as the return
  value of an `_enter_variable_scope()` method.

  The first time the wrapped method is invoked, it enters the caller's
  `tf.VariableScope` with `reuse=False`. On all subsequent calls it enters the
  same variable scope with `reuse=True`.

  Variables are created in the context of the `tf.VariableScope` provided by
  the caller object. Ops are created with an additional `tf.name_scope()`,
  which adds a scope for the wrapped method name. For example:

  ```python
  class MyClass(object):

    def __init__(self, name):
      with tf.variable_scope(None, default_name=name) as variable_scope:
        self.variable_scope = variable_scope

    @snt.reuse_variables
    def add_x(self, tensor):
      x = tf.get_variable("x", shape=tensor.get_shape())
      return tensor + x

  module = MyClass("my_module_name")
  input_tensor = tf.zeros(shape=(5,))

  # This creates the variable "my_module_name/x"
  # and op "my_module_name/add_x/add"
  output = module.add_x(input_tensor)
  ```

  For performance when executing eagerly it may be desirable to additionally
  annotate these methods using `defun`, such that they are encapsulated as
  graph functions. This is not recommended if your method returns a variable
  since the output of `defun` would be an op that returned the variable's
  value when evaluated (rather than the variable instance).

  ```python
  class FooModule(snt.AbstractModule):
    def _build(self, inputs):
      return complex_math(inputs)

    @tfe.defun
    @snt.reuse_variables
    def more_complex_stuff(self, inputs):
      return more_complex_math(inputs)
  ```

  Args:
    method: The method to wrap.

  Returns:
    The wrapped method.
  """
  initialized_variable_scopes_eager = set()
  initialized_variable_scopes_graph = weakref.WeakKeyDictionary()

  # Ensure that the argument passed in is really a method by checking that the
  # first positional argument to it is "self".
  arg_spec = inspect.getargspec(method)
  is_method = arg_spec.args and arg_spec.args[0] == "self"
  if not is_method:
    raise TypeError("reuse_variables can only be used with methods.")

  @wrapt.decorator
  def eager_test(method, obj, args, kwargs):
    """Validates runtime state in eager mode."""
    # If @reuse_variables is combined with @property, obj is passed in args
    # and method is still unbound at this stage.
    if obj is None:
      obj = args[0]

    if tf.executing_eagerly() and not hasattr(obj, "_template"):
      raise ValueError(
          "reuse_variables is not supported in eager mode except in Sonnet "
          "modules.")

    return method(*args, **kwargs)

  @wrapt.decorator
  def call_method(method, obj, args, kwargs):
    """Calls `method` with a variable scope whose reuse flag is set correctly.

    The first time the wrapper is called it creates a
    `(tf.Graph, tf.VariableScope)` key and checks it for membership in
    `initialized_variable_scopes`. The check is `False` if and only if this is
    the first time the wrapper has been called with the key, otherwise the
    check is `True`. The result of this check is used as the `reuse` flag for
    entering the provided variable scope before calling `method`.

    Here are two examples of how to use the reuse_variables decorator.

    1. Decorate an arbitrary instance method with a `variable_scope` attribute:

      ```python
      class Reusable(object):

        def __init__(self, name):
          with tf.variable_scope(None, default_name=name) as vs:
            self.variable_scope = vs

        @snt.reuse_variables
        def add_a(self, input_tensor):
          a = tf.get_variable("a", shape=input_tensor.get_shape())
          return a + input_tensor

      obj = Reusable("reusable")
      x = tf.constant(5.0)
      out1 = obj.add_a(x)
      out2 = obj.add_a(x)
      # out1 == out2
      ```

    2. Decorating a snt.AbstractModule instance method:

      ```python
      class ReusableModule(snt.AbstractModule):

        @snt.reuse_variables
        def add_a(self, input_tensor):
          a = tf.get_variable("a", shape=input_tensor.get_shape())
          return a + input_tensor

        # We don't need @snt.reuse_variables here because build is wrapped by
        # `tf.make_template` inside `snt.AbstractModule`.
        def _build(self, input_tensor):
          b = tf.get_variable("b", shape=input_tensor.get_shape())
          return b + self.add_a(input_tensor)

      obj = Reusable("reusable")
      x = tf.constant(5.0)
      out1 = obj(x)
      out2 = obj(x)
      # out1 == out2
      ```

    Args:
      method: The method to wrap.
      obj: The object instance passed to the wrapped method.
      args: The positional arguments (Tensors) passed to the wrapped method.
      kwargs: The keyword arguments passed to the wrapped method.

    Returns:
      Output of the wrapped method.

    Raises:
      ValueError: If no variable scope is provided or if `method` is a method
                  and a variable_scope keyword argument is also provided.
    """
    # If @reuse_variables is combined with @property, obj is passed in args
    # and method is still unbound at this stage.
    if obj is None:
      obj = args[0]

    def default_context_manager(reuse=None):
      variable_scope = obj.variable_scope
      return tf.variable_scope(variable_scope, reuse=reuse)

    variable_scope_context_manager = getattr(obj, "_enter_variable_scope",
                                             default_context_manager)

    with tf.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that case
      # what we want is information about where the function will be called not
      # where the function is being built.
      graph = tf.get_default_graph()
      will_call_in_eager_context = tf.executing_eagerly()

    if will_call_in_eager_context:
      initialized_variable_scopes = initialized_variable_scopes_eager
    else:
      if graph not in initialized_variable_scopes_graph:
        initialized_variable_scopes_graph[graph] = set()
      initialized_variable_scopes = initialized_variable_scopes_graph[graph]

    # Temporarily enter the variable scope to capture it
    with variable_scope_context_manager() as tmp_variable_scope:
      variable_scope = tmp_variable_scope

    reuse = variable_scope.name in initialized_variable_scopes

    # Enter the pure variable scope with reuse correctly set
    with variable_scope_ops._pure_variable_scope(  # pylint:disable=protected-access
        variable_scope, reuse=reuse) as pure_variable_scope:
      current_name_scope = tf.get_default_graph().get_name_scope()
      # Force tf.name_scope to treat current_name_scope as an "absolute" scope
      # so we can re-enter it.
      if current_name_scope and current_name_scope[-1] != "/":
        current_name_scope += "/"
      with tf.name_scope(current_name_scope):
        module_name = pure_variable_scope.name
        method_name = to_snake_case(method.__name__)
        method_name_scope = "{}/{}".format(module_name, method_name)
        with tf.name_scope(method_name_scope) as scope:
          if hasattr(obj, "_capture_variables"):
            with obj._capture_variables():  # pylint: disable=protected-access
              out_ops = method(*args, **kwargs)
          else:
            out_ops = method(*args, **kwargs)
      initialized_variable_scopes.add(pure_variable_scope.name)
      try:
        # If `obj` is a Sonnet module, let it know it's been connected
        # to the TF graph.
        obj._is_connected = True  # pylint: disable=protected-access
        if not tf.executing_eagerly():
          obj._add_connected_subgraph(  # pylint: disable=protected-access
              method, out_ops, scope, args, kwargs)
      except AttributeError:
        pass
    return out_ops

  return eager_test(call_method(method))
Wraps an arbitrary method so it does variable sharing. This decorator creates variables the first time it calls `method`, and reuses them for subsequent calls. The object that calls `method` provides a `tf.VariableScope`, either as a `variable_scope` attribute or as the return value of an `_enter_variable_scope()` method. The first time the wrapped method is invoked, it enters the caller's `tf.VariableScope` with `reuse=False`. On all subsequent calls it enters the same variable scope with `reuse=True`. Variables are created in the context of the `tf.VariableScope` provided by the caller object. Ops are created with an additional `tf.name_scope()`, which adds a scope for the wrapped method name. For example: ```python class MyClass(object): def __init__(self, name): with tf.variable_scope(None, default_name=name) as variable_scope: self.variable_scope = variable_scope @snt.reuse_variables def add_x(self, tensor): x = tf.get_variable("x", shape=tensor.get_shape()) return tensor + x module = MyClass("my_module_name") input_tensor = tf.zeros(shape=(5,)) # This creates the variable "my_module_name/x" # and op "my_module_name/add_x/add" output = module.add_x(input_tensor) ``` For performance when executing eagerly it may be desirable to additionally annotate these methods using `defun`, such that they are encapsulated as graph functions. This is not recommended if your method returns a variable since the output of `defun` would be an op that returned the variable's value when evaluated (rather than the variable instance). ```python class FooModule(snt.AbstractModule): def _build(self, inputs): return complex_math(inputs) @tfe.defun @snt.reuse_variables def more_complex_stuff(self, inputs): return more_complex_math(inputs) ``` Args: method: The method to wrap. Returns: The wrapped method.
def add_callback(self, func): """ Registers a call back function """ if func is None: return func_list = to_list(func) if not hasattr(self, 'callback_list'): self.callback_list = func_list else: self.callback_list.extend(func_list)
Registers a call back function
def _make_stream_reader(cls, stream): """ Return a |StreamReader| instance wrapping *stream* and having "endian-ness" determined by the 'MM' or 'II' indicator in the TIFF stream header. """ endian = cls._detect_endian(stream) return StreamReader(stream, endian)
Return a |StreamReader| instance wrapping *stream* and having "endian-ness" determined by the 'MM' or 'II' indicator in the TIFF stream header.
def _process_validation_function_s(validation_func,       # type: ValidationFuncs
                                   auto_and_wrapper=True  # type: bool
                                   ):
    # type: (...) -> Union[Callable, List[Callable]]
    """
    This function handles the various ways that users may enter 'validation functions', so as to output a single
    callable method. Setting "auto_and_wrapper" to False allows callers to get a list of callables instead.

    valid8 supports the following expressions for 'validation functions'
     * <ValidationFunc>
     * List[<ValidationFunc>(s)]. The list must not be empty.

    <ValidationFunc> may either be
     * a callable or a mini-lambda expression (instance of LambdaExpression - in which case it is automatically
       'closed').
     * a Tuple[callable or mini-lambda expression ; failure_type]. Where failure type should be a subclass of
       valid8.Failure. In which case the tuple will be replaced with a _failure_raiser(callable, failure_type)

    When the contents provided do not match the above, this function raises a ValueError. Otherwise it produces a
    list of callables, that will typically be turned into a `and_` in the nominal case except if this is called
    inside `or_` or `xor_`.

    :param validation_func: the base validation function or list of base validation functions to use. A callable, a
        tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested
        lists are supported and indicate an implicit `and_`. Tuples indicate an implicit `_failure_raiser`.
        [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables,
        they will be transformed to functions automatically.
    :param auto_and_wrapper: if True (default), this function returns a single callable that is a and_() of all
        functions. Otherwise a list is returned.
    :return: a single callable wrapping all provided functions in an `and_` (when auto_and_wrapper=True), or the
        list of callables otherwise
    """
    # handle the case where validation_func is not yet a list or is empty or none
    if validation_func is None:
        raise ValueError('mandatory validation_func is None')

    elif not isinstance(validation_func, list):
        # do not use list() because we do not want to convert tuples here.
        validation_func = [validation_func]

    elif len(validation_func) == 0:
        raise ValueError('provided validation_func list is empty')

    # now validation_func is a non-empty list
    final_list = []
    for v in validation_func:
        # special case of a LambdaExpression: automatically convert to a function
        # note: we have to do it before anything else (such as .index) otherwise we may get failures
        v = as_function(v)

        if isinstance(v, tuple):
            # convert all the tuples to failure raisers
            if len(v) == 2:
                if isinstance(v[1], str):
                    final_list.append(_failure_raiser(v[0], help_msg=v[1]))
                elif isinstance(v[1], type) and issubclass(v[1], WrappingFailure):
                    final_list.append(_failure_raiser(v[0], failure_type=v[1]))
                else:
                    raise TypeError('base validation function(s) not compliant with the allowed syntax. Base'
                                    ' validation function(s) can be {}. Found [{}].'.format(supported_syntax, str(v)))
            else:
                raise TypeError('base validation function(s) not compliant with the allowed syntax. Base validation'
                                ' function(s) can be {}. Found [{}].'.format(supported_syntax, str(v)))

        elif callable(v):
            # use the validator directly
            final_list.append(v)

        elif isinstance(v, list):
            # a list is an implicit and_, make it explicit
            final_list.append(and_(*v))

        else:
            raise TypeError('base validation function(s) not compliant with the allowed syntax. Base validation'
                            ' function(s) can be {}. Found [{}].'.format(supported_syntax, str(v)))

    # return what is required:
    if auto_and_wrapper:
        # a single callable doing the 'and'
        return and_(*final_list)
    else:
        # or the list (typically for use inside or_(), xor_()...)
        return final_list
This function handles the various ways that users may enter 'validation functions', so as to output a single callable method. Setting "auto_and_wrapper" to False allows callers to get a list of callables instead. valid8 supports the following expressions for 'validation functions' * <ValidationFunc> * List[<ValidationFunc>(s)]. The list must not be empty. <ValidationFunc> may either be * a callable or a mini-lambda expression (instance of LambdaExpression - in which case it is automatically 'closed'). * a Tuple[callable or mini-lambda expression ; failure_type]. Where failure type should be a subclass of valid8.Failure. In which case the tuple will be replaced with a _failure_raiser(callable, failure_type) When the contents provided do not match the above, this function raises a ValueError. Otherwise it produces a list of callables, that will typically be turned into a `and_` in the nominal case except if this is called inside `or_` or `xor_`. :param validation_func: the base validation function or list of base validation functions to use. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they will be transformed to functions automatically. :param auto_and_wrapper: if True (default), this function returns a single callable that is a and_() of all functions. Otherwise a list is returned. :return: a single callable wrapping all provided functions in an `and_` (when auto_and_wrapper=True), or the list of callables otherwise
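A small sketch of the accepted syntaxes (the validator is made up; `_failure_raiser` and `and_` come from the surrounding module):

def is_positive(x):
    return x > 0

# single callable -> wrapped in and_() when auto_and_wrapper=True
f = _process_validation_function_s(is_positive)

# tuple with a help message -> implicit _failure_raiser(is_positive, help_msg=...)
f = _process_validation_function_s((is_positive, 'x must be positive'))

# nested list -> implicit and_ of its members; auto_and_wrapper=False returns a list
fs = _process_validation_function_s([is_positive, [is_positive, is_positive]],
                                    auto_and_wrapper=False)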
def numberOfDistalSegments(self, cells=None): """ Returns the total number of distal segments for these cells. A segment "exists" if its row in the matrix has any permanence values > 0. Parameters: ---------------------------- @param cells (iterable) Indices of the cells """ if cells is None: cells = xrange(self.numberOfCells()) n = 0 for cell in cells: if self.internalDistalPermanences.nNonZerosOnRow(cell) > 0: n += 1 for permanences in self.distalPermanences: if permanences.nNonZerosOnRow(cell) > 0: n += 1 return n
Returns the total number of distal segments for these cells. A segment "exists" if its row in the matrix has any permanence values > 0. Parameters: ---------------------------- @param cells (iterable) Indices of the cells
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Set CPC Power Capping (any CPC mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) check_required_fields(method, uri, body, ['power-capping-state']) power_capping_state = body['power-capping-state'] power_cap_current = body.get('power-cap-current', None) if power_capping_state not in ['disabled', 'enabled', 'custom']: raise BadRequestError(method, uri, reason=7, message="Invalid power-capping-state value: " "%r" % power_capping_state) if power_capping_state == 'enabled' and power_cap_current is None: raise BadRequestError(method, uri, reason=7, message="Power-cap-current must be provided " "when enabling power capping") cpc.properties['cpc-power-capping-state'] = power_capping_state cpc.properties['cpc-power-cap-current'] = power_cap_current cpc.properties['zcpc-power-capping-state'] = power_capping_state cpc.properties['zcpc-power-cap-current'] = power_cap_current
Operation: Set CPC Power Capping (any CPC mode).
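An example request body this handler accepts, grounded in the validation above (values are illustrative):

body = {
    'power-capping-state': 'enabled',
    'power-cap-current': 18000,  # required when the state is 'enabled'
}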
def handle_event(self, packet): """Handle incoming packet from rflink gateway.""" if packet.get('command'): task = self.send_command_ack(packet['id'], packet['command']) self.loop.create_task(task)
Handle incoming packet from rflink gateway.
def form_sent(request, slug, template="forms/form_sent.html"): """ Show the response message. """ published = Form.objects.published(for_user=request.user) context = {"form": get_object_or_404(published, slug=slug)} return render_to_response(template, context, RequestContext(request))
Show the response message.
def trigger_methods(instance, args): """ Triggers specific class methods using a simple reflection mechanism based on the given input dictionary params. Arguments: instance (object): target instance to dynamically trigger methods. args (iterable): input arguments used to trigger methods or set attributes. Returns: None """ # Start the magic for name in sorted(args): value = args[name] target = instance # If response attributes if name.startswith('response_') or name.startswith('reply_'): name = name.replace('response_', '').replace('reply_', '') # If instance has response attribute, use it if hasattr(instance, '_response'): target = instance._response # Retrieve class member for inspection and future use member = getattr(target, name, None) # Is attribute isattr = name in dir(target) iscallable = ismethod(member) and not isfunction(member) if not iscallable and not isattr: raise PookInvalidArgument('Unsupported argument: {}'.format(name)) # Set attribute or trigger method if iscallable: member(value) else: setattr(target, name, value)
Triggers specific class methods using a simple reflection mechanism based on the given input dictionary params. Arguments: instance (object): target instance to dynamically trigger methods. args (iterable): input arguments used to trigger methods or set attributes. Returns: None
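A sketch of the reflection behaviour; the target class is hypothetical:

class Target(object):
    label = None                 # plain attribute: set via setattr()

    def status(self, code):      # bound method: invoked with the value
        self._status = code

target = Target()
trigger_methods(target, {'status': 404, 'label': 'not-found'})
# -> target._status == 404 and target.label == 'not-found'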