Dataset columns: "Unnamed: 0" (int64, values 0-389k); "code" (string, lengths 26-79.6k); "docstring" (string, lengths 1-46.9k).
19,500
def range_(range, no_border, html):
    from .hand import Range
    border = not no_border
    result = Range(range).to_html() if html else Range(range).to_ascii(border)
    click.echo(result)
Prints the given range in a formatted table, either in plain ASCII or HTML. The only required argument is the range definition, e.g. "A2s+ A5o+ 55+"
19,501
def _compute(self):
    self.min_ = self._min or 0
    self.max_ = self._max or 0
    if self.max_ - self.min_ == 0:
        self.min_ -= 1
        self.max_ += 1
    self._box.set_polar_box(0, 1, self.min_, self.max_)
Compute y min and max and y scale and set labels
19,502
def deprecated(replacement=None, message=None):
    def wrap(old):
        def wrapped(*args, **kwargs):
            msg = "%s is deprecated" % old.__name__
            if replacement is not None:
                if isinstance(replacement, property):
                    r = replacement.fget
                elif isinstance(replacement, (classmethod, staticmethod)):
                    r = replacement.__func__
                else:
                    r = replacement
                msg += "; use %s in %s instead." % (r.__name__, r.__module__)
            if message is not None:
                msg += "\n" + message
            warnings.simplefilter('default')  # 'default' assumed; original filter action literal was lost
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return old(*args, **kwargs)
        return wrapped
    return wrap
Decorator to mark classes or functions as deprecated, with a possible replacement. Args: replacement (callable): A replacement class or method. message (str): A warning message to be displayed. Returns: Original function, but with a warning to use the updated class.
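For illustration, a minimal usage sketch of this decorator (the function name below is invented, and the decorator from the entry above is assumed to be in scope):

import warnings

@deprecated(message="Will be removed in the next release.")
def old_method(x):
    return x * 2

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_method(3)              # returns 6 and emits a DeprecationWarning
    print(caught[0].category)  # <class 'DeprecationWarning'>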
19,503
def hget(self, hashkey, attribute): redis_hash = self._get_hash(hashkey, ) return redis_hash.get(self._encode(attribute))
Emulate hget.
19,504
def get_parser(): "Specifies the arguments and defaults, and returns the parser." parser = argparse.ArgumentParser(prog="hiwenet") parser.add_argument("-f", "--in_features_path", action="store", dest="in_features_path", required=True, help="Abs. path to file containing features for a given subject") parser.add_argument("-g", "--groups_path", action="store", dest="groups_path", required=True, help="path to a file containing element-wise membership into groups/nodes/patches.") parser.add_argument("-w", "--weight_method", action="store", dest="weight_method", default=default_weight_method, required=False, help="Method used to estimate the weight between the pair of nodes. Default : {}".format( default_weight_method)) parser.add_argument("-o", "--out_weights_path", action="store", dest="out_weights_path", default=default_out_weights_path, required=False, help="Where to save the extracted weight matrix. If networkx output is returned, it would be saved in GraphML format. Default: nothing saved.") parser.add_argument("-n", "--num_bins", action="store", dest="num_bins", default=default_num_bins, required=False, help="Number of bins used to construct the histogram. Default : {}".format(default_num_bins)) parser.add_argument("-r", "--edge_range", action="store", dest="edge_range", default=default_edge_range, required=False, nargs = 2, help="The range of edges (two finite values) within which to bin the given values e.g. --edge_range 1 6 " "This can be helpful to ensure correspondence across multiple invocations of hiwenet (for different subjects)," " in terms of range across all bins as well as individual bin edges. " "Default : {}, to automatically compute from the given values.".format(default_edge_range)) parser.add_argument("-t", "--trim_outliers", action="store", dest="trim_outliers", default=default_trim_behaviour, required=False, help="Boolean flag indicating whether to trim the extreme/outlying values. Default True.") parser.add_argument("-p", "--trim_percentile", action="store", dest="trim_percentile", default=default_trim_percentile, required=False, help="Small value specifying the percentile of outliers to trim. " "Default: {0}%% , must be in open interval (0, 100).".format(default_trim_percentile)) parser.add_argument("-x", "--return_networkx_graph", action="store", dest="return_networkx_graph", default=default_return_networkx_graph, required=False, help="Boolean flag indicating whether to return a networkx graph populated with weights computed. Default: False") return parser
Specifies the arguments and defaults, and returns the parser.
19,505
def enabled(name, runas=None): ret = {: name, : True, : , : {}} try: plugin_enabled = __salt__[](name, runas=runas) except CommandExecutionError as err: ret[] = False ret[] = .format(err) return ret if plugin_enabled: ret[] = {0}\.format(name) return ret if not __opts__[]: try: __salt__[](name, runas=runas) except CommandExecutionError as err: ret[] = False ret[] = .format(err) return ret ret[].update({: , : name}) if __opts__[] and ret[]: ret[] = None ret[] = {0}\.format(name) return ret ret[] = {0}\.format(name) return ret
Ensure the RabbitMQ plugin is enabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as
19,506
def find_from(path):
    realpath = os.path.realpath(path)
    config_path = os.path.join(realpath, '.ensime')
    if os.path.isfile(config_path):
        return config_path
    elif realpath == os.path.abspath(os.sep):  # reached the filesystem root
        return None
    else:
        dirname = os.path.dirname(realpath)
        return ProjectConfig.find_from(dirname)
Find path of an .ensime config, searching recursively upward from path. Args: path (str): Path of a file or directory from where to start searching. Returns: str: Canonical path of nearest ``.ensime``, or ``None`` if not found.
19,507
def segment_intersection1(start0, end0, start1, end1, s): if NO_IMAGES: return line0 = bezier.Curve.from_nodes(stack1d(start0, end0)) line1 = bezier.Curve.from_nodes(stack1d(start1, end1)) ax = line0.plot(2) line1.plot(256, ax=ax) (x_val,), (y_val,) = line0.evaluate(s) ax.plot([x_val], [y_val], color="black", marker="o") ax.axis("scaled") save_image(ax.figure, "segment_intersection1.png")
Image for :func:`.segment_intersection` docstring.
19,508
def make_tuple(stream, tuple_key, values, roots=None): component_name = stream.component_name stream_id = stream.id gen_task = roots[0].taskid if roots is not None and len(roots) > 0 else None return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id, task=gen_task, values=values, creation_time=time.time(), roots=roots)
Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId``
19,509
def elasticsearch_matcher(text_log_error): if not settings.ELASTICSEARCH_URL: return [] failure_line = text_log_error.metadata.failure_line if failure_line.action != "test_result" or not failure_line.message: logger.debug("Skipped elasticsearch matching") return filters = [ {: {: failure_line.test}}, {: {: failure_line.status}}, {: {: failure_line.expected}}, {: {: }} ] if failure_line.subtest: query = filters.append({: {: failure_line.subtest}}) query = { : { : { : filters, : [{ : { : failure_line.message[:1024], }, }], }, }, } try: results = search(query) except Exception: logger.error("Elasticsearch lookup failed: %s %s %s %s %s", failure_line.test, failure_line.subtest, failure_line.status, failure_line.expected, failure_line.message) raise if len(results) > 1: args = ( text_log_error.id, failure_line.id, len(results), ) logger.info( % args) newrelic.agent.record_custom_event(, { : len(results), : text_log_error.id, : failure_line.id, }) scorer = MatchScorer(failure_line.message) matches = [(item, item[]) for item in results] best_match = scorer.best_match(matches) if not best_match: return score, es_result = best_match return [(score, es_result[])]
Query Elasticsearch and score the results. Uses a filtered search checking test, status, expected, and the message as a phrase query with non-alphabet tokens removed.
19,510
def _get_dataruns(self):
    if self._data_runs is None:
        raise DataStreamError("Resident datastream don't have dataruns")
    if not self._data_runs_sorted:
        self._data_runs.sort(key=_itemgetter(0))
        self._data_runs_sorted = True
    return [data[1] for data in self._data_runs]
Returns a list of dataruns, in order.
19,511
def main(argv=None): if argv is None: argv = sys.argv parser = U.OptionParser(version="%prog version: $Id$", usage=usage, description=globals()["__doc__"]) group = U.OptionGroup(parser, "count_tab-specific options") group.add_option("--barcode-separator", dest="bc_sep", type="string", help="separator between read id and UMI " " and (optionally) the cell barcode", default="_") group.add_option("--per-cell", dest="per_cell", action="store_true", help="Readname includes cell barcode as well as UMI in " "format: read[sep]UMI[sep]CB") parser.add_option_group(group) (options, args) = U.Start(parser, argv=argv, add_group_dedup_options=False, add_sam_options=False) nInput, nOutput = 0, 0 if options.per_cell: bc_getter = partial( sam_methods.get_cell_umi_read_string, sep=options.bc_sep) else: bc_getter = partial( sam_methods.get_umi_read_string, sep=options.bc_sep) if options.per_cell: options.stdout.write("%s\t%s\t%s\n" % ("cell", "gene", "count")) else: options.stdout.write("%s\t%s\n" % ("gene", "count")) processor = network.UMIClusterer(options.method) for gene, counts in sam_methods.get_gene_count_tab( options.stdin, bc_getter=bc_getter): for cell in counts.keys(): umis = counts[cell].keys() nInput += sum(counts[cell].values()) groups = processor( counts[cell], threshold=options.threshold) gene_count = len(groups) if options.per_cell: options.stdout.write("%s\t%s\t%i\n" % (cell, gene, gene_count)) else: options.stdout.write("%s\t%i\n" % (gene, gene_count)) nOutput += gene_count U.info("Number of reads counted: %i" % nOutput) U.Stop()
Script main; parses command line options in sys.argv, unless *argv* is given.
19,512
def IterateAllClientSnapshots(self, min_last_ping=None, batch_size=50000):
    all_client_ids = self.ReadAllClientIDs(min_last_ping=min_last_ping)
    for batch in collection.Batch(all_client_ids, batch_size):
        res = self.MultiReadClientSnapshot(batch)
        for snapshot in itervalues(res):
            if snapshot:
                yield snapshot
Iterates over all available clients and yields client snapshot objects. Args: min_last_ping: If provided, only snapshots for clients with last-ping timestamps newer than (or equal to) the given value will be returned. batch_size: Always reads <batch_size> snapshots at a time. Yields: An rdfvalues.objects.ClientSnapshot object for each client in the db.
19,513
def _sendline(self, line): logging.info(, self.port) self._lines = [] try: self._read() except socket.error: logging.debug(, self.port) print % line self._write(line + ) time.sleep(0.1)
Send exactly one line to the device. Args: line (str): data sent to the device
19,514
def interp_value(mass, age, feh, icol, grid, mass_col, ages, fehs, grid_Ns): Nage = len(ages) Nfeh = len(fehs) ifeh = searchsorted(fehs, Nfeh, feh) iage = searchsorted(ages, Nage, age) pts = np.zeros((8,3)) vals = np.zeros(8) i_f = ifeh - 1 i_a = iage - 1 Nmass = grid_Ns[i_f, i_a] imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass) pts[0, 0] = grid[i_f, i_a, imass, mass_col] pts[0, 1] = ages[i_a] pts[0, 2] = fehs[i_f] vals[0] = grid[i_f, i_a, imass, icol] pts[1, 0] = grid[i_f, i_a, imass-1, mass_col] pts[1, 1] = ages[i_a] pts[1, 2] = fehs[i_f] vals[1] = grid[i_f, i_a, imass-1, icol] i_f = ifeh - 1 i_a = iage Nmass = grid_Ns[i_f, i_a] imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass) pts[2, 0] = grid[i_f, i_a, imass, mass_col] pts[2, 1] = ages[i_a] pts[2, 2] = fehs[i_f] vals[2] = grid[i_f, i_a, imass, icol] pts[3, 0] = grid[i_f, i_a, imass-1, mass_col] pts[3, 1] = ages[i_a] pts[3, 2] = fehs[i_f] vals[3] = grid[i_f, i_a, imass-1, icol] i_f = ifeh i_a = iage - 1 Nmass = grid_Ns[i_f, i_a] imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass) pts[4, 0] = grid[i_f, i_a, imass, mass_col] pts[4, 1] = ages[i_a] pts[4, 2] = fehs[i_f] vals[4] = grid[i_f, i_a, imass, icol] pts[5, 0] = grid[i_f, i_a, imass-1, mass_col] pts[5, 1] = ages[i_a] pts[5, 2] = fehs[i_f] vals[5] = grid[i_f, i_a, imass-1, icol] i_f = ifeh i_a = iage Nmass = grid_Ns[i_f, i_a] imass = searchsorted(grid[i_f, i_a, :, mass_col], Nmass, mass) pts[6, 0] = grid[i_f, i_a, imass, mass_col] pts[6, 1] = ages[i_a] pts[6, 2] = fehs[i_f] vals[6] = grid[i_f, i_a, imass, icol] pts[7, 0] = grid[i_f, i_a, imass-1, mass_col] pts[7, 1] = ages[i_a] pts[7, 2] = fehs[i_f] vals[7] = grid[i_f, i_a, imass-1, icol] return interp_box(mass, age, feh, pts, vals)
mass, age, feh are *single values* at which values are desired icol is the column index of desired value grid is nfeh x nage x max(nmass) x ncols array mass_col is the column index of mass ages is grid of ages fehs is grid of fehs grid_Ns keeps track of nmass in each slice (beyond this are nans)
19,515
def validate_gcs_path(path, require_object):
    bucket, key = datalab.storage._bucket.parse_name(path)
    if bucket is None:
        raise Exception('Invalid GCS path "%s"' % path)  # error message is an assumption; original literal was lost
    if require_object and key is None:
        raise Exception('"%s" is a bucket path, but an object path is required' % path)
Check whether a given path is a valid GCS path. Args: path: the path to check. require_object: if True, the path has to be an object path, not a bucket path. Raises: Exception if the path is invalid
19,516
def view_dupl_sources(token, dstore): fields = [, , , , ] dic = group_array(dstore[].value[fields], ) sameid = [] dupl = [] for source_id, group in dic.items(): if len(group) > 1: sources = [] for rec in group: geom = dstore[][rec[]:rec[]] src = Source(source_id, rec[], geom, rec[]) sources.append(src) if all_equal(sources): dupl.append(source_id) sameid.append(source_id) if not dupl: return msg = str(dupl) + msg += ( % (len(sameid), len(dupl))) fakedupl = set(sameid) - set(dupl) if fakedupl: msg += % fakedupl.pop() return msg
Show the sources with the same ID and the truly duplicated sources
19,517
def open( self, fs_url, writeable=True, create=False, cwd=".", default_protocol="osfs", ): if "://" not in fs_url: fs_url = "{}://{}".format(default_protocol, fs_url) parse_result = parse_fs_url(fs_url) protocol = parse_result.protocol open_path = parse_result.path opener = self.get_opener(protocol) open_fs = opener.open_fs(fs_url, parse_result, writeable, create, cwd) return open_fs, open_path
Open a filesystem from a FS URL. Returns a tuple of a filesystem object and a path. If there is no path in the FS URL, the path value will be `None`. Arguments: fs_url (str): A filesystem URL. writeable (bool, optional): `True` if the filesystem must be writeable. create (bool, optional): `True` if the filesystem should be created if it does not exist. cwd (str): The current working directory. Returns: (FS, str): a tuple of ``(<filesystem>, <path from url>)``
19,518
def sepBy1(p, sep):
    return separated(p, sep, 1, maxt=float('inf'), end=False)
`sepBy1(p, sep)` parses one or more occurrences of `p`, separated by `sep`. Returns a list of values returned by `p`.
19,519
def _save_json_file(
        self, file, val, pretty=False, compact=True, sort=True, encoder=None
):
    try:
        save_json_file(file, val, pretty, compact, sort, encoder)
    except:
        self.exception("Failed to save to {}".format(file))
        raise IOError("Saving file failed")
Save data to json file :param file: Writable file or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | list | dict :param pretty: Format data to be readable (default: False) :type pretty: bool :param compact: Format data to be compact (default: True) :type compact: bool :param sort: Sort keys (default: True) :type sort: bool :param encoder: Use custom json encoder :type encoder: T <= flotils.loadable.DateTimeEncoder :rtype: None :raises IOError: Failed to save
19,520
def _format_text(self, text):
    text_width = max(self.width - self.current_indent, 11)
    indent = " " * self.current_indent
    return textwrap.fill(text, text_width,
                         initial_indent=indent,
                         subsequent_indent=indent)
Format a paragraph of free-form text for inclusion in the help output at the current indentation level.
19,521
def constant_tuples(self): return [constant_tuple for tuple_prior in self.tuple_prior_tuples for constant_tuple in tuple_prior[1].constant_tuples] + self.direct_constant_tuples
Returns ------- constants: [(String, Constant)]
19,522
def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None,
           dangles='some', sodium=1.0, magnesium=0.0):
    # 'energy' is the NUPACK command named in the docstring; the 'some' dangles
    # default is an assumption (the original default literal was lost)
    material = self._set_material(strand, material)
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                   magnesium, multi=False)
    lines = [str(strand), dotparens]
    stdout = self._run('energy', cmd_args, lines).split()
    return float(stdout[-2])
Calculate the free energy of a given sequence structure. Runs the \'energy\' command. :param strand: Strand on which to run energy. Strands must be either coral.DNA or coral.RNA). :type strand: coral.DNA or coral.RNA :param dotparens: The structure in dotparens notation. :type dotparens: str :param temp: Temperature setting for the computation. Negative values are not allowed. :type temp: float :param pseudo: Enable pseudoknots. :type pseudo: bool :param material: The material setting to use in the computation. If set to None (the default), the material type is inferred from the strands. Other settings available: 'dna' for DNA parameters, 'rna' for RNA (1995) parameters, and 'rna1999' for the RNA 1999 parameters. :type material: str :param dangles: How to treat dangles in the computation. From the user guide: For \'none\': Dangle energies are ignored. For \'some\': \'A dangle energy is incorporated for each unpaired base flanking a duplex\'. For 'all': all dangle energy is considered. :type dangles: str :param sodium: Sodium concentration in solution (molar), only applies to DNA. :type sodium: float :param magnesium: Magnesium concentration in solution (molar), only applies to DNA> :type magnesium: float :returns: The free energy of the sequence with the specified secondary structure. :rtype: float
19,523
def _finalize_ticks(self, axis, element, xticks, yticks, zticks): yalignments = None if xticks is not None: ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0])) xticks = (list(ticks), list(labels)) super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks) if yalignments: for t, y in zip(axis.get_xticklabels(), yalignments): t.set_y(y)
Apply ticks with appropriate offsets.
19,524
def check_errors(self, response): " Check some common errors." content = response.content if not in content: raise self.GeneralError() if content[] == : response._content = content return if not in content: raise self.GeneralError() try: messages = list(content[]) except: raise self.GeneralError("Messages must be a list.") for msg in messages: if in msg and msg[] == : if msg[] == : raise self.NotFoundError(msg[]) elif msg[] == : raise self.TargetExistsError(msg[]) else: raise self.DynectError(msg[]) raise self.GeneralError("We need at least one error message.")
Check some common errors.
19,525
def applyMassCalMs1(msrunContainer, specfile, dataFit, **kwargs):
    toleranceMode = kwargs.get('toleranceMode', 'relative')
    if toleranceMode == 'relative':
        for si in msrunContainer.getItems(specfile,
                                          selector=lambda si: si.msLevel == 1):
            mzArr = msrunContainer.saic[specfile][si.id].arrays['mz']
            corrArr = dataFit.corrArray(mzArr)
            mzArr *= (1 + corrArr)
    elif toleranceMode == 'absolute':
        for si in msrunContainer.getItems(specfile,
                                          selector=lambda si: si.msLevel == 1):
            mzArr = msrunContainer.saic[specfile][si.id].arrays['mz']
            corrArr = dataFit.corrArray(mzArr)
            mzArr += corrArr
    else:
        raise Exception('toleranceMode must be "relative" or "absolute"')
Applies a correction function to the MS1 ion m/z arrays in order to correct for an m/z dependent m/z error. :param msrunContainer: instance of :class:`maspy.core.MsrunContainer`, containing the :class:`maspy.core.Sai` items of the "specfile". :param specfile: filename of an ms-run file to which the m/z calibration should be applied :param dataFit: a :class:`maspy.auxiliary.DataFit` object, containing processed calibration data. :param toleranceMode: "relative" or "absolute" Specifies how the ``massTolerance`` value is applied, by default "relative".
19,526
def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): tmp = copy.deepcopy( __salt__[](scheduled_actions_from_pillar, {}) ) if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp
Helper method for present; ensures scheduled actions are set up.
19,527
def toggle_spot_cfg(self): if self.app.manager.current == : dummyplace = self.screendummyplace self.ids.placetab.remove_widget(dummyplace) dummyplace.clear() if self.app.spotcfg.prefix: dummyplace.prefix = self.app.spotcfg.prefix dummyplace.num = dummynum( self.app.character, dummyplace.prefix ) + 1 dummyplace.paths = self.app.spotcfg.imgpaths self.ids.placetab.add_widget(dummyplace) else: self.app.spotcfg.prefix = self.ids.dummyplace.prefix self.app.spotcfg.toggle()
Show the dialog where you select graphics and a name for a place, or hide it if already showing.
19,528
def as_xml(self):
    result = ElementTree.Element(self.error_qname)
    result.append(deepcopy(self.condition))
    if self.text:
        text = ElementTree.SubElement(result, self.text_qname)
        if self.language:
            text.set(XML_LANG_QNAME, self.language)
        text.text = self.text
    return result
Return the XML error representation. :returntype: :etree:`ElementTree.Element`
19,529
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder,
             label: str = None) -> None:
    self.register_encoder(lookup, encoder, label=label)
    self.register_decoder(lookup, decoder, label=label)
Registers the given ``encoder`` and ``decoder`` under the given ``lookup``. A unique string label may be optionally provided that can be used to refer to the registration by name. :param lookup: A type string or type string matcher function (predicate). When the registry is queried with a type string ``query`` to determine which encoder or decoder to use, ``query`` will be checked against every registration in the registry. If a registration was created with a type string for ``lookup``, it will be considered a match if ``lookup == query``. If a registration was created with a matcher function for ``lookup``, it will be considered a match if ``lookup(query) is True``. If more than one registration is found to be a match, then an exception is raised. :param encoder: An encoder callable or class to use if ``lookup`` matches a query. If ``encoder`` is a callable, it must accept a python value and return a ``bytes`` value. If ``encoder`` is a class, it must be a valid subclass of :any:`encoding.BaseEncoder` and must also implement the :any:`from_type_str` method on :any:`base.BaseCoder`. :param decoder: A decoder callable or class to use if ``lookup`` matches a query. If ``decoder`` is a callable, it must accept a stream-like object of bytes and return a python value. If ``decoder`` is a class, it must be a valid subclass of :any:`decoding.BaseDecoder` and must also implement the :any:`from_type_str` method on :any:`base.BaseCoder`. :param label: An optional label that can be used to refer to this registration by name. This label can be used to unregister an entry in the registry via the :any:`unregister` method and its variants.
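A rough usage sketch of this registration API, with hypothetical encoder/decoder callables for a made-up "bool" type string (``registry`` is assumed to be an instance of the class above):

def encode_bool(value):
    return b"\x01" if value else b"\x00"

def decode_bool(stream):
    return stream.read(1) == b"\x01"

registry.register("bool", encode_bool, decode_bool, label="bool")
# the label can later be used to remove the entry again, per the docstring:
# registry.unregister("bool")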
19,530
def _wait_for_exec_ready(self): while not self.response_received.wait(1) and self.query_timeout != 0: if self.query_timeout != 0 and self.query_timeout < self.get_time(): if self.prev: cmd = self.prev.cmd else: cmd = "???" self.logger.error("CMD timeout: "+ cmd) self.query_timeout = 0 raise TestStepTimeout(self.name + " CMD timeout: " + cmd) self.logger.debug("Waiting for response... " "timeout=%d", self.query_timeout - self.get_time()) self._dut_is_alive() if self.response_coming_in == -1: if self.query_async_response is not None: self.query_async_response.set_response(CliResponse()) self.query_async_response = None self.logger.error("No response received, DUT died") raise TestStepError("No response received, DUT "+self.name+" died") if self.query_async_response is not None: self.query_async_response.set_response(self.response_coming_in) self.query_async_response = None self.query_timeout = 0 return self.response_coming_in
Wait for response. :return: CliResponse object coming in :raises: TestStepTimeout, TestStepError
19,531
def gen(self): data_hash = self.get_hash() return "{prefix}{hash}".format(prefix=self._prefix, hash=data_hash)
Generate stable LogicalIds based on the prefix and given data. This method ensures that the logicalId is deterministic and stable based on input prefix & data object. In other words: logicalId changes *if and only if* either the `prefix` or `data_obj` changes Internally we simply use a SHA1 of the data and append to the prefix to create the logicalId. NOTE: LogicalIDs are how CloudFormation identifies a resource. If this ID changes, CFN will delete and create a new resource. This can be catastrophic for most resources. So it is important to be *always* backwards compatible here. :return: LogicalId that can be used to construct resources :rtype string
19,532
def post_signup(self, user, login_user=None, send_email=None): self.signup_signal.send(self, user=user) if (login_user is None and self.options["login_user_on_signup"]) or login_user: self._login(user, user.signup_provider) to_email = getattr(user, self.options["email_column"], None) if to_email and ((send_email is None and self.options["send_welcome_email"]) or send_email): template = "users/welcome.txt" if self.options["send_welcome_email"] == True else self.options["send_welcome_email"] current_app.features.emails.send(to_email, template, user=user)
Executes post signup actions: sending the signal, logging in the user and sending the welcome email
19,533
def get_objectives(self):
    if self.retrieved:
        raise errors.IllegalState()
    self.retrieved = True
    return objects.ObjectiveList(self._results, runtime=self._runtime)
Gets the objective list resulting from the search. return: (osid.learning.ObjectiveList) - the objective list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.*
19,534
def get_unix_ioctl_terminal_size():
    def ioctl_gwinsz(fd):
        try:
            import fcntl
            import termios
            import struct
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (IOError, OSError):
            return None
    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not cr:
        try:
            f = open(os.ctermid())
            cr = ioctl_gwinsz(f.fileno())
            f.close()
        except (IOError, OSError):
            pass
    if not cr:
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except KeyError:
            return None
    return int(cr[1]), int(cr[0])
Get the terminal size of a UNIX terminal using the ioctl UNIX command.
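A small usage sketch of the helper above:

size = get_unix_ioctl_terminal_size()
if size is None:
    print("not attached to a terminal")
else:
    columns, rows = size
    print("terminal is %d columns x %d rows" % (columns, rows))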
19,535
def upload(self):
    if self.upload_method == "setup":
        self.upload_by_setup()
    if self.upload_method == "twine":
        self.upload_by_twine()
    if self.upload_method == "gemfury":
        self.upload_by_gemfury()
upload via the method configured :return:
19,536
def getChangeSets(self): changeset_tag = ("rtc_cm:com.ibm.team.filesystem.workitems." "change_set.com.ibm.team.scm.ChangeSet") return (self.rtc_obj ._get_paged_resources("ChangeSet", workitem_id=self.identifier, customized_attr=changeset_tag, page_size="10"))
Get all the ChangeSets of this workitem :return: a :class:`list` contains all the :class:`rtcclient.models.ChangeSet` objects :rtype: list
19,537
async def get_object(self, Bucket: str, Key: str, **kwargs) -> dict: if self._s3_client is None: await self.setup() return s3_response
S3 GetObject. Takes same args as Boto3 documentation Decrypts any CSE :param Bucket: S3 Bucket :param Key: S3 Key (filepath) :return: returns same response as a normal S3 get_object
19,538
def wrsamp(self, expanded=False, write_dir=): self.wrheader(write_dir=write_dir) if self.n_sig > 0: self.wr_dats(expanded=expanded, write_dir=write_dir)
Write a wfdb header file and any associated dat files from this object. Parameters ---------- expanded : bool, optional Whether to write the expanded signal (e_d_signal) instead of the uniform signal (d_signal). write_dir : str, optional The directory in which to write the files.
19,539
def as_ul(self, show_leaf=True, current_linkable=False, class_current="active_link"): return self.__do_menu("as_ul", show_leaf, current_linkable, class_current)
It returns breadcrumb as ul
19,540
def _fuzzy_custom_query(issn, titles): custom_queries = journal_titles.load(issn).get(, []) titles = [{: i} for i in titles if i not in [x[] for x in custom_queries]] titles.extend(custom_queries) for item in titles: if len(item[].strip()) == 0: continue query = { "fuzzy": { "reference_source_cleaned": { "value": utils.cleanup_string(item[]), "fuzziness": item.get(, 3), "max_expansions": 50 } } } yield query
This method builds the list of journal-title filters that is applied to the boolean search as a "should" similarity match. The filter list is collected from the journal's custom search template, when that template exists.
19,541
def public_copy(self): d = dict(chain_code=self._chain_code, depth=self._depth, parent_fingerprint=self._parent_fingerprint, child_index=self._child_index, public_pair=self.public_pair()) return self.__class__(**d)
Yield the corresponding public node for this node.
19,542
def get(self, request, bot_id, id, format=None): return super(HookDetail, self).get(request, bot_id, id, format)
Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated
19,543
def cmd_show(docid):
    dsearch = get_docsearch()
    doc = dsearch.get(docid)
    r = {
        'type': str(type(doc)),
        'nb_pages': doc.nb_pages,
        'labels': [l.name for l in doc.labels],
        'first_line': _get_first_line(doc),
        'pages': [],
    }
    for page in doc.pages:
        nb_lines = 0
        nb_words = 0
        for line in page.boxes:
            nb_lines += 1
            nb_words += len(line.word_boxes)
        r['pages'].append({
            "n": page.page_nb + 1,
            "lines": nb_lines,
            "words": nb_words,
        })
    reply(r)
Arguments: <doc_id> Show document information (but not its content, see 'dump'). See 'search' for the document id. Possible JSON replies: -- { "status": "error", "exception": "yyy", "reason": "xxxx", "args": "(xxxx, )" } -- { "status": "ok", "type": "ImgDoc", "nb_pages": 3, "pages": [ {"n": 1, "lines": 10, "words": 22}, {"n": 2, "lines": 20, "words": 22}, {"n": 3, "lines": 30, "words": 34}, ], "labels": ["aaa", "bbb"], "first_line": "vwklsd wldkwq", }
19,544
def make_witness_input_and_witness(outpoint, sequence, stack=None, **kwargs):
    if 'decred' in riemann.get_current_network_name():
        return (make_witness_input(outpoint, sequence),
                make_decred_witness(value=kwargs['value'],
                                    height=kwargs['height'],
                                    index=kwargs['index'],
                                    stack_script=kwargs['stack_script'],
                                    redeem_script=kwargs['redeem_script']))
    return (make_witness_input(outpoint, sequence), make_witness(stack))
Outpoint, int, list(bytearray) -> (Input, InputWitness)
19,545
def result(self): return {k: torch.stack(v) for k, v in self.accumulants.items()}
Stack the accumulated tensors.
19,546
def describe_keypairs(self, *keypair_names):
    keypairs = {}
    for index, keypair_name in enumerate(keypair_names):
        keypairs["KeyName.%d" % (index + 1)] = keypair_name
    query = self.query_factory(
        action="DescribeKeyPairs", creds=self.creds,
        endpoint=self.endpoint, other_params=keypairs)
    d = query.submit()
    return d.addCallback(self.parser.describe_keypairs)
Returns information about key pairs available.
19,547
def subprogram_prototype(vo): plist = .join(str(p) for p in vo.parameters) if isinstance(vo, VhdlFunction): if len(vo.parameters) > 0: proto = .format(vo.name, plist, vo.return_type) else: proto = .format(vo.name, vo.return_type) else: proto = .format(vo.name, plist) return proto
Generate a canonical prototype string Args: vo (VhdlFunction, VhdlProcedure): Subprogram object Returns: Prototype string.
19,548
def create_table(cls):
    schema_editor = getattr(connection, 'schema_editor', None)
    if schema_editor:
        with schema_editor() as schema_editor:
            schema_editor.create_model(cls)
    else:
        raw_sql, _ = connection.creation.sql_create_model(
            cls, no_style(), [])
        cls.delete_table()
        cursor = connection.cursor()
        try:
            cursor.execute(*raw_sql)
        finally:
            cursor.close()
create_table Manually create a temporary table for the model in the test database. :return:
19,549
def find(self, title):
    files = backend.iterfiles(self._drive, name=title)
    try:
        return next(self[id] for id, _ in files)
    except StopIteration:
        raise KeyError(title)
Fetch and return the first spreadsheet with the given title. Args: title(str): title/name of the spreadsheet to return Returns: SpreadSheet: new SpreadSheet instance Raises: KeyError: if no spreadsheet with the given ``title`` is found
19,550
def tgread_bool(self):
    value = self.read_int(signed=False)
    if value == 0x997275b5:  # boolTrue constructor id
        return True
    elif value == 0xbc799737:  # boolFalse constructor id
        return False
    else:
        raise RuntimeError('Invalid boolean code {}'.format(hex(value)))
Reads a Telegram boolean value.
19,551
def delete_queue(queues): current_queues.delete(queues=queues) click.secho( .format( queues or current_queues.queues.keys()), fg= )
Delete the given queues.
19,552
def random_filter(objects, reduction_factor, seed=42):
    assert 0 < reduction_factor <= 1, reduction_factor
    rnd = random.Random(seed)
    out = []
    for obj in objects:
        if rnd.random() <= reduction_factor:
            out.append(obj)
    return out
Given a list of objects, returns a sublist obtained by randomly extracting some elements. The reduction factor (< 1) tells how small the extracted list is compared to the original list.
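A minimal usage sketch (assuming the random_filter helper above is in scope):

items = list(range(100))
subset = random_filter(items, reduction_factor=0.1, seed=42)
print(len(subset))   # roughly 10 elements; the same seed always yields the same subset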
19,553
def pos(self): if self._pos is None: tr = self.visual.get_transform(, ) self._pos = tr.map(self.mouse_event.pos) return self._pos
The position of this event in the local coordinate system of the visual.
19,554
def _get_owner_cover_photo_upload_server(session, group_id, crop_x=0, crop_y=0, crop_x2=795, crop_y2=200): group_id = abs(group_id) response = session.fetch("photos.getOwnerCoverPhotoUploadServer", group_id=group_id, crop_x=crop_x, crop_y=crop_y, crop_x2=crop_x2, crop_y2=crop_y2) return response[]
https://vk.com/dev/photos.getOwnerCoverPhotoUploadServer
19,555
def parse(self, rrstr): if self._initialized: raise pycdlibexception.PyCdlibInternalError() (su_len, su_entry_version_unused, check_byte1, check_byte2, self.bytes_to_skip) = struct.unpack_from(, rrstr[:7], 2) self._initialized = True
Parse a Rock Ridge Sharing Protocol record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing.
19,556
def _get_aggregated_node_list(self, data): node_list = [] for node in data: local_addresses = [node[]] if in node: local_addresses += node[] node_list.append(local_addresses) return node_list
Returns list of main and secondary mac addresses.
19,557
def color_from_hls(hue, light, sat):
    if light > 0.95:
        return -1
    else:
        hue = (-hue + 1 + 2.0 / 3.0) % 1
        return int(floor(hue * 256))
Takes an HLS color and converts it to the proper hue. Bulbs use a BGR order instead of RGB.
19,558
def delay(self, seconds=0, minutes=0):
    minutes += int(seconds / 60)
    seconds = seconds % 60
    seconds += float(minutes * 60)
    self.robot.pause()
    if not self.robot.is_simulating():
        _sleep(seconds)
    self.robot.resume()
    return self
Parameters ---------- seconds: float The number of seconds to freeze in place. minutes: float The number of minutes to freeze in place (added to ``seconds``).
19,559
def __request_mark_sent(self, requestId):
    with self.__requests:
        try:
            req = self.__requests[requestId]
        except KeyError:
            pass
        else:
            req.exception = None
            req._send_time = monotonic()
Set send time & clear exception from request if set, ignoring non-existent requests
19,560
async def build_proof_req_json(self, cd_id2spec: dict, cache_only: bool = False) -> str: LOGGER.debug(, cd_id2spec, cache_only) cd_id2schema = {} now = int(time()) proof_req = { : str(int(time())), : , : , : {}, : {} } for cd_id in cd_id2spec: interval = None cred_def = json.loads(await self.get_cred_def(cd_id)) seq_no = cred_def_id2seq_no(cd_id) cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no)) if in cred_def[]: if cache_only and not (cd_id2spec.get(cd_id, {}) or {}).get(, None): with REVO_CACHE.lock: (fro, to) = REVO_CACHE.dflt_interval(cd_id) if not (fro and to): LOGGER.debug( , cd_id) raise AbsentInterval(.format(cd_id)) interval = { : fro, : to } else: fro_to = cd_id2spec[cd_id].get(, (now, now)) if cd_id2spec[cd_id] else (now, now) interval = { : fro_to if isinstance(fro_to, int) else min(fro_to), : fro_to if isinstance(fro_to, int) else max(fro_to) } for attr in (cd_id2spec[cd_id].get(, cd_id2schema[cd_id][]) or [] if cd_id2spec[cd_id] else cd_id2schema[cd_id][]): attr_uuid = .format(seq_no, attr) proof_req[][attr_uuid] = { : attr, : [{ : cd_id }] } if interval: proof_req[][attr_uuid][] = interval for attr in (cd_id2spec[cd_id].get(, {}) or {} if cd_id2spec[cd_id] else {}): pred_uuid = .format(seq_no, attr) try: proof_req[][pred_uuid] = { : attr, : , : int(cd_id2spec[cd_id][][attr]), : [{ : cd_id }] } except ValueError: LOGGER.info( , cd_id2spec[cd_id][][attr], attr) continue if interval: proof_req[][pred_uuid][] = interval rv_json = json.dumps(proof_req) LOGGER.debug(, rv_json) return rv_json
Build and return indy-sdk proof request for input attributes and timestamps by cred def id. Raise AbsentInterval if caller specifies cache_only and default non-revocation intervals, but revocation cache does not have delta frames for any revocation registries on a specified cred def. :param cd_id2spec: dict mapping cred def ids to: - (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none) - (optionally) 'minima': (pred) integer lower-bounds of interest (omit, empty list, or None for none) - (optionally), 'interval': (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or single epoch second count to set 'from' and 'to' the same: default (now, now) if cache_only is clear, or latest values from cache if cache_only is set. e.g., :: { 'Vx4E82R17q...:3:CL:16:0': { 'attrs': [ # request attrs 'name' and 'favouriteDrink' from this cred def's schema 'name', 'favouriteDrink' ], 'minima': { # request predicate score>=80 from this cred def 'score': 80 } 'interval': 1528116008 # same instant for all attrs and preds of corresponding schema }, 'R17v42T4pk...:3:CL:19:0': None, # request all attrs, no preds, default intervals on all attrs 'e3vc5K168n...:3:CL:23:0': {}, # request all attrs, no preds, default intervals on all attrs 'Z9ccax812j...:3:CL:27:0': { # request all attrs, no preds, this interval on all attrs 'interval': (1528112408, 1528116008) }, '9cHbp54C8n...:3:CL:37:0': { # request no attrs, one pred, specify interval on pred 'attrs': [], # or equivalently, 'attrs': None 'minima': { 'employees': '50' # nicety: implementation converts to int for caller }, 'interval': (1528029608, 1528116008) }, '6caBcmLi33...:3:CL:41:0': { # all attrs, one pred, default intervals to now on attrs & pred 'minima': { 'regEpoch': 1514782800 } } ... } :param cache_only: (True) take default intervals (per cred def id) from latest cached deltas, or (default False) use current time :return: indy-sdk proof request json
19,561
def result(self, state, action):
    rows = string_to_list(state)
    row_e, col_e = find_location(rows, 'e')  # 'e' marks the empty space (assumed; original literal was lost)
    row_n, col_n = find_location(rows, action)
    rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]
    return list_to_string(rows)
Return the resulting state after moving a piece to the empty space. (the "action" parameter contains the piece to move)
19,562
def _iso_handler(obj):
    if hasattr(obj, 'isoformat'):
        result = obj.isoformat()
    else:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return result
Transforms an object into its ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into its ISO format :return: the ISO format of the object
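A short usage sketch: the handler is meant to be passed as the ``default`` hook of ``json.dumps`` so that datetime values serialize cleanly.

import datetime
import json

payload = {"created": datetime.datetime(2020, 1, 1, 12, 30)}
print(json.dumps(payload, default=_iso_handler))
# {"created": "2020-01-01T12:30:00"}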
19,563
def get_advances_declines(self, as_json=False): url = self.advances_declines_url req = Request(url, None, self.headers) resp = self.opener.open(req) resp = byte_adaptor(resp) resp_dict = json.load(resp) resp_list = [self.clean_server_response(item) for item in resp_dict[]] return self.render_response(resp_list, as_json)
:return: a list of dictionaries with advance decline data :raises: URLError, HTTPError
19,564
def is_avro(path_or_buffer):
    if is_str(path_or_buffer):
        fp = open(path_or_buffer, 'rb')
        close = True
    else:
        fp = path_or_buffer
        close = False
    try:
        header = fp.read(len(MAGIC))
        return header == MAGIC
    finally:
        if close:
            fp.close()
Return True if path (or buffer) points to an Avro file. Parameters ---------- path_or_buffer: path to file or file-like object Path to file
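A quick usage sketch (the file name here is hypothetical):

if is_avro("events.avro"):
    print("events.avro looks like an Avro container file")
with open("events.avro", "rb") as fp:
    print(is_avro(fp))   # file-like objects work too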
19,565
def sendhello(self): try: cli_hello_msg = "<hello>\n" +\ " <capabilities>\n" +\ " <capability>urn:ietf:params:netconf:base:1.0</capability>\n" +\ " </capabilities>\n" +\ "</hello>\n" self._cParams.set(, cli_hello_msg) self._hConn.sendmsg(cli_hello_msg) ser_hello_msg = self._hConn.recvmsg() self._cParams.set(, ser_hello_msg) except: print sys.exit()
Exchange capabilities: send the client hello message (base:1.0) and store the server's hello reply in the connection parameters.
19,566
def field(self, name): if self.fields.has_key(name): return self.fields[name] elif self.extends: if not self.parent: self.parent = self.contract.struct(self.extends) return self.parent.field(name) else: return None
Returns the field on this struct with the given name. Will try to find this name on all ancestors if this struct extends another. If found, returns a dict with keys: 'name', 'comment', 'type', 'is_array' If not found, returns None :Parameters: name string name of field to lookup
19,567
def create_marking_iobject(self, uid=None, timestamp=timezone.now(), metadata_dict=None, id_namespace_uri=DINGOS_DEFAULT_ID_NAMESPACE_URI, iobject_family_name=DINGOS_IOBJECT_FAMILY_NAME, iobject_family_revison_name=DINGOS_REVISION_NAME, iobject_type_name=DINGOS_DEFAULT_IMPORT_MARKING_TYPE_NAME, iobject_type_namespace_uri=DINGOS_NAMESPACE_URI, iobject_type_revision_name=DINGOS_REVISION_NAME, ): if not uid: uid = uuid.uuid1() iobject, created = self.create_iobject(iobject_family_name=iobject_family_name, iobject_family_revision_name=iobject_family_revison_name, iobject_type_name=iobject_type_name, iobject_type_namespace_uri=iobject_type_namespace_uri, iobject_type_revision_name=iobject_type_revision_name, iobject_data=metadata_dict, uid=uid, identifier_ns_uri=id_namespace_uri, timestamp=timestamp, ) return iobject
A specialized version of create_iobject with defaults set such that a default marking object is created.
19,568
def _check_samples_line(klass, arr): if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER): if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER: raise exceptions.IncorrectVCFFormat( "Sample header line indicates no sample but does not " "equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER)) ) elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER: raise exceptions.IncorrectVCFFormat( "start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER)) )
Perform additional checks on the samples line
19,569
def user_lookup(self, ids, id_type="user_id"): if id_type not in [, ]: raise RuntimeError("id_type must be user_id or screen_name") if not isinstance(ids, types.GeneratorType): ids = iter(ids) lookup_ids = [] def do_lookup(): ids_str = ",".join(lookup_ids) log.info("looking up users %s", ids_str) url = params = {id_type: ids_str} try: resp = self.get(url, params=params, allow_404=True) except requests.exceptions.HTTPError as e: if e.response.status_code == 404: log.warning("no users matching %s", ids_str) raise e return resp.json() for id in ids: lookup_ids.append(id.strip()) if len(lookup_ids) == 100: for u in do_lookup(): yield u lookup_ids = [] if len(lookup_ids) > 0: for u in do_lookup(): yield u
A generator that returns users for supplied user ids, screen_names, or an iterator of user_ids of either. Use the id_type to indicate which you are supplying (user_id or screen_name)
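A usage sketch, assuming ``client`` is an instance of the class providing this method:

screen_names = ["nasa", "nytimes"]
for user in client.user_lookup(screen_names, id_type="screen_name"):
    print(user["id_str"], user["screen_name"])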
19,570
def _generate_type_code_query(self, value): mapping_for_value = self.TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING.get(value, None) if mapping_for_value: return generate_match_query(*mapping_for_value, with_operator_and=True) else: return { : { : 1, : [ generate_match_query(, value, with_operator_and=True), generate_match_query(, value, with_operator_and=True), ] } }
Generate type-code queries. Notes: If the value of the type-code query exists in `TYPECODE_VALUE_TO_FIELD_AND_VALUE_PAIRS_MAPPING, then we query the specified field, along with the given value according to the mapping. See: https://github.com/inspirehep/inspire-query-parser/issues/79 Otherwise, we query both ``document_type`` and ``publication_info``.
19,571
def convert_rect(self, rect): return self.container.convert_rect(rect.move(self.pos))
Converts the relative position of @rect into an absolute position.To be used for event considerations, blitting is handled directly by the Container().
19,572
def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None, horizon=False): mass1 = units.Quantity(mass1, ).to() mass2 = units.Quantity(mass2, ).to() mtotal = mass1 + mass2 fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to() fmax = units.Quantity(fmax or fisco, ) if fmax > fisco: warnings.warn("Upper frequency bound greater than %s-%s ISCO " "frequency of %s, using ISCO" % (mass1, mass2, fisco)) fmax = fisco if fmin is None: fmin = psd.df fmin = units.Quantity(fmin, ) f = psd.frequencies.to() condition = (f >= fmin) & (f < fmax) integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1, mass2=mass2, horizon=horizon) result = units.Quantity( integrate.trapz(integrand.value, f.value[condition]), unit=integrand.unit * units.Hertz) return (result ** (1/2.)).to()
Calculate the inspiral sensitive distance from a GW strain PSD The method returns the distance (in megaparsecs) to which an compact binary inspiral with the given component masses would be detectable given the instrumental PSD. The calculation is as defined in: https://dcc.ligo.org/LIGO-T030276/public Parameters ---------- psd : `~gwpy.frequencyseries.FrequencySeries` the instrumental power-spectral-density data snr : `float`, optional the signal-to-noise ratio for which to calculate range, default: `8` mass1 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the first binary component, default: `1.4` mass2 : `float`, `~astropy.units.Quantity`, optional the mass (`float` assumed in solar masses) of the second binary component, default: `1.4` fmin : `float`, optional the lower frequency cut-off of the integral, default: `psd.df` fmax : `float`, optional the maximum frequency limit of the integral, defaults to innermost stable circular orbit (ISCO) frequency horizon : `bool`, optional if `True`, return the maximal 'horizon' sensitive distance, otherwise return the angle-averaged range, default: `False` Returns ------- range : `~astropy.units.Quantity` the calculated inspiral range [Mpc] Examples -------- Grab some data for LIGO-Livingston around GW150914 and generate a PSD >>> from gwpy.timeseries import TimeSeries >>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> hoff = hoft.psd(fftlength=4) Now we can calculate the :func:`inspiral_range`: >>> from gwpy.astro import inspiral_range >>> r = inspiral_range(hoff, fmin=30) >>> print(r) 70.4612102889 Mpc
19,573
def do_stack(self,args): parser = CommandArgumentParser("stack") parser.add_argument(dest=,help=); args = vars(parser.parse_args(args)) print "loading stack {}".format(args[]) try: index = int(args[]) stackSummary = self.wrappedStack[][][index] except ValueError: stackSummary = self.wrappedStack[][][args[]] self.stackResource(stackSummary.stack_name,stackSummary.logical_id)
Go to the specified stack. stack -h for detailed help.
19,574
def all_instr(self, start, end, instr, target=None, include_beyond_target=False): code = self.code assert(start >= 0 and end <= len(code)) try: None in instr except: instr = [instr] result = [] extended_arg = 0 for offset in self.op_range(start, end): op = code[offset] if op == self.opc.EXTENDED_ARG: arg = code2num(code, offset+1) | extended_arg extended_arg = extended_arg_val(self.opc, arg) continue if op in instr: if target is None: result.append(offset) else: t = self.get_target(offset, extended_arg) if include_beyond_target and t >= target: result.append(offset) elif t == target: result.append(offset) pass pass pass extended_arg = 0 pass return result
Find all `instr` in the block from start to end. `instr` is any Python opcode or a list of opcodes If `instr` is an opcode with a target (like a jump), a target destination can be specified which must match precisely. Return a list with indexes to them or [] if none found.
19,575
def update_config(self, d): for key, value in d.items(): if hasattr(self, key): if key == "requirements": items, value = value, [] for item in items: if isinstance(item, basestring): req = RequirementConfig(path=item) elif isinstance(item, dict): path, item = item.popitem() req = RequirementConfig( path=path, pin=item.get("pin", None), compile=item.get("compile", False), update=item.get("update", Config.UPDATE_ALL) ) value.append(req) if req.compile: for spec in req.compile.specs: value.append(RequirementConfig(path=spec, pin=False)) elif key == "assignees": if "|" in value: continue if isinstance(value, (int, float)) and not isinstance(value, bool): value = str(value) setattr(self, key, value)
Updates the config object. :param d: dict
19,576
def list_all_zip_codes_geo_zones(cls, **kwargs): kwargs[] = True if kwargs.get(): return cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs) else: (data) = cls._list_all_zip_codes_geo_zones_with_http_info(**kwargs) return data
List ZipCodesGeoZones Return a list of ZipCodesGeoZones This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_zip_codes_geo_zones(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ZipCodesGeoZone] If the method is called asynchronously, returns the request thread.
19,577
def _register_elements(self, elements): self.elements = elements for key, obj in elements.items(): obj.contribute_to_class(self.metadata, key) fields = {} for key, obj in elements.items(): if obj.editable: field = obj.get_field() if not field.help_text: if key in self.bulk_help_text: field.help_text = self.bulk_help_text[key] fields[key] = field base_meta = type(, (), self.original_meta) class BaseMeta(base_meta): abstract = True app_label = fields[] = BaseMeta fields[] = __name__ self.MetadataBaseModel = type( % self.name, (models.Model,), fields)
Takes elements from the metadata class and creates a base model for all backend models.
19,578
def tlog_inv(y, th=1, r=_display_max, d=_l_mmax):
    if th <= 0:
        raise ValueError('th must be positive, got %s' % th)  # message is an assumption; original literal was lost
    x = 10 ** (y * 1. * d / r)
    try:
        x[x < th] = th
    except TypeError:
        if x < th:
            x = th
    return x
Inverse truncated log10 transform. Parameters ---------- y : num | num iterable values to be transformed. th : num Inverse values below th are transformed to th. Must be positive. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. tlog_inv(r) = 10**d Returns ------- Array of transformed values.
19,579
def adjustWPPointer(self): self.headingWPText.set_size(self.fontSize) headingRotate = mpl.transforms.Affine2D().rotate_deg_around(0.0,0.0,-self.wpBearing+self.heading)+self.axes.transData self.headingWPText.set_transform(headingRotate) angle = self.wpBearing - self.heading if angle < 0: angle += 360 if (angle > 90) and (angle < 270): headRot = angle-180 else: headRot = angle self.headingWPText.set_rotation(-headRot) self.headingWPTri.set_transform(headingRotate) self.headingWPText.set_text( % (angle))
Adjust the position and orientation of the waypoint pointer.
19,580
def get_summarizer(self, name): if name in self.summarizers: pass elif name == : from . import lexrank self.summarizers[name] = lexrank.summarize elif name == : from . import mcp_summ self.summarizers[name] = mcp_summ.summarize return self.summarizers[name]
import summarizers on-demand
19,581
def _get_object_from_python_path(python_path):
    python_path = python_path.split('.')
    module_path = python_path[:-1]
    object_class = python_path[-1]
    if isinstance(module_path, list):
        module_path = '.'.join(module_path)
    module = import_module(module_path)
    schema = getattr(module, object_class)
    if isclass(schema):
        schema = schema()
    return schema
Method that will fetch a Marshmallow schema from a path to it. Args: python_path (str): The string path to the Marshmallow schema. Returns: marshmallow.Schema: The schema matching the provided path. Raises: TypeError: This is raised if the specified object isn't a Marshmallow schema.
19,582
def _Reg2Py(data, size, data_type): if data_type == winreg.REG_DWORD: if size == 0: return 0 return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value elif data_type == winreg.REG_SZ or data_type == winreg.REG_EXPAND_SZ: return ctypes.wstring_at(data, size // 2).rstrip(u"\x00") elif data_type == winreg.REG_MULTI_SZ: return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00") else: if size == 0: return None return ctypes.string_at(data, size)
Converts a Windows Registry value to the corresponding Python data type.
19,583
def get_instance(self, payload):
    return ReservationInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
        task_sid=self._solution['task_sid'],
    )
Build an instance of ReservationInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
19,584
def fit(self, X): logger.info() if 0.0 >= self.lam: raise ValueError("Gamma parameter should be positive.") if len(X) <= 1: raise ValueError("There are not enough subjects in the input " "data to train the model.") if X[0].shape[1] < self.features: raise ValueError( "There are not enough timepoints to train the model with " "{0:d} features.".format(self.features)) number_trs = X[0].shape[1] number_subjects = len(X) for subject in range(number_subjects): assert_all_finite(X[subject]) if X[subject].shape[1] != number_trs: raise ValueError("Different number of alignment timepoints " "between subjects.") self.random_state_ = np.random.RandomState(self.rand_seed) self.w_, self.r_, self.s_ = self._rsrm(X) return self
Compute the Robust Shared Response Model Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data of one subject.
19,585
def read_saved_screenshot_to_array(self, screen_id, bitmap_format): if not isinstance(screen_id, baseinteger): raise TypeError("screen_id can only be an instance of type baseinteger") if not isinstance(bitmap_format, BitmapFormat): raise TypeError("bitmap_format can only be an instance of type BitmapFormat") (data, width, height) = self._call("readSavedScreenshotToArray", in_p=[screen_id, bitmap_format]) return (data, width, height)
Screenshot in requested format is retrieved to an array of bytes. in screen_id of type int Saved guest screen to read from. in bitmap_format of type :class:`BitmapFormat` The requested format. out width of type int Image width. out height of type int Image height. return data of type str Array with resulting image data.
19,586
def modulate(data): seconds_per_sample = 1.0 / audiogen.sampler.FRAME_RATE phase, seconds, bits = 0, 0, 0 clock = (x / BAUD_RATE for x in itertools.count(1)) tones = (MARK_HZ if bit else SPACE_HZ for bit in data) for boundary, frequency in itertools.izip(clock, tones): phase_change_per_sample = TWO_PI / (audiogen.sampler.FRAME_RATE / frequency) while seconds < boundary: yield math.sin(phase) seconds += seconds_per_sample phase += phase_change_per_sample if phase > TWO_PI: phase -= TWO_PI bits += 1 logger.debug("bits = %d, time = %.7f ms, expected time = %.7f ms, error = %.7f ms, baud rate = %.6f Hz" \ % (bits, 1000 * seconds, 1000 * bits / BAUD_RATE, 1000 * (seconds - bits / BAUD_RATE), bits / seconds))
Generate Bell 202 AFSK samples for the given symbol generator Consumes raw wire symbols and produces the corresponding AFSK samples.
19,587
def assemble( iterable, patterns=None, minimum_items=2, case_sensitive=True, assume_padded_when_ambiguous=False ): collectionsremainder collection_map = defaultdict(set) collections = [] remainder = [] flags = 0 if not case_sensitive: flags |= re.IGNORECASE compiled_patterns = [] if patterns is not None: if not patterns: return collections, list(iterable) for pattern in patterns: if isinstance(pattern, basestring): compiled_patterns.append(re.compile(pattern, flags=flags)) else: compiled_patterns.append(pattern) else: compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags)) for item in iterable: matched = False for pattern in compiled_patterns: for match in pattern.finditer(item): index = match.group() head = item[:match.start()] tail = item[match.end():] if not case_sensitive: head = head.lower() tail = tail.lower() padding = match.group() if padding: padding = len(index) else: padding = 0 key = (head, tail, padding) collection_map[key].add(int(index)) matched = True if not matched: remainder.append(item) merge_candidates = [] for (head, tail, padding), indexes in collection_map.items(): collection = Collection(head, tail, padding, indexes) collections.append(collection) if collection.padding == 0: merge_candidates.append(collection) fully_merged = [] for collection in collections: if collection.padding == 0: continue for candidate in merge_candidates: if ( candidate.head == collection.head and candidate.tail == collection.tail ): merged_index_count = 0 for index in candidate.indexes: if len(str(abs(index))) == collection.padding: collection.indexes.add(index) merged_index_count += 1 if merged_index_count == len(candidate.indexes): fully_merged.append(candidate) collections = [collection for collection in collections if collection not in fully_merged] filtered = [] remainder_candidates = [] for collection in collections: if len(collection.indexes) >= minimum_items: filtered.append(collection) else: for member in collection: remainder_candidates.append(member) for candidate in remainder_candidates: if candidate in remainder: continue has_membership = False for collection in filtered: if candidate in collection: has_membership = True break if not has_membership: remainder.append(candidate) if assume_padded_when_ambiguous: for collection in filtered: if ( not collection.padding and collection.indexes ): indexes = list(collection.indexes) first_index_width = len(str(indexes[0])) last_index_width = len(str(indexes[-1])) if first_index_width == last_index_width: collection.padding = first_index_width return filtered, remainder
Assemble items in *iterable* into discrete collections. *patterns* may be specified as a list of regular expressions to limit the returned collection possibilities. Use this when interested in collections that only match specific patterns. Each pattern must contain the expression from :py:data:`DIGITS_PATTERN` exactly once. A selection of common expressions is available in :py:data:`PATTERNS`. .. note:: If a pattern is supplied as a string it will be automatically compiled to a :py:class:`re.RegexObject` instance for convenience. When *patterns* is not specified, collections are formed by examining all possible groupings of the items in *iterable* based around common numerical components. *minimum_items* dictates the minimum number of items a collection must have in order to be included in the result. The default is 2, filtering out single item collections. If *case_sensitive* is False, then items will be treated as part of the same collection when they only differ in casing. To avoid ambiguity, the resulting collection will always be lowercase. For example, "item.0001.dpx" and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx". .. note:: Any compiled *patterns* will also respect the set case sensitivity. For certain collections it may be ambiguous whether they are padded or not. For example, 1000-1010 can be considered either an unpadded collection or a four padded collection. By default, Clique is conservative and assumes that the collection is unpadded. To change this behaviour, set *assume_padded_when_ambiguous* to True and any ambiguous collection will have a relevant padding set. .. note:: *assume_padded_when_ambiguous* has no effect on collections that are unambiguous. For example, 1-100 will always be considered unpadded regardless of the *assume_padded_when_ambiguous* setting. Return tuple of two lists (collections, remainder) where 'collections' is a list of assembled :py:class:`~clique.collection.Collection` instances and 'remainder' is a list of items that did not belong to any collection.
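Typical usage, assuming the clique package is installed; the filenames are made up for illustration:

import clique

items = [
    "file.0001.exr", "file.0002.exr", "file.0003.exr",
    "render_v1.jpg", "render_v2.jpg",
    "notes.txt",
]
collections, remainder = clique.assemble(items)
for collection in collections:
    print(collection)  # e.g. file.%04d.exr [1-3]
print(remainder)       # items that formed no collection, e.g. ['notes.txt']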
19,588
def initialize_segment_register_x64(self, state, concrete_target):
    _l.debug("Synchronizing gs segment register")
    state.regs.gs = self._read_gs_register_x64(concrete_target)
Set the gs register in the angr state to the value of the gs register in the concrete process :param state: state which will be modified :param concrete_target: concrete target that will be used to read the gs register :return: None
19,589
def get_rlzs_by_gsim(oqparam):
    cinfo = source.CompositionInfo.fake(get_gsim_lt(oqparam))
    ra = cinfo.get_rlzs_assoc()
    dic = {}
    for rlzi, gsim_by_trt in enumerate(ra.gsim_by_trt):
        dic[gsim_by_trt['*']] = [rlzi]
    return dic
Return an ordered dictionary gsim -> [realization index]. Works for gsim logic trees with a single tectonic region type.
19,590
def create(self, date_at=None, minutes=0, note='', user_id=None,
           project_id=None, service_id=None):
    keywords = {
        'date_at': date_at,
        'minutes': minutes,
        'note': note,
        'user_id': user_id,
        'project_id': project_id,
        'service_id': service_id,
    }
    foo = dict()
    foo['time_entry'] = keywords
    path = partial(_path, self.adapter)
    path = _path(self.adapter)
    return self._post(path, **foo)
date_at - date of time entry. Format YYYY-MM-DD. default: today
minutes - default: 0
note - default: '' (empty string)
user_id - default: actual user id (only admin users can edit this)
project_id - default: None
service_id - default: None
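A sketch of the payload this builds before posting; the 'time_entry' wrapper key follows the mite API convention and is an assumption here, while the inner keys mirror the method's keyword arguments:

import json

keywords = {
    "date_at": "2015-10-02",  # YYYY-MM-DD
    "minutes": 90,
    "note": "Code review",
    "user_id": None,          # None falls back to the API default (current user)
    "project_id": 1234,
    "service_id": 5678,
}
payload = {"time_entry": keywords}  # wrapper key assumed
print(json.dumps(payload, indent=2))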
19,591
def resource_get(self, resource_name):
    try:
        with self._resource_lock:
            res = self._resources[resource_name]
    except KeyError:
        return {}
    return res
Return resource info
:param resource_name: Resource name as returned by resource_get_list()
:type resource_name: str
:return: Resource information (empty if not found)
    name: Resource name
    hash: Resource hash
    path: Path to resource
    checked: Last time information was updated
:rtype: dict[str, str]
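The pattern is simple but worth seeing end to end: a lock guards the shared mapping, and a missing key degrades to an empty dict instead of raising. A self-contained sketch with illustrative names:

import threading

class ResourceRegistry:
    # Illustrative stand-in for the class that owns resource_get above.
    def __init__(self):
        self._resource_lock = threading.Lock()
        self._resources = {}

    def resource_get(self, resource_name):
        try:
            with self._resource_lock:
                res = self._resources[resource_name]
        except KeyError:
            return {}
        return res

registry = ResourceRegistry()
registry._resources["cfg"] = {"name": "cfg", "hash": "abc123", "path": "/tmp/cfg"}
print(registry.resource_get("cfg")["hash"])  # abc123
print(registry.resource_get("missing"))      # {}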
19,592
def lambda_handler(event, context):
    auth = check_auth(event, role=["admin"])
    if not auth['success']:
        return auth
    # NOTE: the environment variable name and the output keys here are assumptions.
    table = boto3.resource("dynamodb").Table(os.environ['TABLE_NAME'])
    results = table.scan()
    output = {'success': True, 'items': list(), 'item_count': 0}
    for item in results.get('Items', list()):
        output['items'].append(item)
    output['item_count'] = len(output['items'])
    return output
Main handler.
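One caveat: a single table.scan() call returns at most one page of items. A hedged sketch of a full, paginated scan, assuming valid AWS credentials and an existing table name:

import boto3

def scan_all(table_name):
    # Follow DynamoDB pagination until the whole table has been read.
    table = boto3.resource("dynamodb").Table(table_name)
    items = []
    response = table.scan()
    items.extend(response.get("Items", []))
    while "LastEvaluatedKey" in response:
        response = table.scan(ExclusiveStartKey=response["LastEvaluatedKey"])
        items.extend(response.get("Items", []))
    return items

# items = scan_all("my-table")  # table name is illustrative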
19,593
def user_segment(self):
    if self.api and self.user_segment_id:
        return self.api._get_user_segment(self.user_segment_id)
| Comment: The id of the user segment to which this section belongs
19,594
def parse_sv_frequencies(variant):
    frequency_keys = [
        'clingen_cgh_benignAF',
        'clingen_cgh_benign',
        'clingen_cgh_pathogenicAF',
        'clingen_cgh_pathogenic',
        'clingen_ngi',
        'clingen_ngiAF',
        'swegen',
        'swegenAF',
        'decipherAF',
        'decipher'
    ]
    sv_frequencies = {}
    for key in frequency_keys:
        value = variant.INFO.get(key, 0)
        if 'AF' in key:
            value = float(value)
        else:
            value = int(value)
        if value > 0:
            sv_frequencies[key] = value
    return sv_frequencies
Parsing of some custom sv frequencies. These are very specific at the moment; this will hopefully get better over time when the field of structural variants is more developed. Args: variant(cyvcf2.Variant) Returns: sv_frequencies(dict)
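A quick way to exercise the parser without a real VCF is a stand-in object that only exposes an INFO mapping; the keys used below are assumed to be among the recognised frequency keys:

class FakeVariant:
    # Minimal stand-in for cyvcf2.Variant: only the INFO mapping is consulted.
    def __init__(self, info):
        self.INFO = info

variant = FakeVariant({"swegen": 12, "swegenAF": 0.004, "decipher": 0})
print(parse_sv_frequencies(variant))
# expected: {'swegen': 12, 'swegenAF': 0.004}; zero-valued entries are dropped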
19,595
def iter_issue_events(self, number=-1, etag=None):
    url = self._build_url('issues', 'events', base_url=self._api)
    return self._iter(int(number), url, IssueEvent, etag=etag)
Iterates over issue events on this repository. :param int number: (optional), number of events to return. Default: -1 returns all available events :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
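Typical usage through github3.py 0.x, where this iterator-style API lives; network access (and authentication for private repositories) is assumed:

import github3

gh = github3.GitHub()  # or github3.login(token="...") for authenticated access
repo = gh.repository("sigmavirus24", "github3.py")
for event in repo.iter_issue_events(number=20):
    print(event.event, event.created_at)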
19,596
def usage(ecode, msg=''):
    print >> sys.stderr, __doc__
    if msg:
        print >> sys.stderr, msg
    sys.exit(ecode)
Print usage and msg and exit with given code.
19,597
def set_coords(self, names, inplace=None):
    inplace = _check_inplace(inplace)
    if isinstance(names, str):
        names = [names]
    self._assert_all_in_dataset(names)
    obj = self if inplace else self.copy()
    obj._coord_names.update(names)
    return obj
Given names of one or more variables, set them as coordinates

Parameters
----------
names : str or list of str
    Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
    If True, modify this dataset inplace. Otherwise, create a new object.

Returns
-------
Dataset

See also
--------
Dataset.swap_dims
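A short usage example, assuming xarray is installed:

import xarray as xr

ds = xr.Dataset({
    "temperature": ("station", [11.2, 12.5, 13.1]),
    "station_name": ("station", ["a", "b", "c"]),
})
ds2 = ds.set_coords("station_name")  # promote a data variable to a coordinate
print(list(ds2.coords))              # ['station_name']
print(list(ds2.data_vars))           # ['temperature']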
19,598
def add_consumer_tag(self, tag):
    if not is_string(tag):
        raise AMQPChannelError('consumer tag needs to be a string')
    if tag not in self._consumer_tags:
        self._consumer_tags.append(tag)
Add a Consumer tag. :param str tag: Consumer tag. :return:
19,599
def file_or_filename(input):
    if isinstance(input, string_types):
        yield smart_open(input)
    else:
        input.seek(0)
        yield input
Return a file-like object ready to be read from the beginning. `input` is either a filename (gz/bz2 also supported) or a file-like object supporting seek.
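Generators like this are typically wrapped with contextlib.contextmanager so they can be used in a with statement. A simplified, self-contained illustration of the same pattern, using an in-memory buffer instead of a file on disk:

import io
from contextlib import contextmanager

@contextmanager
def file_or_filename_sketch(input):
    # Open a path, or rewind an already-open file-like object.
    if isinstance(input, str):
        with open(input, "rb") as fin:
            yield fin
    else:
        input.seek(0)
        yield input

with file_or_filename_sketch(io.BytesIO(b"hello")) as fin:
    print(fin.read())  # b'hello'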