Dataset columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
383,700
def main(argv=None): try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
Main entry point when the user runs the `trytravis` command.
383,701
def num_rings(self): num = self._libinput.libinput_device_tablet_pad_get_num_rings( self._handle) if num < 0: raise AttributeError() return num
The number of rings a device with the :attr:`~libinput.constant.DeviceCapability.TABLET_PAD` capability provides. Returns: int: The number of rings or 0 if the device has no rings. Raises: AttributeError
383,702
def open_interpreter(self, fnames): for path in sorted(fnames): self.sig_open_interpreter.emit(path)
Open interpreter
383,703
def write_early_data(self, data: bytes) -> int: if self._is_handshake_completed: raise IOError() self._ssl.write_early_data(data) final_length = self._flush_ssl_engine() return final_length
Returns the number of (encrypted) bytes sent.
383,704
def getsize(store, path=None): path = normalize_storage_path(path) if hasattr(store, 'getsize'): return store.getsize(path) elif isinstance(store, dict): if path in store: v = store[path] size = buffer_size(v) else: members = listdir(store, path) prefix = _path_to_prefix(path) size = 0 for k in members: try: v = store[prefix + k] except KeyError: pass else: try: size += buffer_size(v) except TypeError: return -1 return size else: return -1
Compute size of stored items for a given path. If `store` provides a `getsize` method it is called; if `store` is a dict, the buffer sizes of the stored values are summed; otherwise -1 is returned (also whenever a size cannot be determined).
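A minimal usage sketch of the dict branch (hypothetical store; assumes zarr's `listdir` and `buffer_size` helpers resolve a plain dict and report byte lengths): >>> store = {'a': b'xxx', 'b': b'yyyy'} >>> getsize(store)  # sums the buffer sizes of all members: 3 + 4 7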
383,705
def param(f): (args, varargs, kwargs, dflts) = getargspec_py27like(f) if varargs is not None or kwargs is not None or dflts: raise ValueError('Params may not accept variable, variadic keyword, or default arguments') if len(args) != 1: raise ValueError('Parameter transformation functions must take exactly one argument') f._pimms_immutable_data_ = {} f._pimms_immutable_data_['is_param'] = True f._pimms_immutable_data_['name'] = f.__name__ f = staticmethod(f) return f  # body reconstructed best-effort from the mangled extraction; helper and attribute names are assumptions
The @param decorator, usable in an immutable class (see immutable), specifies that the following function is actually a transformation on an input parameter; the parameter is required, and is set to the value returned by the decorated function; i.e., if you decorate the function abc with @param, then imm.abc = x will result in imm's abc attribute being set to the value of type(imm).abc(x).
383,706
def submit_sample(self, filepath, filename, tags=[]): apiurl = '/rest/sample/submit?sample_file' params = {'sample_filename_b64enc': base64.b64encode(filename.encode()), 'reanalyze': self.reanalyze} if tags: params['tags'] = ','.join(tags) if os.path.isfile(filepath): res = self.session.post(url=self.url + apiurl, files=[('sample_file', open(filepath, mode='rb'))], params=params) if res.status_code == 200: return json.loads(res.text) else: raise BadResponseError('Response code: {}, response: {}'.format(res.status_code, res.text)) else: raise SampleFileNotFoundError()  # string literals stripped in extraction; endpoint, parameter keys and message restored best-effort
Uploads a new sample to VMRay api. Filename gets sent base64 encoded. :param filepath: path to sample :type filepath: str :param filename: filename of the original file :type filename: str :param tags: List of tags to apply to the sample :type tags: list(str) :returns: Dictionary of results :rtype: dict
383,707
def contains_key(self, key): check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._encode_invoke_on_key(multi_map_contains_key_codec, key_data, key=key_data, thread_id=thread_id())
Determines whether this multimap contains an entry with the key. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the specified key. :return: (bool), ``true`` if this multimap contains an entry for the specified key.
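A hedged usage sketch (names per the Hazelcast Python client, where proxy calls return futures resolved with `.result()`; `multi_map` is a hypothetical MultiMap proxy): >>> multi_map.put('k', 1).result() True >>> multi_map.contains_key('k').result() True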
383,708
def chop(self, bits=1): s = len(self) if s % bits != 0: raise ValueError("expression length (%d) should be a multiple of (%d)" % (len(self), bits)) elif s == bits: return [ self ] else: return list(reversed([ self[(n+1)*bits - 1:n*bits] for n in range(0, s // bits) ]))
Chops a BV into consecutive sub-slices. Obviously, the length of this BV must be a multiple of bits. :returns: A list of smaller bitvectors, each ``bits`` in length. The first one will be the left-most (i.e. most significant) bits.
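A short sketch in claripy terms (assuming a concrete 8-bit `BVV`; `.args[0]` reads the concrete value of each slice): >>> import claripy >>> bv = claripy.BVV(0b10110100, 8) >>> [piece.args[0] for piece in bv.chop(4)]  # most significant nibble first [11, 4]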
383,709
def hook_point(self, hook_name): self.my_daemon.hook_point(hook_name=hook_name, handle=self)
Generic function to call module methods if such a method is available :param hook_name: function name to call :type hook_name: str :return: None
383,710
def to_date(value, default=None): if isinstance(value, DateTime): return value if not value: if default is None: return None return to_date(default) try: if isinstance(value, str) and '.' in value: return DateTime(value, datefmt='international') return DateTime(value) except (TypeError, ValueError, DateTimeError): return to_date(default)  # stripped literals restored best-effort: '.'-separated strings are parsed with the 'international' date format
Tries to convert the passed in value to Zope's DateTime :param value: The value to be converted to a valid DateTime :type value: str, DateTime or datetime :return: The DateTime representation of the value passed in or default
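A hedged behavior sketch (string formats assumed parseable by Zope's DateTime): >>> dt = to_date('2021-01-31')               # parsed into a DateTime >>> dt2 = to_date(None, default='2021-01-31')  # falsy value falls back to the default >>> to_date('not a date') is None              # unparseable, no default, so None True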
383,711
def _raw_sql(self, values): if isinstance(self.model._meta.pk, CharField): when_clauses = " ".join([self._when("'{}'".format(x), y) for (x, y) in values]) else: when_clauses = " ".join([self._when(x, y) for (x, y) in values]) table_name = self.model._meta.db_table primary_key = self.model._meta.pk.column return SQL_TEMPLATE.format(table_name, primary_key, when_clauses)  # CharField pks are quoted; the original SQL template literal was stripped in extraction, SQL_TEMPLATE stands in for it
Prepare SQL statement consisting of a sequence of WHEN .. THEN statements.
383,712
def build_from_info(cls, info): info = deepcopy(info) if 'term_type' in info: cls_ = TERMS[info.pop('term_type')] if issubclass(cls_, MetaTermMixin): return cls_.build_from_info(info) else: cls_ = cls return cls_(**info)  # 'term_type' key restored best-effort from the stripped literal
build a Term instance from a dict Parameters ---------- cls : class info : dict contains all information needed to build the term Return ------ Term instance
383,713
def _compute_soil_linear_factor(cls, pga_rock, imt): if imt.period >= 1: return np.ones_like(pga_rock) else: sl = np.zeros_like(pga_rock) pga_between_100_500 = (pga_rock > 100) & (pga_rock < 500) pga_greater_equal_500 = pga_rock >= 500 is_SA_between_05_1 = 0.5 < imt.period < 1 is_SA_less_equal_05 = imt.period <= 0.5 if is_SA_between_05_1: sl[pga_between_100_500] = (1 - (1. / imt.period - 1) * (pga_rock[pga_between_100_500] - 100) / 400) sl[pga_greater_equal_500] = 1 - (1. / imt.period - 1) if is_SA_less_equal_05 or imt.period == 0: sl[pga_between_100_500] = (1 - (pga_rock[pga_between_100_500] - 100) / 400) sl[pga_rock <= 100] = 1 return sl
Compute soil linear factor as explained in paragraph 'Functional Form', page 1706.
383,714
def find_root( self, rows ): maxes = sorted( rows.values(), key = lambda x: x.cumulative ) if not maxes: raise RuntimeError( 'Null results!' ) root = maxes[-1] roots = [root] for key,value in rows.items(): if not value.parents: log.debug( 'Found node root: %s', value ) if value not in roots: roots.append( value ) if len(roots) > 1: root = PStatGroup( directory='*', filename='*', name=_("<profiling run>"), children= roots, ) root.finalize() self.rows[ root.key ] = root self.roots['functions'] = root return root  # stripped string literals restored best-effort
Attempt to find/create a reasonable root node from list/set of rows rows -- key: PStatRow mapping TODO: still need more robustness here, particularly in the case of threaded programs. Should be tracing back each row to root, breaking cycles by sorting on cumulative time, and then collecting the traced roots (or, if they are all on the same root, use that).
383,715
def genestats(args): p = OptionParser(genestats.__doc__) p.add_option("--groupby", default="conf_class", help="Print separate stats groupby") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args gb = opts.groupby g = make_index(gff_file) tf = "transcript.sizes" if need_update(gff_file, tf): fw = open(tf, "w") for feat in g.features_of_type("mRNA"): fid = feat.id conf_class = feat.attributes.get(gb, "all") tsize = sum((c.stop - c.start + 1) for c in g.children(fid, 1) \ if c.featuretype == "exon") print("\t".join((fid, str(tsize), conf_class)), file=fw) fw.close() tsizes = DictFile(tf, cast=int) conf_classes = DictFile(tf, valuepos=2) logging.debug("A total of {0} transcripts populated.".format(len(tsizes))) genes = [] for feat in g.features_of_type("gene"): fid = feat.id transcripts = [c.id for c in g.children(fid, 1) \ if c.featuretype == "mRNA"] transcript_sizes = [tsizes[x] for x in transcripts] exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \ if c.featuretype == "exon") conf_class = conf_classes[transcripts[0]] gs = GeneStats(feat, conf_class, transcript_sizes, exons) genes.append(gs) r = {} distinct_groups = set(conf_classes.values()) for g in distinct_groups: num_genes = num_single_exon_genes = num_multi_exon_genes = 0 num_genes_with_alts = num_transcripts = num_exons = max_transcripts = 0 cum_locus_size = cum_transcript_size = cum_exon_size = 0 for gs in genes: if gs.conf_class != g: continue num_genes += 1 if gs.num_exons == 1: num_single_exon_genes += 1 else: num_multi_exon_genes += 1 num_exons += gs.num_exons if gs.num_transcripts > 1: num_genes_with_alts += 1 if gs.num_transcripts > max_transcripts: max_transcripts = gs.num_transcripts num_transcripts += gs.num_transcripts cum_locus_size += gs.locus_size cum_transcript_size += gs.cum_transcript_size cum_exon_size += gs.cum_exon_size mean_num_exons = num_exons * 1. / num_genes mean_num_transcripts = num_transcripts * 1. / num_genes mean_locus_size = cum_locus_size * 1. / num_genes mean_transcript_size = cum_transcript_size * 1. / num_transcripts mean_exon_size = cum_exon_size * 1. / num_exons r[("Number of genes", g)] = num_genes r[("Number of single-exon genes", g)] = \ percentage(num_single_exon_genes, num_genes, mode=1) r[("Number of multi-exon genes", g)] = \ percentage(num_multi_exon_genes, num_genes, mode=1) r[("Number of distinct exons", g)] = num_exons r[("Number of genes with alternative transcript variants", g)] = \ percentage(num_genes_with_alts, num_genes, mode=1) r[("Number of predicted transcripts", g)] = num_transcripts r[("Mean number of distinct exons per gene", g)] = mean_num_exons r[("Mean number of transcripts per gene", g)] = mean_num_transcripts r[("Max number of transcripts per gene", g)] = max_transcripts r[("Mean gene locus size (first to last exon)", g)] = mean_locus_size r[("Mean transcript size (UTR, CDS)", g)] = mean_transcript_size r[("Mean exon size", g)] = mean_exon_size fw = must_open(opts.outfile, "w") print(tabulate(r), file=fw) fw.close()
%prog genestats gffile Print summary stats, including: - Number of genes - Number of single-exon genes - Number of multi-exon genes - Number of distinct exons - Number of genes with alternative transcript variants - Number of predicted transcripts - Mean number of distinct exons per gene - Mean number of transcripts per gene - Mean gene locus size (first to last exon) - Mean transcript size (UTR, CDS) - Mean exon size Stats modeled after barley genome paper Table 1. A physical, genetic and functional sequence assembly of the barley genome
383,716
def check_perplexities(self, perplexities): usable_perplexities = [] for perplexity in sorted(perplexities): if 3 * perplexity > self.n_samples - 1: new_perplexity = (self.n_samples - 1) / 3 if new_perplexity in usable_perplexities: log.warning( "Perplexity value %d is too high. Dropping " "because the max perplexity is already in the " "list." % perplexity ) else: usable_perplexities.append(new_perplexity) log.warning( "Perplexity value %d is too high. Using " "perplexity %.2f instead" % (perplexity, new_perplexity) ) else: usable_perplexities.append(perplexity) return usable_perplexities
Check and correct/truncate perplexities. If a perplexity is too large, it is corrected to the largest allowed value. It is then inserted into the list of perplexities only if that value doesn't already exist in the list.
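Worked example: with `n_samples = 100` the cap is (100 - 1) / 3 = 33, so 50 is clipped to 33.0 and 500 is dropped as a duplicate of that cap (a hedged sketch on a hypothetical instance `tsne` with `n_samples == 100`): >>> tsne.check_perplexities([30, 50, 500]) [30, 33.0]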
383,717
def train(self, x = None, y = None, training_frame = None, fold_column = None, weights_column = None, validation_frame = None, leaderboard_frame = None, blending_frame = None): ncols = training_frame.ncols names = training_frame.names if self.project_name is None: self.project_name = "automl_" + training_frame.frame_id self.build_control["project_name"] = self.project_name if y is None: raise ValueError('The response column (y) is required') else: assert_is_type(y,int,str) if is_type(y, int): if not (-ncols <= y < ncols): raise H2OValueError("Column %d does not exist in the training frame" % y) y = names[y] else: if y not in names: raise H2OValueError("Column %s does not exist in the training frame" % y) input_spec = { 'response_column': y, } if training_frame is None: raise ValueError('training_frame is required') else: assert_is_type(training_frame, H2OFrame) input_spec['training_frame'] = training_frame.frame_id if fold_column is not None: assert_is_type(fold_column,int,str) input_spec['fold_column'] = fold_column if weights_column is not None: assert_is_type(weights_column,int,str) input_spec['weights_column'] = weights_column if validation_frame is not None: assert_is_type(validation_frame, H2OFrame) input_spec['validation_frame'] = validation_frame.frame_id if leaderboard_frame is not None: assert_is_type(leaderboard_frame, H2OFrame) input_spec['leaderboard_frame'] = leaderboard_frame.frame_id if blending_frame is not None: assert_is_type(blending_frame, H2OFrame) input_spec['blending_frame'] = blending_frame.frame_id if self.sort_metric is not None: assert_is_type(self.sort_metric, str) sort_metric = self.sort_metric.lower() input_spec['sort_metric'] = sort_metric self._job.poll() self._fetch()  # dict keys and error messages stripped in extraction; restored best-effort from the parameter names
Begins an AutoML task, a background task that automatically builds a number of models with various algorithms and tracks their performance in a leaderboard. At any point in the process you may use H2O's performance or prediction functions on the resulting models. :param x: A list of column names or indices indicating the predictor columns. :param y: An index or a column name indicating the response column. :param fold_column: The name or index of the column in training_frame that holds per-row fold assignments. :param weights_column: The name or index of the column in training_frame that holds per-row weights. :param training_frame: The H2OFrame having the columns indicated by x and y (as well as any additional columns specified by fold_column or weights_column). :param validation_frame: H2OFrame with validation data. This argument is ignored unless the user sets nfolds = 0. If cross-validation is turned off, then a validation frame can be specified and used for early stopping of individual models and early stopping of the grid searches. By default and when nfolds > 1, cross-validation metrics will be used for early stopping and thus validation_frame will be ignored. :param leaderboard_frame: H2OFrame with test data for scoring the leaderboard. This is optional and if this is set to None (the default), then cross-validation metrics will be used to generate the leaderboard rankings instead. :param blending_frame: H2OFrame used to train the metalearning algorithm in Stacked Ensembles (instead of relying on cross-validated predicted values). This is optional, but when provided, it is also recommended to disable cross validation by setting `nfolds=0` and to provide a leaderboard frame for scoring purposes. :returns: An H2OAutoML object. :examples: >>> # Set up an H2OAutoML object >>> aml = H2OAutoML(max_runtime_secs=30) >>> # Launch an AutoML run >>> aml.train(y=y, training_frame=train)
383,718
def start_new_log(self): filename = self.new_log_filepath() self.block_cnt = 0 self.logfile = open(filename, 'wb') print("DFLogger: logging started (%s)" % (filename)) self.prev_cnt = 0 self.download = 0 self.prev_download = 0 self.last_idle_status_printed_time = time.time() self.last_status_time = time.time() self.missing_blocks = {} self.acking_blocks = {} self.blocks_to_ack_and_nack = [] self.missing_found = 0 self.abandoned = 0  # file mode literal stripped in extraction; 'wb' assumed
open a new dataflash log, reset state
383,719
def readline(self): line = self.file.readline() if self.grammar and line: try: return self.grammar.parseString(line).asDict() except ParseException: return self.readline() else: return line
Reads (and optionally parses) a single line.
383,720
def to_netflux(flux): if issparse(flux): return sparse.tpt.to_netflux(flux) elif isdense(flux): return dense.tpt.to_netflux(flux) else: raise _type_not_supported
r"""Compute the netflux from the gross flux. Parameters ---------- flux : (M, M) ndarray Matrix of flux values between pairs of states. Returns ------- netflux : (M, M) ndarray Matrix of netflux values between pairs of states. Notes ----- The netflux or effective current is defined as .. math:: f_{ij}^{+}=\max \{ f_{ij}-f_{ji}, 0 \} :math:`f_{ij}` is the flux for the transition from :math:`A` to :math:`B`. References ---------- .. [1] P. Metzner, C. Schuette and E. Vanden-Eijnden. Transition Path Theory for Markov Jump Processes. Multiscale Model Simul 7: 1192-1219 (2009)
383,721
def pick(self): v = random.uniform(0, self.ub) d = self.dist c = self.vc - 1 s = self.vc while True: s = s / 2 if s == 0: break if v <= d[c][1]: c -= s else: c += s while len(d) <= c: s = s / 2 c -= s if s == 0: break if c == len(d) or v <= d[c][1]: c -= 1 return d[c][0]
picks a value according to the given density
383,722
def find_signature_input_colocation_error(signature_name, inputs): for input_name, tensor in inputs.items(): expected_colocation_groups = [tf.compat.as_bytes("loc:@" + tensor.op.name)] if tensor.op.colocation_groups() != expected_colocation_groups: return ( "A tensor x used as input in a signature must not be subject to a " "tf.colocate_with(y) constraint. (The reverse would be allowed.)\n" "Details: tensor %s appears as input %s of signature %s " "but has Tensor.op.colocation_groups() == %s" % (tensor, input_name, signature_name, tensor.op.colocation_groups())) return None
Returns error message for colocation of signature inputs, or None if ok.
383,723
def correct_dmdt(d, dmind, dtind, blrange): data = numpyview(data_mem, 'complex64', datashape(d)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) bl0,bl1 = blrange data_resamp[:, bl0:bl1] = data[:, bl0:bl1] rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0)  # stripped literals restored best-effort from rtpipe state-dict conventions
Dedisperses and resamples data *in place*. Drops edges, since it assumes that data is read with overlapping chunks in time.
383,724
def process_edge_dijkstra(self, current, neighbor, pred, q, component): pass  # body mangled during dataset extraction; per the docstring it relaxes the edge (current, neighbor) for Dijkstra's algorithm, updating the neighbor's cost/priority in q and flipping node and edge 'color' attributes between 'red' (being processed) and 'black' (settled)
API: process_edge_dijkstra(self, current, neighbor, pred, q, component) Description: Used by search() method if the algo argument is 'Dijkstra'. Processes edges along Dijkstra's algorithm. User does not need to call this method directly. Input: current: Name of the current node. neighbor: Name of the neighbor node. pred: Predecessor tree. q: Data structure that holds nodes to be processed in a queue. component: component number. Post: 'color' attribute of nodes and edges may change.
383,725
def generateExecutable(self, outpath=, signed=False): if not (self.runtime() or self.specfile()): return True if not self.distributionPath(): return True if os.path.exists(self.distributionPath()): shutil.rmtree(self.distributionPath()) if os.path.isfile(self.sourcePath()): basepath = os.path.normpath(os.path.dirname(self.sourcePath())) else: basepath = os.path.normpath(self.sourcePath()) self.generatePlugins(basepath) specfile = self.specfile() opts = { : self.name(), : self.executableName(), : self.productName(), : self.runtime(), : self.sourcePath(), : self.buildPath(), : .join(wrap_str(self.hookPaths())), : .join(wrap_str(self.hiddenImports())), : self.distributionPath(), : sys.platform, : .join(wrap_str(self.executableExcludes())) } if not specfile: datasets = [] for typ, data in self.executableData(): if typ == : args = { : data[0], : data[1], : .join(wrap_str(data[2])) } datasets.append(templ.SPECTREE.format(**args)) else: args = {} args.update(data) args.setdefault(, typ) datasets.append(templ.SPECDATA.format(**args)) opts[] = .join(datasets) opts.update(self._executableOptions) if self.executableCliName(): opts[] = self.executableCliName() opts[] = templ.SPECFILE_CLI.format(**opts) else: opts[] = templ.SPECFILE_COLLECT.format(**opts) if opts[]: data = templ.SPECFILE_ONEFILE.format(**opts) else: data = templ.SPECFILE.format(**opts) specfile = os.path.join(self.buildPath(), self.name() + ) f = open(specfile, ) f.write(data) f.close() cmd = os.path.expandvars(self.executableOption()) success = cmdexec(cmd.format(spec=specfile)) == 0 if signed: binfile = os.path.join(opts[], opts[], opts[] + ) self.sign(binfile) return success
Generates the executable for this builder in the output path. :param outpath | <str>
383,726
def set_background_corpus(self, background): if issubclass(type(background), TermDocMatrixWithoutCategories): self._background_corpus = pd.DataFrame(background .get_term_freq_df() .sum(axis=1), columns=['background']).reset_index() self._background_corpus.columns = ['word', 'background'] elif (type(background) == pd.DataFrame and set(background.columns) == set(['word', 'background'])): self._background_corpus = background else: raise Exception('background must be a TermDocMatrix subclass or a pandas.DataFrame with columns "word" and "background"')  # column names and message restored best-effort
Parameters ---------- background : TermDocMatrixWithoutCategories or pd.DataFrame The background corpus, either a term-document matrix or a DataFrame with 'word' and 'background' (frequency) columns.
383,727
def get_generator(tweet): if is_original_format(tweet): if sys.version_info[0] == 3 and sys.version_info[1] >= 4: parser = GeneratorHTMLParser(convert_charrefs=True) else: parser = GeneratorHTMLParser() parser.feed(tweet["source"]) return {"link": parser.generator_link, "name": parser.generator_name} else: return {"link": tweet["generator"]["link"], "name": tweet["generator"]["displayName"]}
Get information about the application that generated the Tweet Args: tweet (Tweet): A Tweet object (or a dictionary) Returns: dict: keys are 'link' and 'name', the web link and the name of the application Example: >>> from tweet_parser.getter_methods.tweet_generator import get_generator >>> original_format_dict = { ... "created_at": "Wed May 24 20:17:19 +0000 2017", ... "source": '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>' ... } >>> get_generator(original_format_dict) {'link': 'http://twitter.com', 'name': 'Twitter Web Client'} >>> activity_streams_format_dict = { ... "postedTime": "2017-05-24T20:17:19.000Z", ... "generator": ... {"link": "http://twitter.com", ... "displayName": "Twitter Web Client"} ... } >>> get_generator(activity_streams_format_dict) {'link': 'http://twitter.com', 'name': 'Twitter Web Client'}
383,728
def _leapfrog_integrator_one_step( target_log_prob_fn, independent_chain_ndims, step_sizes, current_momentum_parts, current_state_parts, current_target_log_prob, current_target_log_prob_grad_parts, state_gradients_are_stopped=False, name=None): current_state_parts, proposed_state_parts, proposed_target_log_prob_grad_parts)) proposed_momentum_parts = [ v + 0.5 * tf.cast(eps, v.dtype) * g for v, eps, g in zip(proposed_momentum_parts, step_sizes, proposed_target_log_prob_grad_parts)] return [ proposed_momentum_parts, proposed_state_parts, proposed_target_log_prob, proposed_target_log_prob_grad_parts, ]
Applies `num_leapfrog_steps` of the leapfrog integrator. Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`. #### Examples: ##### Simple quadratic potential. ```python import matplotlib.pyplot as plt %matplotlib inline import numpy as np import tensorflow as tf from tensorflow_probability.python.mcmc.hmc import _leapfrog_integrator_one_step # pylint: disable=line-too-long tfd = tfp.distributions dims = 10 num_iter = int(1e3) dtype = np.float32 position = tf.placeholder(np.float32) momentum = tf.placeholder(np.float32) target_log_prob_fn = tfd.MultivariateNormalDiag( loc=tf.zeros(dims, dtype)).log_prob def _leapfrog_one_step(*args): # Closure representing computation done during each leapfrog step. return _leapfrog_integrator_one_step( target_log_prob_fn=target_log_prob_fn, independent_chain_ndims=0, step_sizes=[0.1], current_momentum_parts=args[0], current_state_parts=args[1], current_target_log_prob=args[2], current_target_log_prob_grad_parts=args[3]) # Do leapfrog integration. [ [next_momentum], [next_position], next_target_log_prob, next_target_log_prob_grad_parts, ] = tf.while_loop( cond=lambda *args: True, body=_leapfrog_one_step, loop_vars=[ [momentum], [position], target_log_prob_fn(position), tf.gradients(target_log_prob_fn(position), position), ], maximum_iterations=3) momentum_ = np.random.randn(dims).astype(dtype) position_ = np.random.randn(dims).astype(dtype) positions = np.zeros([num_iter, dims], dtype) with tf.Session() as sess: for i in xrange(num_iter): position_, momentum_ = sess.run( [next_momentum, next_position], feed_dict={position: position_, momentum: momentum_}) positions[i] = position_ plt.plot(positions[:, 0]); # Sinusoidal. ``` Args: target_log_prob_fn: Python callable which takes an argument like `*current_state_parts` and returns its (possibly unnormalized) log-density under the target distribution. independent_chain_ndims: Scalar `int` `Tensor` representing the number of leftmost `Tensor` dimensions which index independent chains. step_sizes: Python `list` of `Tensor`s representing the step size for the leapfrog integrator. Must broadcast with the shape of `current_state_parts`. Larger step sizes lead to faster progress, but too-large step sizes make rejection exponentially more likely. When possible, it's often helpful to match per-variable step sizes to the standard deviations of the target distribution in each variable. current_momentum_parts: Tensor containing the value(s) of the momentum variable(s) to update. current_state_parts: Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `independent_chain_ndims` of the `Tensor`(s) index different chains. current_target_log_prob: `Tensor` representing the value of `target_log_prob_fn(*current_state_parts)`. The only reason to specify this argument is to reduce TF graph size. current_target_log_prob_grad_parts: Python list of `Tensor`s representing gradient of `target_log_prob_fn(*current_state_parts`) wrt `current_state_parts`. Must have same shape as `current_state_parts`. The only reason to specify this argument is to reduce TF graph size. state_gradients_are_stopped: Python `bool` indicating that the proposed new state be run through `tf.stop_gradient`. This is particularly useful when combining optimization over samples from the HMC chain. Default value: `False` (i.e., do not apply `stop_gradient`). name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'hmc_leapfrog_integrator'). 
Returns: proposed_momentum_parts: Updated value of the momentum. proposed_state_parts: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at each result step. Has same shape as input `current_state_parts`. proposed_target_log_prob: `Tensor` representing the value of `target_log_prob_fn` at `next_state`. proposed_target_log_prob_grad_parts: Gradient of `proposed_target_log_prob` wrt `next_state`. Raises: ValueError: if `len(momentum_parts) != len(state_parts)`. ValueError: if `len(state_parts) != len(step_sizes)`. ValueError: if `len(state_parts) != len(grads_target_log_prob)`. TypeError: if `not target_log_prob.dtype.is_floating`.
383,729
def find_mutant_amino_acid_interval( cdna_sequence, cdna_first_codon_offset, cdna_variant_start_offset, cdna_variant_end_offset, n_ref, n_amino_acids): cdna_alt_nucleotides = cdna_sequence[ cdna_variant_start_offset:cdna_variant_end_offset] n_alt = len(cdna_alt_nucleotides) cdna_coding_prefix = cdna_sequence[cdna_first_codon_offset:cdna_variant_start_offset] n_coding_nucleotides_before_variant = len(cdna_coding_prefix) n_complete_prefix_codons = n_coding_nucleotides_before_variant // 3 frame_of_variant_nucleotides = n_coding_nucleotides_before_variant % 3 frameshift = abs(n_ref - n_alt) % 3 != 0 indel = n_ref != n_alt variant_aa_interval_start = n_complete_prefix_codons if frameshift: variant_aa_interval_end = n_amino_acids else: n_alt_codons = int(math.ceil(n_alt / 3.0)) if indel: extra_affected_codon = int(frame_of_variant_nucleotides != 0) variant_aa_interval_end = ( variant_aa_interval_start + n_alt_codons + extra_affected_codon) else: variant_aa_interval_end = variant_aa_interval_start + n_alt_codons return variant_aa_interval_start, variant_aa_interval_end, frameshift
Parameters ---------- cdna_sequence : skbio.DNA or str cDNA sequence found in RNAseq data cdna_first_codon_offset : int Offset into cDNA sequence to first complete codon, lets us skip past UTR region and incomplete codons. cdna_variant_start_offset : int Interbase start offset into cDNA sequence for selecting mutant nucleotides. cdna_variant_end_offset : int Interbase end offset into cDNA sequence for selecting mutant nucleotides. n_ref : int Number of reference nucleotides n_amino_acids : int Number of translated amino acids Returns tuple with three fields: 1) Start offset for interval of mutant amino acids in translated sequence 2) End offset for interval of mutant amino acids in translated sequence 3) Boolean flag indicating whether the variant was a frameshift.
383,730
def on_message(self, websocket, msg): if msg: lines = [] for li in msg.split('\n'): li = li.strip() if li: lines.append(li) msg = ' '.join(lines) if msg: return self.pubsub.publish(self.channel, msg)  # split/join literals stripped in extraction; newline split and space join assumed
When a new message arrives, it publishes to all listening clients.
383,731
def random_string(length, charset): n = len(charset) return ''.join(charset[random.randrange(n)] for _ in range(length))
Return a random string of the given length from the given character set. :param int length: The length of string to return :param str charset: A string of characters to choose from :returns: A random string :rtype: str
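A property-checking sketch (deterministic regardless of the RNG seed): >>> s = random_string(8, 'abcdef') >>> len(s), set(s) <= set('abcdef') (8, True)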
383,732
def set_post_data(self): self.form.data = self.post_data_dict for field_key, field in self.form.fields.items(): if has_digit(field_key): base_key = make_key(field_key, exclude_last_string=True) for key in self.post_data_dict.keys(): if base_key in key: self.form.fields.update({key: field})
Need to set form data so that validation on all post data occurs and places newly entered form data on the form object.
383,733
def create(**kwargs): secType = kwargs.get('secType', '') cls = { '': Contract, 'STK': Stock, 'OPT': Option, 'FUT': Future, 'CONTFUT': ContFuture, 'CASH': Forex, 'IND': Index, 'CFD': CFD, 'BOND': Bond, 'CMDTY': Commodity, 'FOP': FuturesOption, 'FUND': MutualFund, 'WAR': Warrant, 'IOPT': Warrant, 'BAG': Bag, 'NEWS': Contract }.get(secType, Contract) if cls is not Contract: kwargs.pop('secType', '') return cls(**kwargs)  # secType keys stripped in extraction; restored per Interactive Brokers security-type codes
Create and return a specialized contract based on the given secType, or a general Contract if secType is not given.
383,734
def Flush(self): if self.locked and self.CheckLease() == 0: self._RaiseLockError("Flush") self._WriteAttributes() self._SyncAttributes() if self.parent: self.parent.Flush()
Syncs this object with the data store, maintaining object validity.
383,735
def onDragSelection(self, event): if self.grid.GetSelectionBlockTopLeft(): bottom_right = eval(repr(self.grid.GetSelectionBlockBottomRight()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", "")) top_left = eval(repr(self.grid.GetSelectionBlockTopLeft()).replace("GridCellCoordsArray: ", "").replace("GridCellCoords", "")) top_left = top_left[0] bottom_right = bottom_right[0] else: return min_col = top_left[1] max_col = bottom_right[1] min_row = top_left[0] max_row = bottom_right[0] self.df_slice = self.contribution.tables[self.grid_type].df.iloc[min_row:max_row+1, min_col:max_col+1]
Set self.df_slice based on user's selection
383,736
def salt_master(project, target, module, args=None, kwargs=None): client = project.cluster.head.ssh_client cmd = [] cmd.extend(generate_salt_cmd(target, module, args, kwargs)) cmd.append('--timeout=30') cmd.append('--out=json') cmd = ' '.join(cmd) output = client.exec_command(cmd, sudo=True) if output['exit_code'] == 0: return output['stdout'] else: return output['stderr']  # appended CLI flags and output keys stripped in extraction; restored best-effort
Execute a `salt` command on the head node
383,737
def create_as_library(cls, url): site = { "crawler": "Download", "url": url } cfg_file_path = os.path.dirname(__file__) + os.path.sep + + os.path.sep + return cls(cfg_file_path, site, 0, False, False, True)
Creates a single crawler, as used in library mode. Crawling will start immediately. :param url: :return:
383,738
def delete_page_property(self, page_id, page_property): url = 'rest/api/content/{page_id}/property/{page_property}'.format(page_id=page_id, page_property=str(page_property)) return self.delete(path=url)  # URL template stripped in extraction; restored per the Confluence REST API
Delete the page (content) property e.g. delete key of hash :param page_id: content_id format :param page_property: key of property :return:
383,739
def calculate_perf_100nsec_timer(previous, current, property_name): n0 = previous[property_name] n1 = current[property_name] d0 = previous["Timestamp_Sys100NS"] d1 = current["Timestamp_Sys100NS"] if n0 is None or n1 is None: return return (n1 - n0) / (d1 - d0) * 100
PERF_100NSEC_TIMER https://technet.microsoft.com/en-us/library/cc728274(v=ws.10).aspx
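Worked example: two samples one second apart (1e7 hundred-nanosecond ticks) with the counter advancing by 2.5e6 ticks yields 25% utilization ('PercentProcessorTime' is just an illustrative property name): >>> prev = {'PercentProcessorTime': 0, 'Timestamp_Sys100NS': 0} >>> curr = {'PercentProcessorTime': 2500000, 'Timestamp_Sys100NS': 10000000} >>> calculate_perf_100nsec_timer(prev, curr, 'PercentProcessorTime') 25.0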
383,740
def unmarshal(self, v): try: return self.choices[v] except KeyError: self.log.warning("No such choice {0} for field {1}.".format(v, self)) return v
Convert the value from Strava API format to useful python representation. If the value does not appear in the choices attribute we log a warning rather than raising an exception, as this may be caused by an upstream API change, so we want to fail gracefully.
383,741
def unique_identifier(self): for t in IDENTIFIER_PRIORITY: found = self._tree.getroot().find('.//mods:identifier[@type="%s"]' % t, NS) if found is not None: return found.text  # XPath literal stripped in extraction; restored best-effort from the mods:identifier lookup described in the docstring
Get the unique identifier by looking through ``mods:identifier`` See `specs <https://ocr-d.github.io/mets#unique-id-for-the-document-processed>`_ for details.
383,742
def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5, environment=None): container = {'name': container_name} container_properties = {'image': image} container_properties['ports'] = [{'port': port}] container_properties['resources'] = {'requests': {'cpu': cpu, 'memoryInGB': memgb}} container['properties'] = container_properties if environment is not None: container_properties['environmentVariables'] = environment return container  # dict keys stripped in extraction; restored per the Azure Container Instances REST schema
Makes a python dictionary of container properties. Args: container_name: The name of the container. image (str): Container image string. E.g. nginx. port (int): TCP port number. E.g. 8080. cpu (float): Amount of CPU to allocate to container. E.g. 1.0. memgb (float): Memory in GB to allocate to container. E.g. 1.5. environment (list): A list of [{'name':'envname', 'value':'envvalue'}]. Sets environment variables in the container. Returns: A Python dictionary of container properties, pass a list of these to create_container_group().
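A usage sketch (key names per the reconstruction above, which follows the Azure Container Instances REST schema): >>> env = [{'name': 'MODE', 'value': 'prod'}] >>> c = create_container_definition('web', 'nginx', port=8080, cpu=0.5, memgb=1.0, environment=env) >>> c['name'], c['properties']['resources']['requests']['memoryInGB'] ('web', 1.0)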
383,743
def main(): col1,col2=0,1 sym,size = 'ro',20 xlab,ylab='','' lines=0 if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] else: print(main.__doc__) sys.exit() if '-c' in sys.argv: ind=sys.argv.index('-c') col1=sys.argv[ind+1] col2=sys.argv[ind+2] else: print(main.__doc__) sys.exit() if '-xlab' in sys.argv: ind=sys.argv.index('-xlab') xlab=sys.argv[ind+1] if '-ylab' in sys.argv: ind=sys.argv.index('-ylab') ylab=sys.argv[ind+1] if '-b' in sys.argv: ind=sys.argv.index('-b') xmin=float(sys.argv[ind+1]) xmax=float(sys.argv[ind+2]) ymin=float(sys.argv[ind+3]) ymax=float(sys.argv[ind+4]) if '-sym' in sys.argv: ind=sys.argv.index('-sym') sym=sys.argv[ind+1] size=int(sys.argv[ind+2]) if '-l' in sys.argv: lines=1 if '-S' in sys.argv: sym='' X,Y=[],[] data,file_type=pmag.magic_read(file) print(file_type) for rec in data: if col1 not in list(rec.keys()) or col2 not in list(rec.keys()): print(col1,'or',col2,'not found in file') print('try again') sys.exit() if rec[col1]!='' and rec[col2]!='': skip=0 if '-crit' in sys.argv: for crit in bounds: crits=crit.split(':') crit_key=crits[0] crit_min=crits[1] crit_max=crits[2] if rec[crit_key]=="": skip=1 else: if crit_min!="" and float(rec[crit_key])<float(crit_min):skip=1 if crit_max!="" and float(rec[crit_key])>float(crit_max):skip=1 if skip==0: X.append(float(rec[col1])) Y.append(float(rec[col2])) if len(X)==0: print(col1,'and',col2,'have no data') print('try again') sys.exit() else: print(len(X),'data points') if sym!='':pylab.scatter(X,Y,c=sym[0],marker=sym[1],s=size) if xlab!='':pylab.xlabel(xlab) if ylab!='':pylab.ylabel(ylab) if lines==1:pylab.plot(X,Y,'k-') if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax]) pylab.draw() ans=input("Press return to quit ") sys.exit()  # flag strings and messages restored best-effort from the docstring; the parsing that populates `bounds` for the '-crit' filter was lost in extraction, and the crit_max comparison (originally against crit_min) is fixed here
NAME plotxy_magic.py DESCRIPTION Makes simple X,Y plots INPUT FORMAT Any MagIC formatted file SYNTAX plotxy_magic.py [command line options] OPTIONS -h prints this help message -f FILE to set file name on command rec -c col1 col2 specify column names to plot -sym SYM SIZE specify symbol and size to plot: default is red dots -S don't plot symbols -xlab XLAB specify x-axis label -ylab YLAB specify y-axis label -l connect symbols with lines -b xmin xmax ymin ymax sets plot bounds (alternatively -b [key:min:max,key:min:max,...]; leave min or max blank for no cutoff)
383,744
def store(self): if msgpack is None: log.error('Cache cannot be written to disk: msgpack is missing') else: try: with salt.utils.files.fopen(self._path, 'wb+') as fp_: cache = { "CacheDisk_data": self._dict, "CacheDisk_cachetime": self._key_cache_time } msgpack.dump(cache, fp_, use_bin_type=True) except (IOError, OSError) as err: log.error('Error storing cache data to the disk: %s', err)  # log messages and file mode stripped in extraction; restored best-effort
Write content of the entire cache to disk
383,745
def copy(self): tokens = ([t for t in self.tokens] if isinstance(self.tokens, list) else self.tokens) return Identifier(tokens, 0)
Return copy of self Returns: Identifier object
383,746
def add_group_members(self, members): if not isinstance(members, list): members = [members] if not getattr(self, 'group_members', None): self.group_members = members else: self.group_members.extend(members)
Add new group members to the groups list :param members: member name or list of member names :type members: str or list :return: None
383,747
def _bubbleP(cls, T): c = cls._blend["bubble"] Tj = cls._blend["Tj"] Pj = cls._blend["Pj"] Tita = 1-T/Tj suma = 0 for i, n in zip(c["i"], c["n"]): suma += n*Tita**(i/2.) P = Pj*exp(Tj/T*suma) return P
Using ancillary equation return the pressure of bubble point
383,748
def run(self, agent_host): total_reward = 0 self.prev_s = None self.prev_a = None is_first_action = True world_state = agent_host.getWorldState() while world_state.is_mission_running: current_r = 0 if is_first_action: while True: time.sleep(0.1) world_state = agent_host.getWorldState() for error in world_state.errors: self.logger.error("Error: %s" % error.text) for reward in world_state.rewards: current_r += reward.getValue() if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}": total_reward += self.act(world_state, agent_host, current_r) break if not world_state.is_mission_running: break is_first_action = False else: while world_state.is_mission_running and current_r == 0: time.sleep(0.1) world_state = agent_host.getWorldState() for error in world_state.errors: self.logger.error("Error: %s" % error.text) for reward in world_state.rewards: current_r += reward.getValue() while True: time.sleep(0.1) world_state = agent_host.getWorldState() for error in world_state.errors: self.logger.error("Error: %s" % error.text) for reward in world_state.rewards: current_r += reward.getValue() if world_state.is_mission_running and len(world_state.observations)>0 and not world_state.observations[-1].text=="{}": total_reward += self.act(world_state, agent_host, current_r) break if not world_state.is_mission_running: break self.logger.debug("Final reward: %d" % current_r) total_reward += current_r if self.prev_s is not None and self.prev_a is not None: self.updateQTableFromTerminatingState( current_r ) self.drawQ() return total_reward
run the agent on the world
383,749
def mk_function(metamodel, s_sync): action = s_sync.Action_Semantics_internal label = s_sync.Name return lambda **kwargs: interpret.run_function(metamodel, label, action, kwargs)
Create a python function from a BridgePoint function.
383,750
def pad_to_size(text, x, y): input_lines = text.rstrip().split("\n") longest_input_line = max(map(len, input_lines)) number_of_input_lines = len(input_lines) x = max(x, longest_input_line) y = max(y, number_of_input_lines) output = "" padding_top = int((y - number_of_input_lines) / 2) padding_bottom = y - number_of_input_lines - padding_top padding_left = int((x - longest_input_line) / 2) output += padding_top * (" " * x + "\n") for line in input_lines: output += padding_left * " " + line + " " * (x - padding_left - len(line)) + "\n" output += padding_bottom * (" " * x + "\n") return output
Adds whitespace to text to center it within a frame of the given dimensions.
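Worked example: 'ab' centered in a 6x3 frame gains one blank row above and below and two columns of padding on the left (shown via repr to make the whitespace visible): >>> pad_to_size('ab', 6, 3) '      \n  ab  \n      \n'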
383,751
def get_breadcrumbs(self): if not self.breadcrumbs: return None else: allowed_breadcrumbs = [] for breadcrumb in self.breadcrumbs: if breadcrumb[1] is not None and not view_from_url( breadcrumb[1] ).has_permission(self.request.user): continue obj = self if not hasattr(self, "object") else self.object url = ( None if not breadcrumb[1] else reverse_url(breadcrumb[1], obj) ) allowed_breadcrumbs.append({"name": breadcrumb[0], "url": url}) return allowed_breadcrumbs
Breadcrumb format: (('name', 'url'), ...) or None if not used.
383,752
def update(self): if not self._queue: return dim, widget_type, attr, old, new = self._queue[-1] self._queue = [] dim_label = dim.pprint_label label, widget = self.widgets[dim_label] if widget_type == 'slider': if isinstance(label, AutocompleteInput): value = [new] widget.value = value else: widget.value = float(new) elif label: lookups = self.lookups.get(dim_label) if not self.editable: if lookups: new = lookups[widget.value][1] label.text = '<b>%s</b>' % dim.pprint_value_string(new) elif isinstance(label, AutocompleteInput): text = lookups[new][1] label.value = text else: label.value = dim.pprint_value(new) key = [] for dim, (label, widget) in self.widgets.items(): lookups = self.lookups.get(dim) if label and lookups: val = lookups[widget.value][0] else: val = widget.value key.append(val) key = wrap_tuple_streams(tuple(key), self.plot.dimensions, self.plot.streams) self.plot.update(key) self._active = False  # 'slider' literal and label markup stripped in extraction; restored best-effort
Handle update events on bokeh server.
383,753
def is_list_like(obj, allow_sets=True): return (isinstance(obj, abc.Iterable) and not isinstance(obj, (str, bytes)) and not (isinstance(obj, np.ndarray) and obj.ndim == 0) and not (allow_sets is False and isinstance(obj, abc.Set)))
Check if the object is list-like. Objects that are considered list-like are for example Python lists, tuples, sets, NumPy arrays, and Pandas Series. Strings and datetime objects, however, are not considered list-like. Parameters ---------- obj : The object to check allow_sets : boolean, default True If this parameter is False, sets will not be considered list-like .. versionadded:: 0.24.0 Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_list_like([1, 2, 3]) True >>> is_list_like({1, 2, 3}) True >>> is_list_like(datetime(2017, 1, 1)) False >>> is_list_like("foo") False >>> is_list_like(1) False >>> is_list_like(np.array([2])) True >>> is_list_like(np.array(2)) False
383,754
def _calculate_Hfr(self, T): if self.isCoal: return self._calculate_Hfr_coal(T) Hfr = 0.0 for compound in self.material.compounds: index = self.material.get_compound_index(compound) dHfr = thermo.H(compound, T, self._compound_mfrs[index]) Hfr = Hfr + dHfr return Hfr
Calculate the enthalpy flow rate of the stream at the specified temperature. :param T: Temperature. [°C] :returns: Enthalpy flow rate. [kWh/h]
383,755
def channels(self): if not self._channels: self._channels = self._call_api('channels.list')['channels'] return self._channels  # API method and response key stripped in extraction; restored best-effort from the Slack Web API
List of channels of this slack team
383,756
def _handle_get(self, request_data): der = base64.b64decode(request_data) ocsp_request = self._parse_ocsp_request(der) return self._build_http_response(ocsp_request)
An OCSP GET request contains the DER-in-base64 encoded OCSP request in the HTTP request URL.
383,757
def setLength(self, personID, length): self._connection._sendDoubleCmd( tc.CMD_SET_PERSON_VARIABLE, tc.VAR_LENGTH, personID, length)
setLength(string, double) -> None Sets the length in m for the given person.
383,758
def aggregate(input, **params): PARAM_CFG_EXTRACT = 'extract' PARAM_CFG_SUBSTITUTE = 'substitute' PARAM_CFG_AGGREGATE = 'aggregate' AGGR_FIELD = 'field' AGGR_FUNC = 'func' extract_params = params.get(PARAM_CFG_EXTRACT) extract_params.update({AccessParams.KEY_TYPE: AccessParams.TYPE_MULTI}) dataset = __extract(input, extract_params) if PARAM_CFG_SUBSTITUTE in params: dataset = __substitute(input, dataset, params.get(PARAM_CFG_SUBSTITUTE)) cfg = params.get(PARAM_CFG_AGGREGATE) res = Aggregator.agg_single_func(dataset, cfg[AGGR_FIELD], cfg[AGGR_FUNC]) return res  # constant values stripped in extraction; inferred from the constant names
Returns aggregate :param input: :param params: :return:
383,759
def metapolicy(self, permitted): if permitted not in VALID_SITE_CONTROL: raise TypeError(SITE_CONTROL_ERROR.format(permitted)) if permitted == SITE_CONTROL_NONE: self.domains = {} self.header_domains = {} self.identities = [] self.site_control = permitted
Sets metapolicy to ``permitted``. (only applicable to master policy files). Acceptable values correspond to those listed in Section 3(b)(i) of the crossdomain.xml specification, and are also available as a set of constants defined in this module. By default, Flash assumes a value of ``master-only`` for all policies except socket policies, (which assume a default of ``all``) so if this is desired (and, for security, it typically is), this method does not need to be called. Note that a metapolicy of ``none`` forbids **all** access, even if one or more domains, headers or identities have previously been specified as allowed. As such, setting the metapolicy to ``none`` will remove all access previously granted by ``allow_domain``, ``allow_headers`` or ``allow_identity``. Additionally, attempting to grant access via ``allow_domain``, ``allow_headers`` or ``allow_identity`` will, when the metapolicy is ``none``, raise ``TypeError``.
383,760
async def connect(self) -> None: def protocol_factory() -> Protocol: return Protocol(client=self) _, protocol = await self.loop.create_connection( protocol_factory, host=self.host, port=self.port, ssl=self.ssl ) if self.protocol: self.protocol.close() self.protocol = protocol protocol.client = self self.trigger("client_connect")
Open a connection to the defined server.
383,761
def bisect(func, a, b, xtol=1e-6, errorcontrol=True, testkwargs=dict(), outside='extrapolate', ascending=None, disp=False): search = True if ascending is None: if errorcontrol: testkwargs.update(dict(type_='smaller', force=True)) fa = func.test0(a, **testkwargs) fb = func.test0(b, **testkwargs) else: fa = func(a) < 0 fb = func(b) < 0 if fa and not fb: ascending = True elif fb and not fa: ascending = False else: if disp: print('Warning: func(a) and func(b) have the same sign') if outside == 'raise': raise BisectException() search = False while (b-a > xtol) and search: mid = (a+b)/2.0 if ascending: if ((not errorcontrol) and (func(mid) < 0)) or \ (errorcontrol and func.test0(mid, **testkwargs)): a = mid else: b = mid else: if ((not errorcontrol) and (func(mid) < 0)) or \ (errorcontrol and func.test0(mid, **testkwargs)): b = mid else: a = mid if disp: print('bisect bounds', a, b) if errorcontrol: ya, yb = func(a)[0], func(b)[0] else: ya, yb = func(a), func(b) m = (yb-ya) / (b-a) res = a-ya/m if disp: print('extrapolated root', res) return res  # stripped string literals restored best-effort; the 'extrapolate' default is taken from the docstring
Find root by bisection search. If the function evaluation is noisy then use `errorcontrol=True` for adaptive sampling of the function during the bisection search. Parameters ---------- func: callable Function of which the root should be found. If `errorcontrol=True` then the function should be derived from `AverageBase`. a, b: float initial interval xtol: float target tolerance for interval size errorcontrol: boolean if true, assume that function is derived from `AverageBase`. testkwargs: only for `errorcontrol=True` see `AverageBase.test0` outside: ['extrapolate', 'raise'] How to handle the case where f(a) and f(b) have same sign, i.e. where the root lies outside of the interval. If 'raise' throws a `BisectException`. ascending: allow passing in directly whether function is ascending or not if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0 if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0 Returns ------- float, root of function
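A quick sketch with a noiseless linear function, so `errorcontrol=False` (the final secant extrapolation recovers the root exactly): >>> root = bisect(lambda x: x - 1.25, 0.0, 2.0, xtol=1e-6, errorcontrol=False) >>> abs(root - 1.25) < 1e-6 True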
383,762
def getAllSavedQueries(self, projectarea_id=None, projectarea_name=None, creator=None, saved_query_name=None): pa_id = (self.rtc_obj ._pre_get_resource(projectarea_id=projectarea_id, projectarea_name=projectarea_name)) filter_rule = None if creator is not None: fcreator = self.rtc_obj.getOwnedBy(creator).url filter_rule = [("dc:creator", "@rdf:resource", fcreator)] self.log.debug("Add rules for fetching all saved queries: " "created by %s", creator) if saved_query_name is not None: ftitle_rule = ("dc:title", None, saved_query_name) if filter_rule is None: filter_rule = [ftitle_rule] else: filter_rule.append(ftitle_rule) self.log.debug("Add rules for fetching all saved queries: " "saved query title is %s", saved_query_name) return (self.rtc_obj ._get_paged_resources("SavedQuery", projectarea_id=pa_id, page_size="100", filter_rule=filter_rule))
Get all saved queries created by somebody (optional) in a certain project area (optional, either `projectarea_id` or `projectarea_name` is needed if specified). If `saved_query_name` is specified, only the saved queries that match the name will be fetched. Note: only if `creator` is added as a member can the saved queries be found. Otherwise None will be returned. WARNING: currently the RTC server cannot correctly list all the saved queries. It seems to be a bug in RTC. We recommend using `runSavedQueryByUrl` to query all the workitems if the query is saved. Note: It will run faster when more attributes are specified. :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id :param projectarea_name: the :class:`rtcclient.project_area.ProjectArea` name :param creator: the creator email address :param saved_query_name: the saved query name :return: a :class:`list` that contains the saved :class:`rtcclient.models.SavedQuery` objects :rtype: list
383,763
def pre_parse_and_validate_signavio(self, bpmn, filename): self._check_for_disconnected_boundary_events_signavio(bpmn, filename) self._fix_call_activities_signavio(bpmn, filename) return bpmn
This is the Signavio specific editor hook for pre-parsing and validation. A subclass can override this method to provide additional parsing or validation. It should call the parent method first. :param bpmn: an lxml tree of the bpmn content :param filename: the source file name This must return the updated bpmn object (or a replacement)
383,764
def previous_row(self): row = self.currentIndex().row() rows = self.source_model.rowCount() if row == 0: row = rows self.selectRow(row - 1)
Move to previous row from currently selected row.
383,765
def refresh_modules(self, module_string=None, exact=True): if not module_string: if time.time() > (self.last_refresh_ts + 0.1): self.last_refresh_ts = time.time() else: return update_i3status = False for name, module in self.output_modules.items(): if ( module_string is None or (exact and name == module_string) or (not exact and name.startswith(module_string)) ): if module["type"] == "py3status": if self.config["debug"]: self.log("refresh py3status module {}".format(name)) module["module"].force_update() else: if self.config["debug"]: self.log("refresh i3status module {}".format(name)) update_i3status = True if update_i3status: self.i3status_thread.refresh_i3status()
Update modules. If module_string is None, all modules are refreshed. Otherwise, modules with the exact name (exact=True) or whose names start with the given string (exact=False) are refreshed. If a module is an i3status one then we refresh i3status. To prevent abuse, we rate limit this function to 100ms for full refreshes.
383,766
def get_register_func(base_class, nickname): if base_class not in _REGISTRY: _REGISTRY[base_class] = {} registry = _REGISTRY[base_class] def register(klass, name=None): assert issubclass(klass, base_class), \ "Can only register subclass of %s"%base_class.__name__ if name is None: name = klass.__name__ name = name.lower() if name in registry: warnings.warn( "\033[91mNew %s %s.%s registered with name %s is" "overriding existing %s %s.%s\033[0m"%( nickname, klass.__module__, klass.__name__, name, nickname, registry[name].__module__, registry[name].__name__), UserWarning, stacklevel=2) registry[name] = klass return klass register.__doc__ = "Register %s to the %s factory"%(nickname, nickname) return register
Get registrator function. Parameters ---------- base_class : type base class for classes that will be registered nickname : str nickname of base_class for logging Returns ------- a registrator function
383,767
def get_default_config(self): config = super(NetworkCollector, self).get_default_config() config.update({ 'path': 'network', 'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens', 'enx'], 'byte_unit': ['bit', 'byte'], 'greedy': 'true', }) return config  # keys and default values stripped in extraction; restored best-effort from Diamond's NetworkCollector
Returns the default collector settings
383,768
def add_exception_handler(self, exception_handler): if exception_handler is None: raise RuntimeConfigException( "Valid Exception Handler instance to be provided") if not isinstance(exception_handler, AbstractExceptionHandler): raise RuntimeConfigException( "Input should be an ExceptionHandler instance") self.exception_handlers.append(exception_handler)
Register input to the exception handlers list. :param exception_handler: Exception Handler instance to be registered. :type exception_handler: AbstractExceptionHandler :return: None
383,769
def _get_asset_urls(self, asset_id): dom = get_page(self._session, OPENCOURSE_ASSETS_URL, json=True, id=asset_id) logging.debug('Parsing JSON for asset_id <%s>.', asset_id) urls = [] for element in dom['elements']: typeName = element['typeName'] definition = element['definition'] if typeName == 'asset': open_course_asset_id = definition['assetId'] for asset in self._asset_retriever([open_course_asset_id], download=False): urls.append({'name': asset.name, 'url': asset.url}) elif typeName == 'url': urls.append({'name': definition['name'].strip(), 'url': definition['url'].strip()}) else: logging.warning('Unknown asset typeName: %s dom: %s', typeName, json.dumps(dom, indent=4)) return urls  # string literals stripped in extraction; restored best-effort
Get list of asset urls and file names. This method may internally use AssetRetriever to extract `asset` element types. @param asset_id: Asset ID. @type asset_id: str @return List of dictionaries with asset file names and urls. @rtype [{ 'name': '<filename.ext>' 'url': '<url>' }]
383,770
async def create_local_did(self, seed: str = None, loc_did: str = None, metadata: dict = None) -> DIDInfo: LOGGER.debug('create_local_did >>> loc_did %s metadata %s', loc_did, metadata) cfg = {} if seed: cfg['seed'] = seed if loc_did: cfg['did'] = loc_did if not self.handle: LOGGER.debug('create_local_did <!< wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) try: (created_did, verkey) = await did.create_and_store_my_did(self.handle, json.dumps(cfg)) except IndyError as x_indy: if x_indy.error_code == ErrorCode.DidAlreadyExistsError: LOGGER.debug('create_local_did <!< DID %s already present in wallet %s', loc_did, self.name) raise ExtantRecord('Local DID {} already present in wallet {}'.format(loc_did, self.name)) LOGGER.debug('create_local_did <!< indy-sdk raised error %s', x_indy.error_code) raise now = int(time()) loc_did_metadata = {**(metadata or {}), 'since': now, 'modified': now} await did.set_did_metadata(self.handle, created_did, json.dumps(loc_did_metadata)) rv = DIDInfo(created_did, verkey, loc_did_metadata) LOGGER.debug('create_local_did <<< %s', rv) return rv  # log messages and dict keys stripped in extraction; restored best-effort
Create and store a new local DID for use in pairwise DID relations. :param seed: seed from which to create (default random) :param loc_did: local DID value (default None to let indy-sdk generate) :param metadata: metadata to associate with the local DID (operation always sets 'since', 'modified' epoch timestamps) :return: DIDInfo for new local DID
383,771
def service_group(self, service_name): for group in EFConfig.SERVICE_GROUPS: if service_name in self.services(group): return group return None
Args: service_name: the name of the service in the service registry Returns: the name of the group the service is in, or None if the service was not found
383,772
def summarize(self): data = [ [, self.seqrecord.id], [, .join(self.gdomain_regions) if self.gdomain_regions else None], [, self.evalue_bh_rabs], [, self.evalue_bh_non_rabs], [, .join(map(str, self.rabf_motifs)) if self.rabf_motifs else None], [, self.is_rab()] ] summary = for name, value in data: summary += .format(name, value) if self.is_rab(): summary += .format(, .join(.format(name, score) for name, score in self.rab_subfamily_top5)) return summary
G protein annotation summary in a text format :return: A string summary of the annotation :rtype: str
383,773
def decode(self, encoded): if self.enforce_reversible: self.enforce_reversible = False if self.encode(self.decode(encoded)) != encoded: raise ValueError('Decoding is not reversible for "%s"' % encoded) self.enforce_reversible = True return encoded  # error message stripped in extraction; restored best-effort
Decodes an object. Args: encoded (object): Encoded object. Returns: object: Object decoded.
383,774
def fetch_pillar(self): log.debug('Pillar cache getting external pillar with ext: %s', self.ext) fresh_pillar = Pillar(self.opts, self.grains, self.minion_id, self.saltenv, ext=self.ext, functions=self.functions, pillarenv=self.pillarenv) return fresh_pillar.compile_pillar()  # debug message stripped in extraction; restored best-effort
In the event of a cache miss, we need to incur the overhead of caching a new pillar.
383,775
def stop_change(self): self.logger.info("Dimmer %s stop_change", self.device_id) self.hub.direct_command(self.device_id, '18', '00') success = self.hub.check_success(self.device_id, '18', '00') if success: self.logger.info("Dimmer %s stop_change: Light stopped changing successfully", self.device_id) self.hub.clear_device_command_cache(self.device_id) else: self.logger.error("Dimmer %s stop_change: Light did not stop", self.device_id) return success  # command bytes stripped in extraction; '18'/'00' (Insteon stop manual change) assumed
Stop changing light level manually
383,776
def load(stream, Loader=None):
    if Loader is None:
        load_warning('load')  # warn that calling load() without a Loader argument is deprecated
        Loader = FullLoader
    loader = Loader(stream)
    try:
        return loader.get_single_data()
    finally:
        loader.dispose()
Parse the first YAML document in a stream and produce the corresponding Python object.
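For comparison, the non-deprecated call sites pass a Loader explicitly (PyYAML >= 5.1):

import yaml

with open('config.yml') as handle:
    data = yaml.load(handle, Loader=yaml.SafeLoader)  # explicit Loader, no warning

# yaml.safe_load(handle) is the equivalent shorthand.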
383,777
def extract_tar (archive, compression, cmd, verbosity, interactive, outdir): try: with tarfile.open(archive) as tfile: tfile.extractall(path=outdir) except Exception as err: msg = "error extracting %s: %s" % (archive, err) raise util.PatoolError(msg) return None
Extract a TAR archive with the tarfile Python module.
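The same extraction without the patool wrapper; note that compression, cmd, verbosity, and interactive are part of the common extractor interface and unused by this tarfile backend:

import tarfile

with tarfile.open('archive.tar.gz') as tfile:
    tfile.extractall(path='out')  # consider filtering members when the archive is untrusted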
383,778
def migrate_file(src_id, location_name, post_fixity_check=False): location = Location.get_by_name(location_name) f_src = FileInstance.get(src_id) f_dst = FileInstance.create() db.session.commit() try: f_dst.copy_contents( f_src, progress_callback=progress_updater, default_location=location.uri, ) db.session.commit() except Exception: db.session.delete(f_dst) db.session.commit() raise ObjectVersion.relink_all(f_src, f_dst) db.session.commit() if post_fixity_check: verify_checksum.delay(str(f_dst.id))
Task to migrate a file instance to a new location. .. note:: If something goes wrong during the content copy, the destination file instance is removed. :param src_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param location_name: Where to migrate the file. :param post_fixity_check: Verify checksum after migration. (Default: ``False``)
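Since this is a shared task, callers would normally queue it rather than run it inline; a hedged sketch (the location name and the FileInstance lookup are illustrative, and assume an application context):

from invenio_files_rest.models import FileInstance

f = FileInstance.query.first()                       # some existing file instance
migrate_file.delay(str(f.id), 'archive', post_fixity_check=True)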
383,779
def cli(ctx, ftdi_enable, ftdi_disable, serial_enable, serial_disable): exit_code = 0 if ftdi_enable: exit_code = Drivers().ftdi_enable() elif ftdi_disable: exit_code = Drivers().ftdi_disable() elif serial_enable: exit_code = Drivers().serial_enable() elif serial_disable: exit_code = Drivers().serial_disable() else: click.secho(ctx.get_help()) ctx.exit(exit_code)
Manage FPGA boards drivers.
383,780
def _got_srv(self, addrs): with self.lock: if not addrs: self._dst_service = None if self._dst_port: self._dst_nameports = [(self._dst_name, self._dst_port)] else: self._dst_nameports = [] self._set_state("aborted") raise DNSError("Could not resolve SRV for service {0!r}" " on host {1!r} and fallback port number not given" .format(self._dst_service, self._dst_name)) elif addrs == [(".", 0)]: self._dst_nameports = [] self._set_state("aborted") raise DNSError("Service {0!r} not available on host {1!r}" .format(self._dst_service, self._dst_name)) else: self._dst_nameports = addrs self._set_state("resolve-hostname")
Handle SRV lookup result. :Parameters: - `addrs`: properly sorted list of (hostname, port) tuples
383,781
def min_ems(self, value: float):  # the return annotation (the host class) was stripped in extraction
    raise_not_number(value)
    self.minimum = '{}em'.format(value)  # 'em' unit suffix reconstructed from the method name
    return self
Set the minimum size in ems.
383,782
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    series = table[self.name]
    self._check_series_name(series)
    col = self.name
    data = series.copy()
    for recoder in self.recoders.values():
        try:
            data = recoder(data)
        except BaseException as err:
            raise RecodingError(col, recoder, err)
    if validate:
        failed_rows = find_failed_rows(self.validate(data.to_frame()))
        if failed_rows.shape[0] > 0:
            # Message text reconstructed around the stripped column name.
            raise ValidationError(f"Rows that failed to validate for column '{col}':\n{failed_rows}")
    return data.to_frame()
Pass the provided series obj through each recoder function sequentially and return the final result. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
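A sketch of the intended flow, using a hypothetical Column constructor and a toy recoder (see the record above for the recode() contract):

import pandas as pd

def strip_whitespace(series: pd.Series) -> pd.Series:
    return series.str.strip()

col = Column(name='city')                      # hypothetical constructor
col.recoders['strip'] = strip_whitespace       # recoders is a dict of callables
table = pd.DataFrame({'city': [' Oslo ', 'Bergen']})
clean = col.recode(table)                      # one-column frame of stripped values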
383,783
def _compose_mro(cls, types):
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(_type):
        return (
            _type not in bases
            and hasattr(_type, '__mro__')
            and issubclass(cls, _type)
        )
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries; they will end
    # up in the MRO anyway.
    def is_strict_base(_typ):
        for other in types:
            if _typ != other and _typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by *cls*
    # are used to stabilize the ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases.
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
Calculates the method resolution order for a given class *cls*. Includes relevant abstract base classes (with their respective bases) from the *types* iterable. Uses a modified C3 linearization algorithm.
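This ABC-aware linearization is what lets functools.singledispatch match virtual subclasses; a runnable illustration of the behavior it enables:

from collections.abc import Sequence
from functools import singledispatch

@singledispatch
def describe(obj):
    return 'plain object'

@describe.register(Sequence)
def _(obj):
    return 'sequence'

print(describe([1, 2, 3]))  # 'sequence': list reaches Sequence through the composed MRO
print(describe(42))         # 'plain object'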
383,784
def burn(self):
    if not self.data:
        raise ValueError("No data available")
    if hasattr(self, 'calculations'):  # attribute name restored to match the call below
        self.calculations()
    self.start_svg()
    self.calculate_graph_dimensions()
    self.foreground = etree.Element("g")
    self.draw_graph()
    self.draw_titles()
    self.draw_legend()
    self.draw_data()
    self.graph.append(self.foreground)
    self.render_inline_styles()
    return self.render(self.root)
Process the template with the data and config which has been set and return the resulting SVG. Raises ValueError when no data set has been added to the graph object.
383,785
def VerifyStructure(self, parser_mediator, line):
    try:
        structure = self._DPKG_LOG_LINE.parseString(line)
    except pyparsing.ParseException as exception:
        # Debug message reconstructed; structure keys follow the dpkg parser's grammar.
        logger.debug('Unable to parse dpkg.log line with error: {0!s}'.format(exception))
        return False
    return 'date_time' in structure and 'body' in structure
Verifies if a line from a text file is in the expected format. Args: parser_mediator (ParserMediator): parser mediator. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
383,786
def get_relationships_by_query(self, relationship_query):
    # MongoDB operator keys and collection names were stripped in extraction;
    # restored values follow the JSON-backed OSID implementation's conventions.
    and_list = list()
    or_list = list()
    for term in relationship_query._query_terms:
        if '$in' in relationship_query._query_terms[term] and '$nin' in relationship_query._query_terms[term]:
            and_list.append(
                {'$or': [{term: {'$in': relationship_query._query_terms[term]['$in']}},
                         {term: {'$nin': relationship_query._query_terms[term]['$nin']}}]})
        else:
            and_list.append({term: relationship_query._query_terms[term]})
    for term in relationship_query._keyword_terms:
        or_list.append({term: relationship_query._keyword_terms[term]})
    if or_list:
        and_list.append({'$or': or_list})
    view_filter = self._view_filter()
    if view_filter:
        and_list.append(view_filter)
    if and_list:
        query_terms = {'$and': and_list}
        collection = JSONClientValidated('relationship',
                                         collection='Relationship',
                                         runtime=self._runtime)
        result = collection.find(query_terms).sort('_id', DESCENDING)
    else:
        result = []
    return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)
Gets a list of ``Relationships`` matching the given relationship query. arg: relationship_query (osid.relationship.RelationshipQuery): the relationship query return: (osid.relationship.RelationshipList) - the returned ``RelationshipList`` raise: NullArgument - ``relationship_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``relationship_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
383,787
def fit(self, matrix, epochs=5, no_threads=2, verbose=False):
    # Exception and progress messages were stripped in extraction; the
    # strings below are best-effort reconstructions.
    shape = matrix.shape
    if (len(shape) != 2 or shape[0] != shape[1]):
        raise Exception('Coocurrence matrix must be square')
    if not sp.isspmatrix_coo(matrix):
        raise Exception('Coocurrence matrix must be in the COO format')
    random_state = check_random_state(self.random_state)
    self.word_vectors = ((random_state.rand(shape[0], self.no_components) - 0.5)
                         / self.no_components)
    self.word_biases = np.zeros(shape[0], dtype=np.float64)
    self.vectors_sum_gradients = np.ones_like(self.word_vectors)
    self.biases_sum_gradients = np.ones_like(self.word_biases)
    shuffle_indices = np.arange(matrix.nnz, dtype=np.int32)
    if verbose:
        print('Performing %s training epochs with %s threads' % (epochs, no_threads))
    for epoch in range(epochs):
        if verbose:
            print('Epoch %s' % epoch)
        # Shuffle the co-occurrence entries before each epoch.
        random_state.shuffle(shuffle_indices)
        fit_vectors(self.word_vectors,
                    self.vectors_sum_gradients,
                    self.word_biases,
                    self.biases_sum_gradients,
                    matrix.row,
                    matrix.col,
                    matrix.data,
                    shuffle_indices,
                    self.learning_rate,
                    self.max_count,
                    self.alpha,
                    self.max_loss,
                    int(no_threads))
    if not np.isfinite(self.word_vectors).all():
        raise Exception('Non-finite values in word vectors. '
                        'Try reducing the learning rate or the max_loss parameter.')
Estimate the word embeddings. Parameters: - scipy.sparse.coo_matrix matrix: coocurrence matrix - int epochs: number of training epochs - int no_threads: number of training threads - bool verbose: print progress messages if True
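A toy fit, assuming the class exposes the usual glove-python constructor arguments (no_components, learning_rate):

import numpy as np
import scipy.sparse as sp

rng = np.random.RandomState(0)
dense = rng.rand(20, 20)
cooc = sp.coo_matrix(dense + dense.T)               # square, symmetric toy co-occurrences

model = Glove(no_components=8, learning_rate=0.05)  # constructor args assumed
model.fit(cooc, epochs=3, no_threads=1, verbose=True)
print(model.word_vectors.shape)                     # (20, 8)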
383,788
def get_messages(session, query, limit=10, offset=0):
    # Query keys, endpoint name, and response fields were stripped in
    # extraction; restored values follow the SDK's usual response envelope.
    query['limit'] = limit
    query['offset'] = offset
    response = make_get_request(session, 'messages', params_data=query)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise MessagesNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
Get one or more messages
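Paging through a thread might look like this; the filter key and the shape of the returned result payload are assumptions:

offset = 0
while True:
    result = get_messages(session, {'threads[]': [thread_id]}, limit=50, offset=offset)
    messages = result.get('messages', [])   # payload key assumed
    if not messages:
        break
    offset += len(messages)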
383,789
def __reset_crosshair(self): self.lhor.set_ydata(self.y_coord) self.lver.set_xdata(self.x_coord)
Redraw the cross-hair on the horizontal slice plot at the stored image coordinates (self.x_coord, self.y_coord).
383,790
def remove(self, value): ret = libxml2mod.xmlACatalogRemove(self._o, value) return ret
Remove an entry from the catalog
383,791
def get_dimension_type(self, dim): dim = self.get_dimension(dim) if dim is None: return None elif dim.type is not None: return dim.type elif dim in self.vdims: return np.float64 return self.interface.dimension_type(self, dim)
Get the type of the requested dimension.

Type is determined by Dimension.type attribute or common type of the dimension values, otherwise None.

Args:
    dim: Dimension to look up by name or by index

Returns:
    Declared type of values along the dimension
383,792
def _validate_param(param):
    # The original error-detail strings were stripped in extraction; the
    # messages below are illustrative placeholders, not the library's text.
    detail = None
    if param.oper not in goldman.config.QUERY_FILTERS:
        detail = 'The "{}" operator is not a supported filter ' \
                 'operator: {}'.format(param.oper, param)
    elif param.oper in goldman.config.GEO_FILTERS:
        try:
            if not isinstance(param.val, list) or len(param.val) <= 2:
                raise ValueError
            else:
                param.val = [float(i) for i in param.val]
        except ValueError:
            detail = 'Geo filters require a list of more than two ' \
                     'numbers: {}'.format(param)
    elif param.oper in goldman.config.ENUM_FILTERS:
        if not isinstance(param.val, list):
            param.val = [param.val]
        param.val = tuple(param.val)
    elif isinstance(param.val, list):
        detail = 'The "{}" filter does not accept a list ' \
                 'of values'.format(param)
    elif param.oper in goldman.config.BOOL_FILTERS:
        try:
            param.val = str_to_bool(param.val)
        except ValueError:
            detail = 'Boolean filters require a true/false ' \
                     'value: {}'.format(param)
    elif param.oper in goldman.config.DATE_FILTERS:
        try:
            param.val = str_to_dt(param.val)
        except ValueError:
            detail = 'Date filters require an ISO 8601 ' \
                     'timestamp: {}'.format(param)
    elif param.oper in goldman.config.NUM_FILTERS:
        try:
            param.val = int(param.val)
        except ValueError:
            detail = 'Numeric filters require an integer ' \
                     'value: {}'.format(param)
    if detail:
        raise InvalidQueryParams(**{
            'detail': detail,
            'links': LINK,
            'parameter': PARAM,
        })
Ensure the filter value is cast properly according to the operator
383,793
def get_power_status() -> SystemPowerStatus: get_system_power_status = ctypes.windll.kernel32.GetSystemPowerStatus get_system_power_status.argtypes = [ctypes.POINTER(SystemPowerStatus)] get_system_power_status.restype = wintypes.BOOL status = SystemPowerStatus() if not get_system_power_status(ctypes.pointer(status)): raise ctypes.WinError() else: return status
Retrieves the power status of the system. The status indicates whether the system is running on AC or DC power, whether the battery is currently charging, how much battery life remains, and if battery saver is on or off. :raises OSError: if the call to GetSystemPowerStatus fails :return: the power status :rtype: SystemPowerStatus
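Reading the struct's Win32 fields after a call; the field names come from the documented SYSTEM_POWER_STATUS structure, and the wrapper class is assumed to mirror them one-to-one:

status = get_power_status()
on_ac = status.ACLineStatus == 1
pct = status.BatteryLifePercent          # 0-100, or 255 when unknown
print('AC power: %s, battery: %s%%' % (on_ac, pct))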
383,794
def poll(self):
    # Phase and container-name literals were stripped in extraction;
    # 'Pending' and 'notebook' are restored per Kubernetes/JupyterHub conventions.
    if not self.pod_reflector.first_load_future.done():
        yield self.pod_reflector.first_load_future
    data = self.pod_reflector.pods.get(self.pod_name, None)
    if data is not None:
        if data.status.phase == 'Pending':
            return None
        ctr_stat = data.status.container_statuses
        if ctr_stat is None:  # no container statuses available yet
            return 1
        for c in ctr_stat:
            # Return the exit code if the notebook container has terminated.
            if c.name == 'notebook':
                if c.state.terminated:
                    if self.delete_stopped_pods:
                        yield self.stop(now=True)
                    return c.state.terminated.exit_code
                break
        return None
    # Pod doesn't exist or has been deleted.
    return 1
Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running.
383,795
def _finish_progress(self): if self._show_progressbar: if self._progressbar is None: self._initialize_progressbar() if self._progressbar is not None: self._progressbar.finish() if self._progress_callback is not None: self._progress_callback(100.0)
Mark the progressbar as finished. :return: None
383,796
def validate(self):
    # NOTE: the concrete key names, message types, and error strings in this
    # validator were stripped in extraction and cannot be recovered; the
    # '<...>' tokens below are placeholders, not the library's real values.
    required = ['<base-key-1>', '<base-key-2>']
    valid_data = {
        # message type: ([extra required keys], required content type, error message)
        '<type-1>': (['<key>', '<key>'], '<content>', '<error message>'),
        # ... eleven further message types were elided in extraction ...
    }
    extra, req_content, err_msg = valid_data[self.type]
    required.extend(extra)
    required = set(required)
    pl_keys = set(self.payload.keys())
    if not set(required) <= pl_keys:
        not_pre = required - pl_keys
        raise RCAPIError("Required keys: %s" % ', '.join(not_pre))
    try:
        if self.payload['<content-key>'] != req_content:
            raise RCAPIError(err_msg)
    except KeyError:
        raise RCAPIError('<missing content-key error>')
Checks that at least the required params exist
383,797
def encrypt(s, base64 = False): e = _cipher().encrypt(s) return base64 and b64encode(e) or e
Symmetric encryption function.
383,798
def get_parser(parser=None):
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    if parser is None:
        parser = ArgumentParser(description=__doc__,
                                formatter_class=ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers()
    pkg_init_parser = subparsers.add_parser('init')  # subcommand name reconstructed
    pkg_init_parser.add_argument("root", nargs='?',
                                 help="project root - should be empty")
    pkg_init_parser.set_defaults(func=run_init)
    return parser
Get parser for mpu.
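Presumed wiring from the package's console entry point; that run_init accepts the parsed namespace is an assumption:

parser = get_parser()
args = parser.parse_args(['init', 'my_project'])
args.func(args)   # set_defaults(func=run_init) dispatches here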
383,799
def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False): self.searchString = searchString self.category = category self.math = math self.game = game self.searchFiles = searchFiles self.extension = extension
Call this function with all the settings to use for future operations on a repository; it must be called FIRST