Dataset columns: Unnamed: 0 — int64, 0 to 389k; code — string, lengths 26 to 79.6k; docstring — string, lengths 1 to 46.9k
9,000
def _list(self, request, start_response):
    configs = []
    generator = directory_list_generator.DirectoryListGenerator(request)
    for config in self._config_manager.configs.itervalues():
        if config != self.API_CONFIG:
            configs.append(config)
    directory = generator.pretty_print_config_to_json(configs)
    if not directory:
        _logger.error('Failed to get API directory')  # log message lost in extraction; reconstructed
        return util.send_wsgi_not_found_response(start_response)
    return self._send_success_response(directory, start_response)
Sends HTTP response containing the API directory.

This calls start_response and returns the response body.

Args:
    request: An ApiRequest, the transformed request sent to the
        Discovery API.
    start_response: A function with semantics defined in PEP-333.

Returns:
    A string containing the response body.
9,001
def undoable(generator):
    def inner(*args, **kwargs):
        action = _Action(generator, args, kwargs)
        ret = action.do()
        stack().append(action)
        if isinstance(ret, tuple):
            if len(ret) == 1:
                return ret[0]
            elif len(ret) == 0:
                return None
        return ret
    return inner
Decorator which creates a new undoable action type.

This decorator should be used on a generator of the following format::

    @undoable
    def operation(*args):
        do_operation_code
        yield 'descriptive text'
        undo_operator_code
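A minimal usage sketch for the decorator above, assuming the module's stack() and _Action helpers behave as in the undo library (do() runs the generator up to its yield; undoing resumes it):

@undoable
def append_item(seq, value):
    seq.append(value)          # do_operation_code
    yield 'append %r' % value  # descriptive text recorded for the action
    seq.pop()                  # undo_operator_code, runs on undo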
9,002
def from_arg_kinds(cls, arch, fp_args, ret_fp=False, sizes=None, sp_delta=None, func_ty=None):
    basic = cls(arch, sp_delta=sp_delta, func_ty=func_ty)
    basic.args = basic.arg_locs(fp_args, sizes)
    basic.ret_val = basic.fp_return_val if ret_fp else basic.return_val
    return basic
Get an instance of the class that will extract floating-point/integral args correctly.

:param arch:     The Archinfo arch for this CC
:param fp_args:  A list, with one entry for each argument the function can take.
                 True if the argument is fp, false if it is integral.
:param ret_fp:   True if the return value for the function is fp.
:param sizes:    Optional: A list, with one entry for each argument the function
                 can take. Each entry is the size of the corresponding argument
                 in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this
                 function - CURRENTLY UNUSED
:param func_ty:  A SimType for the function itself
9,003
def output_default(paragraphs, fp=sys.stdout, no_boilerplate=True):
    for paragraph in paragraphs:
        if paragraph.class_type == 'good':  # literal lost in extraction; 'good' assumed from the jusText convention
            if paragraph.heading:
                tag = 'h'
            else:
                tag = 'p'
        elif no_boilerplate:
            continue
        else:
            tag = 'b'
        print('<%s> %s' % (tag, cgi.escape(paragraph.text)), file=fp)
Outputs the paragraphs as:

    <tag> text of the first paragraph
    <tag> text of the second paragraph
    ...

where <tag> is <p>, <h> or <b> which indicates standard paragraph,
heading or boilerplate respectively.
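For illustration, a page with one heading, one body paragraph and one boilerplate block (the latter printed only when no_boilerplate=False) would come out as:

<h> Page title
<p> First body paragraph
<b> Footer boilerplate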
9,004
def do_command(self):
    method = self.args[0]
    raw_args = self.args[1:]
    if '.' in method:  # tested literal lost in extraction; reconstructed
        if raw_args:
            # error message garbled in the source ("Please don=,'...")
            self.parser.error("Please don't pass arguments with a dotted method")
    self.execute(self.open(), method, self.cooked(raw_args))
Call a single command with arguments.
9,005
def hpre(*content, sep='\n'):  # default separator lost in extraction; '\n' assumed
    return _md(quote_html(_join(*content, sep=sep)), symbols=MD_SYMBOLS[7])
Make mono-width text block (HTML)

:param content:
:param sep:
:return:
9,006
def format_docstring(elt, arg_comments:dict={}, alt_doc_string:str='', ignore_warn:bool=False)->str:
    "Merge and format the docstring definition with `arg_comments` and `alt_doc_string`."
    # dict keys and f-string bodies below were lost in extraction; the
    # reconstructed names follow the parse_docstring convention
    parsed = ""
    doc = parse_docstring(inspect.getdoc(elt))
    description = alt_doc_string or f"{doc['short_description']} {doc['long_description']}"
    if description: parsed += f"{description}\n"
    resolved_comments = {**doc.get('comments', {}), **arg_comments}
    args = inspect.getfullargspec(elt).args if not is_enum(elt.__class__) else elt.__members__.keys()
    if resolved_comments:
        parsed += '\n'
        for a in resolved_comments:
            parsed += f"- *{a}*: {resolved_comments[a]}\n"
            if a not in args and not ignore_warn: warn(f"Argument {a} not found in {elt}")
    return_comment = arg_comments.get('return') or doc.get('return')
    if return_comment: parsed += f"\n*return*: {return_comment}"
    return parsed
Merge and format the docstring definition with `arg_comments` and `alt_doc_string`.
9,007
def ReadClientLastPings(self, min_last_ping=None, max_last_ping=None,
                        fleetspeak_enabled=None, cursor=None):
    query = "SELECT client_id, UNIX_TIMESTAMP(last_ping) FROM clients "
    query_values = []
    where_filters = []
    if min_last_ping is not None:
        where_filters.append("last_ping >= FROM_UNIXTIME(%s) ")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(min_last_ping))
    if max_last_ping is not None:
        where_filters.append(
            "(last_ping IS NULL OR last_ping <= FROM_UNIXTIME(%s))")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(max_last_ping))
    if fleetspeak_enabled is not None:
        if fleetspeak_enabled:
            where_filters.append("fleetspeak_enabled IS TRUE")
        else:
            where_filters.append(
                "(fleetspeak_enabled IS NULL OR fleetspeak_enabled IS FALSE)")
    if where_filters:
        # joined with surrounding spaces so adjacent filters stay valid SQL
        query += "WHERE " + " AND ".join(where_filters)
    cursor.execute(query, query_values)
    last_pings = {}
    for int_client_id, last_ping in cursor.fetchall():
        client_id = db_utils.IntToClientID(int_client_id)
        last_pings[client_id] = mysql_utils.TimestampToRDFDatetime(last_ping)
    return last_pings
Reads last-ping timestamps for clients in the database, keyed by client id.
9,008
def get_model_choices():
    # order_by fields and format strings lost in extraction; reconstructed
    result = []
    for ct in ContentType.objects.order_by('app_label', 'model'):
        try:
            if issubclass(ct.model_class(), TranslatableModel):
                result.append(
                    ('{}.{}'.format(ct.app_label, ct.model.lower()),
                     '{}: {}'.format(ct.app_label.capitalize(),
                                     ct.model_class()._meta.verbose_name_plural))
                )
        except TypeError:
            continue
    return result
Get the select options for the model selector

:return:
9,009
def repr_failure(self, excinfo):
    exc = excinfo.value
    cc = self.colors
    if isinstance(exc, NbCellError):
        msg_items = [
            cc.FAIL + "Notebook cell execution failed" + cc.ENDC]
        formatstring = (
            cc.OKBLUE + "Cell %d: %s\n\n" +
            "Input:\n" + cc.ENDC + "%s\n")
        msg_items.append(formatstring % (
            exc.cell_num,
            str(exc),
            exc.source
        ))
        if exc.inner_traceback:
            msg_items.append((
                cc.OKBLUE + "Traceback:" + cc.ENDC + "\n%s\n") %
                exc.inner_traceback)
        return "\n".join(msg_items)
    else:
        return "pytest plugin exception: %s" % str(exc)
Called when self.runtest() raises an exception.
9,010
def text_to_qcolor(text):
    color = QColor()
    if not is_string(text):  # testing for QString (PyQt API #1)
        text = str(text)
    if not is_text_string(text):
        return color
    if text.startswith('#') and len(text) == 7:
        correct = '#0123456789abcdef'  # literal lost in extraction; reconstructed
        for char in text:
            if char.lower() not in correct:
                return color
    elif text not in list(QColor.colorNames()):
        return color
    color.setNamedColor(text)
    return color
Create a QColor from specified string.

Avoid warning from Qt when an invalid QColor is instantiated.
9,011
def lookup(self, subcmd_prefix):
    for subcmd_name in list(self.subcmds.keys()):
        if subcmd_name.startswith(subcmd_prefix) \
           and len(subcmd_prefix) >= self.subcmds[subcmd_name].__class__.min_abbrev:
            return self.subcmds[subcmd_name]
    return None
Find subcmd in self.subcmds
9,012
def is_empty_shape(sh: ShExJ.Shape) -> bool:
    return sh.closed is None and sh.expression is None and sh.extra is None and \
        sh.semActs is None
Determine whether sh has no value (i.e. the Shape is empty)
9,013
def scan_temperature_old(self, measure, temperature, rate, delay=1):
    # activity literals lost in extraction; 'hold'/'to set point' assumed
    self.activity = 'hold'
    self.sweep_table.clear()
    current_temperature = self.control_temperature
    sweep_time = abs((temperature - current_temperature) / rate)
    self.sweep_table[0] = temperature, sweep_time, 0.
    self.sweep_table[-1] = temperature, 0., 0.
    self.activity = 'to set point'
    while self.activity == 'to set point':
        measure()
        time.sleep(delay)
Performs a temperature scan.

Measures until the target temperature is reached.

:param measure: A callable called repeatedly until stability at target
    temperature is reached.
:param temperature: The target temperature in kelvin.
:param rate: The sweep rate in kelvin per minute.
:param delay: The time delay between each call to measure in seconds.
9,014
def clean_ticker(ticker):
    # regex and replacement literals lost in extraction; reconstructed to
    # strip any characters that are not letters, per the docstring
    pattern = re.compile('[\\W_]+')
    res = pattern.sub('', ticker.split(' ')[0])
    return res.lower()
Cleans a ticker for easier use throughout MoneyTree

Splits by space and only keeps first bit. Also removes
any characters that are not letters. Returns as lowercase.

>>> clean_ticker('^VIX')
'vix'
>>> clean_ticker('SPX Index')
'spx'
9,015
def _elements(self, IDs, func, aspList):
    res = []
    for asp in aspList:
        if (asp in [0, 180]):
            if func == self.N:
                res.extend([func(ID, asp) for ID in IDs])
            else:
                res.extend([func(ID) for ID in IDs])
        else:
            res.extend([self.D(ID, asp) for ID in IDs])
            res.extend([self.S(ID, asp) for ID in IDs])
    return res
Returns the IDs as objects considering the aspList and the function.
9,016
def load_agents(self, config_file=None):
    if config_file is not None:
        self.overall_config = config_file
    self.agents.clear()
    num_participants = get_num_players(self.overall_config)
    try:
        for i in range(num_participants):
            self.load_agent(i)
    except BaseException as e:
        raise ValueError(f"{str(e)}\nPlease check your config files! {self.overall_config_path}")
Loads all agents for this team from the rlbot.cfg

:param config_file: A config file that is similar to rlbot.cfg
9,017
def pre_calc(self, x, y, beta, n_order, center_x, center_y):
    x_ = x - center_x
    y_ = y - center_y
    n = len(np.atleast_1d(x))
    H_x = np.empty((n_order + 1, n))
    H_y = np.empty((n_order + 1, n))
    if n_order > 170:
        raise ValueError('polynomial order too high', n_order)  # message lost in extraction
    for n in range(0, n_order + 1):
        prefactor = 1. / np.sqrt(2 ** n * np.sqrt(np.pi) * math.factorial(n))
        n_array = np.zeros(n + 1)
        n_array[n] = 1
        H_x[n] = self.hermval(x_ / beta, n_array, tensor=False) * prefactor * np.exp(-(x_ / beta) ** 2 / 2.)
        H_y[n] = self.hermval(y_ / beta, n_array, tensor=False) * prefactor * np.exp(-(y_ / beta) ** 2 / 2.)
    return H_x, H_y
calculates the H_n(x) and H_n(y) for a given x-array and y-array

:param x:
:param y:
:param amp:
:param beta:
:param n_order:
:param center_x:
:param center_y:
:return: list of H_n(x) and H_n(y)
9,018
def htmldiff_tokens(html1_tokens, html2_tokens):
    # the diffing body was truncated in the source; only the final
    # cleanup-and-return survives
    result = cleanup_delete(result)
    return result
Does a diff on the tokens themselves, returning a list of text chunks (not tokens).
9,019
def _set_mstp(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # most string literals in this generated pyangbind call were lost
        # in extraction; placeholders (...) mark unrecoverable values
        t = YANGDynClass(v, base=mstp.mstp, is_container='container',
                         presence=True, yang_name="mstp", rest_name="mstp",
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions=..., namespace=...,
                         defining_module=..., yang_type='container',
                         is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'mstp must be of a type compatible with container',
            'defined-type': "container",
            'generated-type': ...,
        })
    self.__mstp = t
    if hasattr(self, '_set'):
        self._set()
Setter method for mstp, mapped from YANG variable
/protocol/spanning_tree/mstp (container)

If this variable is read-only (config: false) in the source YANG file,
then _set_mstp is considered as a private method. Backends looking to
populate this variable should do so via calling thisObj._set_mstp()
directly.
9,020
def _get_input_for_run(args, executable, preset_inputs=None, input_name_prefix=None):
    exec_inputs = try_call(ExecutableInputs, executable,
                           input_name_prefix=input_name_prefix,
                           active_region=args.region)
    if args.input_json is None and args.filename is None:
        exec_inputs.update(args.input_from_clone, strip_prefix=False)
    if preset_inputs is not None:
        exec_inputs.update(preset_inputs, strip_prefix=False)
    require_all_inputs = (args.batch_tsv is None)
    try_call(exec_inputs.update_from_args, args, require_all_inputs)
    return exec_inputs.inputs
Returns an input dictionary that can be passed to executable.run()
9,021
def _file_filter(cls, filename, include_patterns, exclude_patterns):
    logger.debug('Filtering {0}'.format(filename))  # message lost in extraction
    for exclude_pattern in exclude_patterns:
        if exclude_pattern.match(filename):
            return False
    if include_patterns:
        found = False
        for include_pattern in include_patterns:
            if include_pattern.match(filename):
                found = True
                break
        if not found:
            return False
    return True
:returns: `True` if the file should be allowed through the filter.
9,022
def _update_roster(self):
    roster_file = self._get_roster()
    if os.access(roster_file, os.W_OK):
        if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
            with salt.utils.files.fopen(roster_file, 'a') as roster_fp:
                # entry template lost in extraction; fields below mirror
                # the .format() keywords that survive
                roster_fp.write(
                    '# Automatically added by {s_user} at {s_time}\n'
                    '{hostname}:\n    host: {hostname}\n    user: {user}\n'
                    '    passwd: {passwd}\n'.format(
                        s_user=getpass.getuser(),
                        s_time=datetime.datetime.utcnow().isoformat(),
                        hostname=self.opts.get('tgt', ''),
                        user=self.opts.get('ssh_user', ''),
                        passwd=self.opts.get('ssh_passwd', '')))
            log.info('The host {0} has been added to the roster {1}'.format(
                self.opts.get('tgt', ''), roster_file))
    else:
        log.error('Unable to update roster {0}: access denied'.format(roster_file))
Update default flat roster with the passed in information.

:return:
9,023
def files(self):
    self._printer('Searching for files')  # status message lost in extraction
    for directory in self.directory:
        for path in os.listdir(directory):
            full_path = os.path.join(directory, path)
            if os.path.isfile(full_path):
                if not path.startswith('.'):  # assumed: skip hidden files
                    self.filepaths.append(full_path)
    return self._get_filepaths()
Return list of files in root directory
9,024
def _moments_central(data, center=None, order=1):
    data = np.asarray(data).astype(float)
    if data.ndim != 2:
        raise ValueError('data must be a 2D array')  # message lost in extraction
    if center is None:
        from ..centroids import centroid_com
        center = centroid_com(data)
    indices = np.ogrid[[slice(0, i) for i in data.shape]]
    ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
    xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
    return np.dot(np.dot(np.transpose(ypowers), data), xpowers)
Calculate the central image moments up to the specified order.

Parameters
----------
data : 2D array-like
    The input 2D array.
center : tuple of two floats or `None`, optional
    The ``(x, y)`` center position. If `None` it will be calculated as
    the "center of mass" of the input ``data``.
order : int, optional
    The maximum order of the moments to calculate.

Returns
-------
moments : 2D `~numpy.ndarray`
    The central image moments.
9,025
def layer_tagger_mapping(self):
    return {
        PARAGRAPHS: self.tokenize_paragraphs,
        SENTENCES: self.tokenize_sentences,
        WORDS: self.tokenize_words,
        ANALYSIS: self.tag_analysis,
        TIMEXES: self.tag_timexes,
        NAMED_ENTITIES: self.tag_named_entities,
        CLAUSE_ANNOTATION: self.tag_clause_annotations,
        CLAUSES: self.tag_clauses,
        LAYER_CONLL: self.tag_syntax_vislcg3,
        LAYER_VISLCG3: self.tag_syntax_maltparser,
        WORDNET: self.tag_wordnet
    }
Dictionary that maps layer names to taggers that can create that layer.
9,026
def verifies(self, hash, signature):
    G = self.generator
    n = G.order()
    r = signature.r
    s = signature.s
    if r < 1 or r > n - 1:
        return False
    if s < 1 or s > n - 1:
        return False
    c = numbertheory.inverse_mod(s, n)
    u1 = (hash * c) % n
    u2 = (r * c) % n
    xy = u1 * G + u2 * self.point
    v = xy.x() % n
    return v == r
Verify that signature is a valid signature of hash. Return True if the signature is valid.
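The body is the textbook ECDSA verification check. With $H$ the message hash, $Q$ = self.point the signer's public key, and $G$ the generator of order $n$, it accepts exactly when

$$u_1 = H s^{-1} \bmod n, \qquad u_2 = r s^{-1} \bmod n, \qquad \big(u_1 G + u_2 Q\big)_x \bmod n \;=\; r.$$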
9,027
def decode_response(client_message, to_object=None):
    parameters = dict(response=None)
    response_size = client_message.read_int()
    response = []
    for _ in range(0, response_size):
        response_item = client_message.read_data()
        response.append(response_item)
    parameters['response'] = ImmutableLazyDataList(response, to_object)
    return parameters
Decode response from client message
9,028
def check_version(component, expected_version):
    comp = comp_names[component]
    compath = os.path.realpath(os.path.abspath(comp.path))
    sys.path.insert(0, compath)
    import version
    if version.version != expected_version:
        raise EnvironmentError("Version mismatch during release, expected={}, found={}".format(
            expected_version, version.version))
Make sure the package version in setuptools matches what we expect it to be
9,029
def parse_xml_jtl(self, granularity):
    # logger messages, element paths and attribute names were lost in
    # extraction; reconstructed below from the JMeter XML (JTL) layout
    data = defaultdict(list)
    processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for input_file in self.infile_list:
        logger.info('Processing : %s', input_file)
        timestamp_format = None
        tree = ElementTree.parse(input_file)
        samples = tree.findall('httpSample') + tree.findall('sample')
        for sample in samples:
            if not timestamp_format or timestamp_format == 'unknown':
                timestamp_format = naarad.utils.detect_timestamp_format(sample.get('ts'))
            if timestamp_format == 'unknown':
                continue
            ts = naarad.utils.get_standardized_timestamp(sample.get('ts'), timestamp_format)
            if ts == -1:
                continue
            ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
            aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)
            self.aggregate_count_over_time(
                processed_data, sample,
                [self._sanitize_label(sample.get('lb')), 'Overall_Summary'],
                aggregate_timestamp)
            self.aggregate_values_over_time(
                processed_data, sample,
                [self._sanitize_label(sample.get('lb')), 'Overall_Summary'],
                ['t', 'by'], aggregate_timestamp)
        logger.info('Finished parsing : %s', input_file)
    logger.info('Processing metrics for output')
    self.average_values_for_plot(processed_data, data, averaging_factor)
    logger.info('Writing csv files')
    for csv in data.keys():
        self.csv_files.append(csv)
        with open(csv, 'w') as csvf:
            csvf.write('\n'.join(sorted(data[csv])))
    logger.info('Calculating key stats')
    self.calculate_key_stats(processed_data)
    return True
Parse Jmeter workload output in XML format and extract overall and per
transaction data and key statistics

:param string granularity: The time period over which to aggregate and
    average the raw data. Valid values are 'hour', 'minute' or 'second'
:return: status of the metric parse
9,030
def copy(a):
    shared = anonymousmemmap(a.shape, dtype=a.dtype)
    shared[:] = a[:]
    return shared
Copy an array to the shared memory.

Notes
-----
copy is not always necessary because the private memory is always
copy-on-write.

Use :code:`a = copy(a)` to immediately dereference the old 'a' on
private memory
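A short usage sketch, assuming this module's anonymousmemmap (as in the sharedmem package) and a fork-based worker pool:

import numpy as np
a = np.arange(1000)
a = copy(a)  # 'a' now lives in anonymous shared memory, so forked
             # worker processes see (and may mutate) the same buffer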
9,031
def notUnique(iterable, reportMax=INF):
    hash = {}
    n = 0
    if reportMax < 1:
        raise ValueError("`reportMax` must be >= 1 and is %r" % reportMax)
    for item in iterable:
        count = hash[item] = hash.get(item, 0) + 1
        if count > 1:
            yield item
            n += 1
            if n >= reportMax:
                return
Returns the elements in `iterable` that aren't unique; stops after it
found `reportMax` non-unique elements.

Examples:

>>> list(notUnique([1,1,2,2,3,3]))
[1, 2, 3]
>>> list(notUnique([1,1,2,2,3,3], 1))
[1]
9,032
def expand(self, other):
    if not isinstance(other, Result):
        raise ValueError("Provided argument has to be instance of overpy:Result()")
    other_collection_map = {Node: other.nodes, Way: other.ways,
                            Relation: other.relations, Area: other.areas}
    for element_type, own_collection in self._class_collection_map.items():
        for element in other_collection_map[element_type]:
            if is_valid_type(element, element_type) and element.id not in own_collection:
                own_collection[element.id] = element
Add all elements from an other result to the list of elements of this
result object.

It is used by the auto resolve feature.

:param other: Expand the result with the elements from this result.
:type other: overpy.Result
:raises ValueError: If provided parameter is not instance of
    :class:`overpy.Result`
9,033
def print_about(self):
    filepath = os.path.join(self.suite_path, "bin", self.tool_name)
    print "Tool: %s" % self.tool_name
    print "Path: %s" % filepath
    print "Suite: %s" % self.suite_path
    msg = "%s (%r)" % (self.context.load_path, self.context_name)
    print "Context: %s" % msg
    variants = self.context.get_tool_variants(self.tool_name)
    if variants:
        if len(variants) > 1:
            self._print_conflicting(variants)
        else:
            variant = iter(variants).next()
            print "Package: %s" % variant.qualified_package_name
    return 0
Print an info message about the tool.
9,034
def star(self, **args):
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        # message garbled in the source; surviving fragments read
        # "Gist can't ..." and "Unambigious Gistname or any unique Gistid
        # to be starred"
        raise Exception("Gist can't be starred: provide an unambiguous "
                        "Gistname or any unique Gistid")
    # the remainder of the body was lost in extraction; only the URL
    # fragment '%s/gists/%s/star' survives
Star any gist by providing gistID or gistname (for authenticated user)
9,035
def wallet_republish(self, wallet, count):
    # literals reconstructed per the nano RPC client conventions
    wallet = self._process_value(wallet, 'wallet')
    count = self._process_value(count, 'int')
    payload = {"wallet": wallet, "count": count}
    resp = self.call('wallet_republish', payload)
    return resp.get('blocks') or []
Rebroadcast blocks for accounts from **wallet** starting at frontier
down to **count** to the network

.. enable_control required
.. version 8.0 required

:param wallet: Wallet to rebroadcast blocks for
:type wallet: str

:param count: Max amount of blocks to rebroadcast since frontier block
:type count: int

:raises: :py:exc:`nano.rpc.RPCException`

>>> rpc.wallet_republish(
...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
...     count=2
... )
[
    "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948",
    "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293",
    "90D0C16AC92DD35814E84BFBCC739A039615D0A42A76EF44ADAEF1D99E9F8A35"
]
9,036
def UnpackItems(*items, fields=None, defaults=None):
    defaults = defaults or {}

    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        if fields is None:
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        for item in items:
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values

    return _UnpackItems
>>> UnpackItems(0)

:param items:
:param fields:
:param defaults:
:return: callable
9,037
def get_abstracts(self, refresh=True):
    return [ScopusAbstract(eid, refresh=refresh)
            for eid in self.get_document_eids(refresh=refresh)]
Return a list of ScopusAbstract objects using ScopusSearch.
9,038
def deframesig(frames, siglen, frame_len, frame_step, winfunc=lambda x: numpy.ones((x,))):
    frame_len = round_half_up(frame_len)
    frame_step = round_half_up(frame_step)
    numframes = numpy.shape(frames)[0]
    assert numpy.shape(frames)[1] == frame_len, \
        '"frames" matrix is wrong size, 2nd dim is not equal to frame_len'  # message lost in extraction
    indices = numpy.tile(numpy.arange(0, frame_len), (numframes, 1)) + numpy.tile(
        numpy.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T
    indices = numpy.array(indices, dtype=numpy.int32)
    padlen = (numframes - 1) * frame_step + frame_len
    if siglen <= 0:
        siglen = padlen
    rec_signal = numpy.zeros((padlen,))
    window_correction = numpy.zeros((padlen,))
    win = winfunc(frame_len)
    for i in range(0, numframes):
        window_correction[indices[i, :]] = window_correction[
            indices[i, :]] + win + 1e-15
        rec_signal[indices[i, :]] = rec_signal[indices[i, :]] + frames[i, :]
    rec_signal = rec_signal / window_correction
    return rec_signal[0:siglen]
Does overlap-add procedure to undo the action of framesig.

:param frames: the array of frames.
:param siglen: the length of the desired signal, use 0 if unknown. Output
    will be truncated to siglen samples.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame
    that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no
    window is applied.
:returns: a 1-D signal.
9,039
def localize(dt, tz):
    if not isinstance(tz, tzinfo):
        tz = pytz.timezone(tz)
    return tz.localize(dt)
Given a naive datetime object this method will return a localized datetime object
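A usage sketch (assumes pytz is installed; note that localize() attaches a zone to a naive datetime rather than converting between zones):

from datetime import datetime
naive = datetime(2020, 1, 1, 12, 0)
aware = localize(naive, 'Europe/Paris')  # tzinfo attached, wall time unchanged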
9,040
def extract_bag_of_words_from_corpus_parallel(corpus, lemmatizing="wordnet"):
    pool = Pool(processes=get_threads_number() * 2,)
    partitioned_corpus = chunks(corpus, len(corpus) / get_threads_number())
    list_of_bags_of_words, list_of_lemma_to_keywordset_maps = pool.map(
        partial(clean_corpus_serial, lemmatizing=lemmatizing), partitioned_corpus)
    bag_of_words = reduce_list_of_bags_of_words(list_of_bags_of_words)
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_to_keywordbag in list_of_lemma_to_keywordset_maps:
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return bag_of_words, lemma_to_keywordbag_total
This extracts one bag-of-words from a list of strings. The documents are
mapped to parallel processes.

Inputs:
 - corpus: A list of strings.
 - lemmatizing: A string containing one of the following: "porter",
   "snowball" or "wordnet".

Output:
 - bag_of_words: This is a bag-of-words in python dictionary format.
 - lemma_to_keywordbag_total: Aggregated python dictionary that maps
   stems/lemmas to original topic keywords.
9,041
def get_keys(self, lst):
    pk_name = self.get_pk_name()
    return [getattr(item, pk_name) for item in lst]
return a list of pk values from object list
9,042
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
    if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)):
        if not all(idx in item.index for idx in row_lookup):
            raise ValueError(
                "Must have equal len keys and value when setting with "
                "an iterable"
            )
        if hasattr(item, "columns"):
            if not all(idx in item.columns for idx in col_lookup):
                raise ValueError(
                    "Must have equal len keys and value when setting "
                    "with an iterable"
                )
            item = item.reindex(index=row_lookup, columns=col_lookup)
        else:
            item = item.reindex(index=row_lookup)
    try:
        item = np.array(item)
        if np.prod(to_shape) == np.prod(item.shape):
            return item.reshape(to_shape)
        else:
            return np.broadcast_to(item, to_shape)
    except ValueError:
        from_shape = np.array(item).shape
        raise ValueError(
            "could not broadcast input array from shape {from_shape} into shape "
            "{to_shape}".format(from_shape=from_shape, to_shape=to_shape)
        )
Use numpy to broadcast or reshape item.

Notes:
    - Numpy is memory efficient, there shouldn't be performance issue.
9,043
def fromfilenames(filenames, coltype=int):
    # tail of the regex was lost in extraction; reconstructed to match
    # "-<start>-<duration>.<extension>" at the end of the name
    pattern = re.compile(r"-([\d.]+)-([\d.]+)\.[\w_+#]+\Z")
    l = segments.segmentlist()
    for name in filenames:
        [(s, d)] = pattern.findall(name.strip().rstrip(".gz"))
        s = coltype(s)
        d = coltype(d)
        l.append(segments.segment(s, s + d))
    return l
Return a segmentlist describing the intervals spanned by the files
whose names are given in the list filenames. The segmentlist is
constructed by parsing the file names, and the boundaries of each
segment are coerced to type coltype.

The file names are parsed using a generalization of the format described
in Technical Note LIGO-T010150-00-E, which allows the start time and
duration appearing in the file name to be non-integers.

NOTE: the output is a segmentlist as described by the file names; if the
file names are not in time order, or describe overlapping segments, then
thusly shall be the output of this function. It is recommended that this
function's output be coalesced before use.
9,044
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    # most log/error message literals were lost in extraction and are
    # reconstructed below in salt's usual phrasing
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace("Retrieving licenses assigned to '%s'", entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectedly retrieved more than one assignment')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected number of license assignments retrieved')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Entity name mismatch: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different entity')
    return [a.assignedLicense for a in assignments]
Returns the licenses assigned to an entity. If entity ref is not provided,
then entity_name is assumed to be the vcenter. This is later checked if
the entity name is provided.

service_instance
    The Service Instance Object from which to obtain the licenses.

entity_ref
    VMware entity to get the assigned licenses for.
    If None, the entity is the vCenter itself.
    Default is None.

entity_name
    Entity name used in logging.
    Default is None.

license_assignment_manager
    The LicenseAssignmentManager object of the service instance.
    If not provided it will be retrieved.
    Default is None.
9,045
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
    template_norm_squared = np.sum(template**2)
    image_norms_squared = {(x, y): np.sum(image_tile_dict[(x, y)]**2)
                           for (x, y) in image_tile_dict.keys()}
    match_points = image_tile_dict.keys()
    h, w = template.shape
    image_matches_normalised = {
        match_points[i]: -2 * transformed_array[match_points[i][0], match_points[i][1]]
        + image_norms_squared[match_points[i]] + template_norm_squared
        for i in range(len(match_points))}
    cutoff = h * w * 255**2 * sq_diff_tolerance
    normalised_matches = {key: value for key, value in image_matches_normalised.items()
                          if np.round(value, decimals=3) <= cutoff}
    return normalised_matches.keys()
As above, but for when the squared differences matching method is used
9,046
def component_activated(self, component):
    component.env = self
    super(Environment, self).component_activated(component)
Initialize additional member variables for components.

Every component activated through the `Environment` object gets an
additional member variable: `env` (the environment object)
9,047
def load_table_from_uri(
    self,
    source_uris,
    destination,
    job_id=None,
    job_id_prefix=None,
    location=None,
    project=None,
    job_config=None,
    retry=DEFAULT_RETRY,
):
    job_id = _make_job_id(job_id, job_id_prefix)
    if project is None:
        project = self.project
    if location is None:
        location = self.location
    job_ref = job._JobReference(job_id, project=project, location=location)
    if isinstance(source_uris, six.string_types):
        source_uris = [source_uris]
    destination = _table_arg_to_table_ref(destination, default_project=self.project)
    load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
    load_job._begin(retry=retry)
    return load_job
Starts a job for loading data into a table from CloudStorage.

See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load

Arguments:
    source_uris (Union[str, Sequence[str]]):
        URIs of data files to be loaded; in format
        ``gs://<bucket_name>/<object_name_or_glob>``.
    destination (Union[ \
            :class:`~google.cloud.bigquery.table.Table`, \
            :class:`~google.cloud.bigquery.table.TableReference`, \
            str, \
    ]):
        Table into which data is to be loaded. If a string is passed in,
        this method attempts to create a table reference from a string
        using :func:`google.cloud.bigquery.table.TableReference.from_string`.

Keyword Arguments:
    job_id (str): (Optional) Name of the job.
    job_id_prefix (str):
        (Optional) the user-provided prefix for a randomly generated
        job ID. This parameter will be ignored if a ``job_id`` is also
        given.
    location (str):
        Location where to run the job. Must match the location of the
        destination table.
    project (str):
        Project ID of the project of where to run the job. Defaults to
        the client's project.
    job_config (google.cloud.bigquery.job.LoadJobConfig):
        (Optional) Extra configuration options for the job.
    retry (google.api_core.retry.Retry):
        (Optional) How to retry the RPC.

Returns:
    google.cloud.bigquery.job.LoadJob: A new load job.
9,048
def _deriv_growth(z, **cosmo):
    # cosmology dict keys lost in extraction; 'omega_M_0'/'omega_lambda_0'
    # assumed per the cosmolopy convention
    inv_h = (cosmo['omega_M_0'] * (1 + z) ** 3 + cosmo['omega_lambda_0']) ** (-0.5)
    fz = (1 + z) * inv_h ** 3
    deriv_g = growthfactor(z, norm=True, **cosmo) * (inv_h ** 2) * \
        1.5 * cosmo['omega_M_0'] * (1 + z) ** 2 - \
        fz * growthfactor(z, norm=True, **cosmo) / _int_growth(z, **cosmo)
    return deriv_g
Returns derivative of the linear growth factor at z for a given cosmology **cosmo
9,049
def Advertise(port, stype="SCOOP", sname="Broker", advertisername="Broker", location=""):
    scoop.logger.info("Launching advertiser...")
    service = minusconf.Service(stype, port, sname, location)
    advertiser = minusconf.ThreadAdvertiser([service], advertisername)
    advertiser.start()
    scoop.logger.info("Advertiser launched.")
    return advertiser
stype = always SCOOP
port = comma separated ports
sname = broker unique name
location = routable location (ip or dns)
9,050
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
    # literals reconstructed per the systemd journal module conventions
    if identifier is None:
        if not _sys.argv or not _sys.argv[0] or _sys.argv[0] == '-c':
            identifier = 'python'
        else:
            identifier = _sys.argv[0]
    fd = stream_fd(identifier, priority, level_prefix)
    return _os.fdopen(fd, 'w', 1)
r"""Return a file object wrapping a stream to journal. Log messages written to this file as simple newline sepearted text strings are written to the journal. The file will be line buffered, so messages are actually sent after a newline character is written. >>> from systemd import journal >>> stream = journal.stream('myapp') # doctest: +SKIP >>> res = stream.write('message...\n') # doctest: +SKIP will produce the following message in the journal:: PRIORITY=7 SYSLOG_IDENTIFIER=myapp MESSAGE=message... If identifier is None, a suitable default based on sys.argv[0] will be used. This interface can be used conveniently with the print function: >>> from __future__ import print_function >>> stream = journal.stream() # doctest: +SKIP >>> print('message...', file=stream) # doctest: +SKIP priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`, `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`. level_prefix is a boolean. If true, kernel-style log priority level prefixes (such as '<1>') are interpreted. See sd-daemon(3) for more information.
9,051
def _HandleLegacy(self, args, token=None):
    generic_hunt_args = rdf_hunts.GenericHuntArgs()
    generic_hunt_args.flow_runner_args.flow_name = args.flow_name
    generic_hunt_args.flow_args = args.flow_args
    if args.original_hunt:
        ref = rdf_hunts.FlowLikeObjectReference.FromHuntId(
            utils.SmartStr(args.original_hunt.hunt_id))
        args.hunt_runner_args.original_object = ref
    elif args.original_flow:
        ref = rdf_hunts.FlowLikeObjectReference.FromFlowIdAndClientId(
            utils.SmartStr(args.original_flow.flow_id),
            utils.SmartStr(args.original_flow.client_id))
        args.hunt_runner_args.original_object = ref
    with implementation.StartHunt(
            runner_args=args.hunt_runner_args,
            args=generic_hunt_args,
            token=token) as hunt_obj:
        logging.info("User %s created a new %s hunt (%s)",
                     token.username, hunt_obj.args.flow_runner_args.flow_name,
                     hunt_obj.urn)
        return ApiHunt().InitFromAff4Object(hunt_obj, with_full_summary=True)
Creates a new hunt.
9,052
def ftr_get_config(website_url, exact_host_match=False):
    def check_requests_result(result):
        # tested literals lost in extraction; the check keeps plain-text
        # siteconfig responses and rejects HTML error pages
        return (
            u'text/plain' in result.headers.get('content-type', u'')
            and u'<html' not in result.text
            and u'404' not in result.text
            and u'</html>' not in result.text
        )

    # repository list: the env-var name and default repository URLs were
    # lost in extraction; placeholders (...) mark unrecoverable values
    repositories = [
        x.strip() for x in os.environ.get(
            ..., os.path.expandvars(u'...') + u'...' + u'...' + u'...'
        ).split() if x.strip() != u'']

    try:
        proto, host_and_port, remaining = split_url(website_url)
    except:
        host_and_port = website_url

    host_domain_parts = host_and_port.split(u'.')
    # the remainder of the function (repository lookup and download loop)
    # was truncated in the source
Download the Five Filters config from centralized repositories.

Repositories can be local if you need to override siteconfigs.

The first entry found is returned. If no configuration is found, `None`
is returned. If :mod:`cacheops` is installed, the result will be cached
with a default expiration delay of 3 days.

:param exact_host_match: If ``False`` (default), we will look for
    wildcard config matches. For example if host is
    ``www.test.example.org``, we will try looking up
    ``test.example.org`` and ``example.org``.
:type exact_host_match: bool
:param website_url: either a full web URI (eg.
    ``http://www.website.com:PORT/path/to/a/page.html``) or simply a
    domain name (eg. ``www.website.com``). In case of a domain name, no
    check is performed yet, be careful of what you pass.
:type website_url: str or unicode
:returns: tuple -- the loaded site config (as unicode string) and the
    hostname matched (unicode string too).
:raises: :class:`SiteConfigNotFound` if no config could be found.

.. note:: Whatever ``exact_host_match`` value is, the ``www`` part is
    always removed from the URL or domain name.

.. todo:: there is currently no merging/cascading of site configs. In
    the original Five Filters implementation, primary and secondary
    configurations were merged. We could eventually re-implement this
    part if needed by someone. PRs welcome as always.
9,053
def gdf_to_geojson(gdf, date_format='epoch', properties=None, filename=None):
    # default date_format and the geometry column literal were lost in
    # extraction; 'geometry' is the GeoPandas convention
    gdf = convert_date_columns(gdf, date_format)
    gdf_out = gdf[['geometry'] + (properties or [])]
    geojson_str = gdf_out.to_json()
    if filename:
        with codecs.open(filename, "w", "utf-8-sig") as f:
            f.write(geojson_str)
        return None
    else:
        return json.loads(geojson_str)
Serialize a GeoPandas dataframe to a geojson format Python dictionary / file
9,054
def event_filter_type(self, event_filter_type):
    allowed_values = ["BYCHART", "AUTOMATIC", "ALL", "NONE",
                      "BYDASHBOARD", "BYCHARTANDDASHBOARD"]
    if event_filter_type not in allowed_values:
        raise ValueError(
            "Invalid value for `event_filter_type` ({0}), must be one of {1}"
            .format(event_filter_type, allowed_values)
        )
    self._event_filter_type = event_filter_type
Sets the event_filter_type of this Dashboard.

How charts belonging to this dashboard should display events. BYCHART is
default if unspecified  # noqa: E501

:param event_filter_type: The event_filter_type of this Dashboard.  # noqa: E501
:type: str
9,055
def compile_insert_get_id(self, query, values, sequence=None):
    if sequence is None:
        sequence = "id"
    return "%s RETURNING %s" % (
        self.compile_insert(query, values),
        self.wrap(sequence),
    )
Compile an insert and get ID statement into SQL.

:param query: A QueryBuilder instance
:type query: QueryBuilder

:param values: The values to insert
:type values: dict

:param sequence: The id sequence
:type sequence: str

:return: The compiled statement
:rtype: str
9,056
def import_from_string(value):
    value = value.replace(':', '.')  # assumed: allow "module:attr" style paths
    try:
        module_path, class_name = value.rsplit('.', 1)
        module = import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError) as ex:
        raise ImportError("Could not import '{}'. {}: {}.".format(
            value, ex.__class__.__name__, ex))
Copy of rest_framework.settings.import_from_string
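A quick sketch of what the helper resolves (standard-library target chosen purely for illustration):

cls = import_from_string('collections.OrderedDict')
assert cls is __import__('collections').OrderedDict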
9,057
def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':
    return _RequestContextManager(
        self._request(hdrs.METH_DELETE, url, **kwargs))
Perform HTTP DELETE request.
9,058
def packetToDict(pkt):
    # dict keys lost in extraction; names follow the XL-320 packet layout
    d = {
        'id': pkt[4],
        'instruction': xl320.InstrToStr[pkt[7]],
        'length': (pkt[6] << 8) + pkt[5],
        'params': pkt[8:-2],
        'crc': pkt[-2:]
    }
    return d
Given a packet, this turns it into a dictionary ... is this useful?

in: packet, array of numbers
out: dictionary (key, value)
9,059
def rpc_get_pydoc_documentation(self, symbol):
    try:
        docstring = pydoc.render_doc(str(symbol),
                                     "Elpy Pydoc Documentation for %s",
                                     False)
    except (ImportError, pydoc.ErrorDuringImport):
        return None
    else:
        if isinstance(docstring, bytes):
            docstring = docstring.decode("utf-8", "replace")
        return docstring
Get the Pydoc documentation for the given symbol. Uses pydoc and can return a string with backspace characters for bold highlighting.
9,060
def get_most_recent_event(self, originator_id, lt=None, lte=None):
    events = self.get_domain_events(originator_id=originator_id,
                                    lt=lt, lte=lte, limit=1,
                                    is_ascending=False)
    events = list(events)
    try:
        return events[0]
    except IndexError:
        pass
Gets a domain event from the sequence identified by `originator_id`
at the highest position.

:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event
9,061
def _subclass_must_implement(self, fn):
    m = "Missing function implementation in {}: {}".format(type(self), fn)
    return NotImplementedError(m)
Returns a NotImplementedError for a function that should be implemented.

:param fn: name of the function
9,062
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    decode = cls(**kwargs).iterdecode
    if hasattr(source, 'read'):
        return list(decode(source.read()))
    else:
        with open(source) as fh:
            return list(decode(fh.read()))
Deserialize a list of PENMAN-encoded graphs from *source*.

Args:
    source: a filename or file-like object to read from
    triples: if True, read graphs as triples instead of as PENMAN
    cls: serialization codec class
    kwargs: keyword arguments passed to the constructor of *cls*

Returns:
    a list of Graph objects
9,063
def send(self, data, room=None, skip_sid=None, namespace=None, callback=None):
    return self.client.send(data,
                            namespace=namespace or self.namespace,
                            callback=callback)
Send a message to the server. The only difference with the :func:`socketio.Client.send` method is that when the ``namespace`` argument is not given the namespace associated with the class is used.
9,064
def groups_moderators(self, room_id=None, group=None, **kwargs):
    # endpoint and error literals reconstructed per the Rocket.Chat API
    if room_id:
        return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs)
    elif group:
        return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs)
    else:
        raise RocketMissingParamException('room_id or group required')
Lists all moderators of a group.
9,065
def create_user(self, user):
    data = self._create_user_dict(user=user)
    response = self._perform_request(
        url='/um/users',  # endpoint literal lost in extraction; reconstructed
        method='POST',
        data=json.dumps(data))
    return response
Creates a new user.

:param user: The user object to be created.
:type user: ``dict``
9,066
def instantiate_by_name_with_default(self, object_name, default_value=None):
    if object_name not in self.instances:
        if object_name not in self.environment:
            return default_value
        else:
            instance = self.instantiate_from_data(self.environment[object_name])
            self.instances[object_name] = instance
            return instance
    else:
        return self.instances[object_name]
Instantiate object from the environment, possibly giving some extra arguments
9,067
def importSNPs(name):
    path = os.path.join(this_dir, "bootstrap_data", "SNPs/" + name)
    PS.importSNPs(path)
Import a SNP set shipped with pyGeno. Most of the datawraps only contain URLs towards data provided by third parties.
9,068
def extract_path_info(
    environ_or_baseurl,
    path_or_url,
    charset="utf-8",
    errors="werkzeug.url_quote",
    collapse_http_schemes=True,
):
    def _normalize_netloc(scheme, netloc):
        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u"http" and port == u"80") or (
                scheme == u"https" and port == u"443"
            ):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u":" + port
        return netloc

    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u"http", u"https"):
                return None
    else:
        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
            return None
    if base_netloc != cur_netloc:
        return None
    base_path = base_path.rstrip(u"/")
    if not cur_path.startswith(base_path):
        return None
    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
Extracts the path info from the given URL (or WSGI environment) and path.

The path info returned is a unicode string, not a bytestring suitable for
a WSGI environment. The URLs might also be IRIs.

If the path info could not be determined, `None` is returned.

Some examples:

>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
...                   'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
...                   'https://example.com/app/hello',
...                   collapse_http_schemes=False) is None
True

Instead of providing a base URL you can also pass a WSGI environment.

:param environ_or_baseurl: a WSGI environment dict, a base URL or base
    IRI. This is the root of the application.
:param path_or_url: an absolute path from the server root, a relative
    path (in which case it's the path info) or a full URL. Also accepts
    IRIs and unicode parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does not
    assume that http and https on the same server point to the same
    resource.

.. versionchanged:: 0.15
    The ``errors`` parameter defaults to leaving invalid bytes quoted
    instead of replacing them.

.. versionadded:: 0.6
9,069
def deactivate_user(query):
    user = _query_to_user(query)
    # f-string prompts below were lost in extraction; reconstructed
    if click.confirm(f'Deactivate user {user!r}?'):
        user.active = False
        user_manager.save(user, commit=True)
        click.echo(f'Successfully deactivated {user!r}')
    else:
        click.echo('Cancelled.')
Deactivate a user.
9,070
def dynamize_attribute_updates(self, pending_updates):
    d = {}
    for attr_name in pending_updates:
        action, value = pending_updates[attr_name]
        if value is None:
            d[attr_name] = {"Action": action}
        else:
            d[attr_name] = {"Action": action,
                            "Value": self.dynamize_value(value)}
    return d
Convert a set of pending item updates into the structure required by Layer1.
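An illustrative round-trip, with the Layer1 value encoder stubbed out (the real dynamize_value lives on the Layer2 object; attribute names here are hypothetical):

def dynamize_value(value):  # stand-in for the real DynamoDB encoder
    return {'N': str(value)}

pending = {'age': ('PUT', 42), 'tmp': ('DELETE', None)}
out = {k: {'Action': a} if v is None else {'Action': a, 'Value': dynamize_value(v)}
       for k, (a, v) in pending.items()}
assert out == {'age': {'Action': 'PUT', 'Value': {'N': '42'}},
               'tmp': {'Action': 'DELETE'}}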
9,071
def query_raw(self, metric, **kwargs):
    # kwarg keys reconstructed per the swagger-codegen convention
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.query_raw_with_http_info(metric, **kwargs)
    else:
        (data) = self.query_raw_with_http_info(metric, **kwargs)
        return data
Perform a raw data query against Wavefront servers that returns second
granularity points grouped by tags  # noqa: E501

An API to check if ingested points are as expected. Points ingested
within a single second are averaged when returned.  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.query_raw(metric, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str metric: metric to query ingested points for (cannot contain
    wildcards) (required)
:param str host: host to query ingested points for (cannot contain
    wildcards). host or source is equivalent, only one should be used.
:param str source: source to query ingested points for (cannot contain
    wildcards). host or source is equivalent, only one should be used.
:param int start_time: start time in epoch milliseconds (cannot be more
    than a day in the past) null to use an hour before endTime
:param int end_time: end time in epoch milliseconds (cannot be more than
    a day in the past) null to use now
:return: list[RawTimeseries]
    If the method is called asynchronously, returns the request thread.
9,072
def load_interfaces(self, interfaces_dict):
    # key literals reconstructed from the GetSupportedAPIList response shape
    if interfaces_dict.get('apilist', {}).get('interfaces', None) is None:
        raise ValueError("Invalid response for GetSupportedAPIList")
    interfaces = interfaces_dict['apilist']['interfaces']
    if len(interfaces) == 0:
        raise ValueError("API returned no interfaces; probably using invalid key")
    for interface in self.interfaces:
        delattr(self, interface.name)
    self.interfaces = []
    for interface in interfaces:
        obj = WebAPIInterface(interface, parent=self)
        self.interfaces.append(obj)
        setattr(self, obj.name, obj)
Populates the namespace under the instance
9,073
def to_dict(self, index=0):
    index += 1
    rep = {}
    rep["index"] = index
    rep["leaf"] = len(self.children) == 0
    rep["depth"] = self.udepth
    rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
    if self.label is not None:
        rep["scoreDistr"][self.label] = 1.0
        mapping = LabeledTree.SCORE_MAPPING[:]
        rep["rating"] = mapping[self.label] - min(mapping)
    rep["numChildren"] = len(self.children)
    text = self.text if self.text != None else ""
    seen_tokens = 0
    witnessed_pixels = 0
    for i, child in enumerate(self.children):
        if i > 0:
            text += " "
        child_key = "child%d" % (i)
        (rep[child_key], index) = child.to_dict(index)
        text += rep[child_key]["text"]
        seen_tokens += rep[child_key]["tokens"]
        witnessed_pixels += rep[child_key]["pixels"]
    rep["text"] = text
    rep["tokens"] = 1 if (self.text != None and len(self.text) > 0) else seen_tokens
    rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
    return (rep, index)
Dict format for use in Javascript / Jason Chuang's display technology.
9,074
def featureCounts_chart(self):
    # config literals lost in extraction; values below follow MultiQC's
    # bargraph conventions
    config = {
        'id': 'featureCounts_assignment_plot',
        'title': 'featureCounts: Assignments',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(self.featurecounts_data, self.featurecounts_keys, config)
Make the featureCounts assignment rates plot
9,075
def _run_server(self, multiprocessing):
    # most literals and the pipe/buffer set-up block were lost in
    # extraction; surviving fragments mention Extractor(engine=pipeline),
    # fout={self._ofnm} and the mpfrm/mprsm/mpbuf/mpkit shared objects
    if not self._flag_m:
        raise UnsupportedCall(
            f"Extractor(engine={self._exeng}) has no attribute '_run_server'")
    proc = multiprocessing.Process(
        target=...,  # target name lost in extraction
        kwargs={'mpkit': self._mpkit, 'mpbuf': self._mpbuf,
                'mpfdp': self._mpfdp[self._frnum]})
    self._mpkit.pool -= 1
    self._mpkit.counter += 1
    proc.start()
    self._frnum += 1
    self._mpprc.append(proc)
    if len(self._mpprc) >= CPU_CNT - 1:
        [proc.join() for proc in self._mpprc[:-4]]
        del self._mpprc[:-4]
Use server multiprocessing to extract PCAP files.
9,076
def pop(h):
    n = h.size() - 1
    h.swap(0, n)
    down(h, 0, n)
    return h.pop()
Pop the heap value from the heap.
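The function is the classic extract-root pattern: swap the root with the last slot, sift the new root down over the shortened range, then pop the tail (the heap interface — size/swap/pop and the down sift — is assumed from the surrounding module). The standard library's heapq does the same thing on plain lists:

import heapq
h = [5, 9, 7]            # a valid min-heap
root = heapq.heappop(h)  # move last into root, sift down, return old root
assert root == 5 and h == [7, 9]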
9,077
def on_tab_close_clicked(self, event, state_m):
    [page, state_identifier] = self.find_page_of_state_m(state_m)
    if page:
        self.close_page(state_identifier, delete=False)
Triggered when the states-editor close button is clicked

Closes the tab.

:param state_m: The desired state model (the selected state)
9,078
def lfprob(dfnum, dfden, F):
    p = betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
    return p
Returns the (1-tailed) significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF).

Usage: lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
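In terms of the regularized incomplete beta function $I_x(a, b)$ (what betai computes), with $d_1$ = dfnum and $d_2$ = dfden, the returned upper-tail probability is

$$p = P(F' \ge F) = I_{x}\!\left(\tfrac{d_2}{2},\, \tfrac{d_1}{2}\right), \qquad x = \frac{d_2}{d_2 + d_1 F},$$

which matches the call betai(0.5*dfden, 0.5*dfnum, dfden/(dfden + dfnum*F)).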
9,079
def apply(self, key, value, prompt=None, on_load=lambda a: a, on_save=lambda a: a):
    # the sentinel literal tested first was lost in extraction
    if value == '':
        value = None
        if key and self.data.has_key(key):
            del self.data[key]
    if value is not None:
        value = on_load(value)
        if key:
            self.data[key] = on_save(value)
        return value
    elif not key or not self.has_key(key):
        if callable(prompt):
            value = prompt()
        elif prompt is not None:
            value = raw_input(prompt + ": ")
        if value is None:
            if self.data.has_key(key):
                del self.data[key]
            return None
        self.data[key] = on_save(value)
        return value
    return on_load(self.data[key])
Applies a setting value to a key, if the value is not `None`.

Returns without prompting if either of the following:

* `value` is not `None`
* already present in the dictionary

Args:
    prompt: May either be a string to prompt via `raw_input` or a method
        (callable) that returns the value.
    on_load: lambda. Value is passed through here after loaded.
    on_save: lambda. Value is saved as this value.
9,080
def execute(self, progress_fn, print_verbose_info=None):
    assert_is_type(progress_fn, FunctionType, GeneratorType, MethodType)
    if isinstance(progress_fn, GeneratorType):
        progress_fn = (lambda g: lambda: next(g))(progress_fn)
    self._next_poll_time = 0
    self._t0 = time.time()
    self._x0 = 0
    self._v0 = 0.01
    self._ve = 0.01
    progress = 0
    status = None
    try:
        while True:
            now = time.time()
            progress = min(self._compute_progress_at_time(now)[0], 1)
            if progress == 1 and self._get_real_progress() >= 1:
                break
            result = self._widget.render(progress)
            assert_is_type(result, RenderResult)
            time0 = result.next_time
            time1 = self._get_time_at_progress(result.next_progress)
            next_render_time = min(time0, time1)
            self._draw(result.rendered)
            wait_time = min(next_render_time, self._next_poll_time) - now
            if wait_time > 0:
                time.sleep(wait_time)
            if print_verbose_info is not None:
                print_verbose_info(progress)
    except KeyboardInterrupt:
        status = "cancelled"
    except StopIteration as e:
        status = str(e)
    result = self._widget.render(progress=progress, status=status)
    self._draw(result.rendered, final=True)
    if status == "cancelled":
        raise StopIteration(status)
Start the progress bar, and return only when the progress reaches 100%.

:param progress_fn: the executor function (or a generator). This function
    should take no arguments and return either a single number -- the
    current progress level, or a tuple (progress level, delay), where
    delay is the time interval for when the progress should be checked
    again. This function may at any point raise the
    ``StopIteration(message)`` exception, which will interrupt the
    progress bar, display the ``message`` in red font, and then re-raise
    the exception.

:raises StopIteration: if the job is interrupted. The reason for
    interruption is provided in the exception's message. The message will
    say "cancelled" if the job was interrupted by the user by pressing
    Ctrl+C.
9,081
def _get_default_iface_linux():
    # literals reconstructed from the /proc/net/route format described
    # in the docstring
    data = _read_file('/proc/net/route')
    if data is not None and len(data) > 1:
        for line in data.split('\n')[1:-1]:
            iface_name, dest = line.split('\t')[:2]
            if dest == '00000000':
                return iface_name
    return None
Get the default interface by reading /proc/net/route. This is the same source as the `route` command, however it's much faster to read this file than to call `route`. If it fails for whatever reason, we can fall back on the system commands (e.g for a platform that has a route command, but maybe doesn't use /proc?).
9,082
def js_distance(p, q):
    js_dist = np.sqrt(js_divergence(p, q))
    return js_dist
Compute the Jensen-Shannon distance between two discrete distributions.

NOTE: JS divergence is not a metric but the sqrt of JS divergence is a
metric and is called the JS distance.

Parameters
----------
p : np.array
    probability mass array (sums to 1)
q : np.array
    probability mass array (sums to 1)

Returns
-------
js_dist : float
    Jensen-Shannon distance between two discrete distributions
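Spelling out the quantity being square-rooted: with $m$ the midpoint mixture, the Jensen-Shannon divergence is

$$\mathrm{JSD}(p, q) = \tfrac{1}{2} D_{\mathrm{KL}}(p \,\|\, m) + \tfrac{1}{2} D_{\mathrm{KL}}(q \,\|\, m), \qquad m = \tfrac{1}{2}(p + q),$$

and the metric returned here is $d_{\mathrm{JS}} = \sqrt{\mathrm{JSD}(p, q)}$.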
9,083
def play(self, **kwargs):
    path = '%s/%s/play' % (self.manager.path, self.get_id())  # path literal lost in extraction; reconstructed
    self.manager.gitlab.http_post(path)
Trigger a job explicitly.

Args:
    **kwargs: Extra options to send to the server (e.g. sudo)

Raises:
    GitlabAuthenticationError: If authentication is not correct
    GitlabJobPlayError: If the job could not be triggered
9,084
def clear_cached_data(self):
    # the loop over known devices was lost in extraction; only the
    # removal call (BlueZ D-Bus adapter interface) survives
    adapter.RemoveDevice(device._device.object_path)
Clear any internally cached BLE device data. Necessary in some cases to prevent issues with stale device data getting cached by the OS.
9,085
def get_theming_attribute(self, mode, name, part=None):
    colours = int(self._config.get('colourmode'))  # config key lost in extraction; reconstructed
    return self._theme.get_attribute(colours, mode, name, part)
looks up theming attribute

:param mode: ui-mode (e.g. `search`,`thread`...)
:type mode: str
:param name: identifier of the attribute
:type name: str
:rtype: urwid.AttrSpec
9,086
def getLogger(name):
    log = logging.getLogger(name=name)
    for handler in log.handlers:
        if name == handler.name:
            return log
    else:
        return LogSetup().default_logger(name=name.split('.')[0])
Return a logger from a given name.

If the name does not have a log handler, this will create one for it
based on the module name which will log everything to a log file in a
location the executing user will have access to.

:param name: ``str``
:return: ``object``
9,087
def build_network_settings(**settings):
    # most string literals (grain names, option keys, template names,
    # resolv.conf regexes) were lost in extraction; reconstructed below
    # per the conventions of salt's debian_ip module
    changes = []
    current_network_settings = _parse_current_network_settings()
    opts = _parse_network_settings(settings, current_network_settings)
    skip_etc_default_networking = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if skip_etc_default_networking:
        if opts['networking'] == 'yes':
            service_cmd = 'service.enable'
        else:
            service_cmd = 'service.disable'
        if __salt__['service.available']('NetworkManager'):
            __salt__[service_cmd]('NetworkManager')
        if __salt__['service.available']('networking'):
            __salt__[service_cmd]('networking')
    else:
        try:
            template = JINJA.get_template('network.jinja')
        except jinja2.exceptions.TemplateNotFound:
            log.error('Could not load template network.jinja')
            return ''
        network = template.render(opts)
        if 'test' in settings and settings['test']:
            return _read_temp(network)
        _write_file_network(network, _DEB_NETWORKING_FILE, True)
    sline = opts['hostname'].split('.', 1)
    opts['hostname'] = sline[0]
    current_domainname = current_network_settings['domainname']
    current_searchdomain = current_network_settings['searchdomain']
    new_domain = False
    if len(sline) > 1:
        new_domainname = sline[1]
        if new_domainname != current_domainname:
            domainname = new_domainname
            opts['domainname'] = new_domainname
            new_domain = True
        else:
            domainname = current_domainname
            opts['domainname'] = domainname
    else:
        domainname = current_domainname
        opts['domainname'] = domainname
    new_search = False
    if 'searchdomain' in opts:
        new_searchdomain = opts['searchdomain']
        if new_searchdomain != current_searchdomain:
            searchdomain = new_searchdomain
            opts['searchdomain'] = new_searchdomain
            new_search = True
        else:
            searchdomain = current_searchdomain
            opts['searchdomain'] = searchdomain
    else:
        searchdomain = current_searchdomain
        opts['searchdomain'] = searchdomain
    if new_domain or new_search:
        resolve = _parse_resolve()
        domain_prog = re.compile(r'domain\s+\S+')
        search_prog = re.compile(r'search\s+\S+')
        new_contents = []
        for item in _read_file(_DEB_RESOLV_FILE):
            if domain_prog.match(item):
                item = 'domain {0}'.format(domainname)
            elif search_prog.match(item):
                item = 'search {0}'.format(searchdomain)
            new_contents.append(item)
        if 'domain' not in resolve:
            new_contents.insert(0, 'domain {0}'.format(domainname))
        if 'search' not in resolve:
            new_contents.insert('domain' in resolve,
                                'search {0}'.format(searchdomain))
        new_resolv = '\n'.join(new_contents)
        if not ('test' in settings and settings['test']):
            _write_file_network(new_resolv, _DEB_RESOLV_FILE)
    try:
        template = JINJA.get_template('network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template network.jinja')
        return ''
    network = template.render(opts)
    changes.extend(_read_temp(network))
    return changes
Build the global network script.

CLI Example:

.. code-block:: bash

    salt '*' ip.build_network_settings <settings>
9,088
def get_library_citation():
    all_ref_data = api.get_reference_data()
    lib_refs_data = {k: all_ref_data[k] for k in _lib_refs}
    return (_lib_refs_desc, lib_refs_data)
Return a descriptive string and reference data for what users of the library should cite
9,089
def parse_field(self, field_data, index=0):
    # dict key literals were lost in extraction; 'name'/'value'/'label'/
    # 'widget' below are reconstructed placeholders
    field = {
        'index': index,
    }
    if isinstance(field_data, str):
        field.update(self.parse_string_field(field_data))
    elif isinstance(field_data, dict):
        field.update(field_data)
    else:
        raise TypeError('Unsupported field type: {}'.format(type(field_data)))
    if 'value' not in field:
        field['value'] = None
    if 'label' not in field and field['name']:
        try:
            field['label'] = self.object._meta.get_field(field['name']).verbose_name.capitalize()
        except Exception:
            field['label'] = field['name'].replace('_', ' ').capitalize()
    elif 'label' not in field:
        field['label'] = ''
    if 'widget' not in field:
        field['widget'] = ''
    for name, options in self.fields_options.items():
        if 'default' in options and name not in field:
            field[name] = options['default']
    return field
Parse field and add missing options
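A hypothetical illustration of the expanded field dict (the key names follow the inferred reconstruction above, so treat them as assumptions):

# Inside the class this method belongs to:
field = self.parse_field('first_name', index=2)
# -> {'index': 2, 'name': 'first_name', 'label': 'First name', 'value': '', ...}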
9,090
def grant_permissions(self, proxy_model):
    ContentType = apps.get_model('contenttypes', 'ContentType')
    try:
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        return
    # NOTE: the middle of this snippet was lost in extraction; the lookup
    # below follows Django's standard pattern for creating default model
    # permissions and should be read as an assumption.
    ctype = ContentType.objects.get_for_model(proxy_model)
    searched_perms = [
        ('%s_%s' % (action, proxy_model._meta.model_name),
         'Can %s %s' % (action, proxy_model._meta.verbose_name_raw))
        for action in ('add', 'change', 'delete')
    ]
    all_perms = set(Permission.objects.filter(
        content_type=ctype).values_list('content_type', 'codename'))
    permissions = [
        Permission(codename=codename, name=name, content_type=ctype)
        for codename, name in searched_perms
        if (ctype.pk, codename) not in all_perms
    ]
    Permission.objects.bulk_create(permissions)
Create the default permissions for the newly added proxy model
9,091
def default_image_loader(filename, flags, **kwargs): def load(rect=None, flags=None): return filename, rect, flags return load
This default image loader just returns filename, rect, and any flags
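A quick usage sketch (the file name and rect are illustrative):

# The factory is called once per image file; the returned callable is then
# called once per tile region and simply echoes back what it was given.
load = default_image_loader('tileset.png', flags=None)
tile = load(rect=(0, 0, 16, 16))
# tile == ('tileset.png', (0, 0, 16, 16), None)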
9,092
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
    # 'sql_split' is an assumed default; the original literal was stripped.
    return Execute(sql_script, split_algo, prep_statements, dump_fails, self)
Wrapper method providing access to the SQLScript class's methods and properties.
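A hedged usage sketch (assumes `db` is an instance of the connection class this method is defined on; the path is illustrative):

# Execute every statement in a .sql file via the Execute helper,
# splitting with the default algorithm and logging failed statements.
result = db.script('migrations/001_init.sql')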
9,093
def run_radia(job, bams, univ_options, radia_options, chrom):
    job.fileStore.logToMaster('Running radia on %s:%s'
                              % (univ_options['patient'], chrom))
    work_dir = job.fileStore.getLocalTempDir()
    # NOTE: the docker-visible file names, CLI flag strings and resource
    # sizes below were stripped from this snippet; the values used here are
    # assumptions grounded in the bams/radia_options keys listed in the
    # docstring, not verbatim source.
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fasta': radia_options['genome'],
        'dbsnp.vcf': radia_options['dbsnp_vcf']}
    input_files = get_files_from_filestore(job, input_files, work_dir,
                                           docker=True)
    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],
                  chrom,
                  # flag names below are assumptions about the radia CLI
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaFasta=', input_files['genome.fasta']]),
                  '-f', input_files['genome.fasta'],
                  '-o', docker_path(radia_output),
                  '-d', input_files['dbsnp.vcf'],
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_files = defaultdict()
    for radia_file in [radia_output, radia_log]:
        output_files[os.path.basename(radia_file)] = \
            job.fileStore.writeGlobalFile(radia_file)
    filterradia = job.wrapJobFn(run_filter_radia, bams,
                                output_files[os.path.basename(radia_output)],
                                univ_options, radia_options, chrom,
                                disk='100G', memory='30G')  # placeholder sizes
    job.addChild(filterradia)
    return filterradia.rv()
This module will run radia on the RNA and DNA bams ARGUMENTS 1. bams: Dict of bams and their indexes bams |- 'tumor_rna': <JSid> |- 'tumor_rnai': <JSid> |- 'tumor_dna': <JSid> |- 'tumor_dnai': <JSid> |- 'normal_dna': <JSid> +- 'normal_dnai': <JSid> 2. univ_options: Dict of universal arguments used by almost all tools univ_options +- 'dockerhub': <dockerhub to use> 3. radia_options: Dict of parameters specific to radia radia_options |- 'dbsnp_vcf': <JSid for dnsnp vcf file> +- 'genome': <JSid for genome fasta file> 4. chrom: String containing chromosome name with chr appended RETURN VALUES 1. Dict of filtered radia output vcf and logfile (Nested return) |- 'radia_filtered_CHROM.vcf': <JSid> +- 'radia_filtered_CHROM_radia.log': <JSid>
9,094
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir): if bed_file and utils.file_exists(bed_file): ready_depth = tz.get_in(["depth", target_name], data) if ready_depth: cov_file = ready_depth["regions"] dist_file = ready_depth["dist"] thresholds_file = ready_depth.get("thresholds") out_cov_file = os.path.join(out_dir, os.path.basename(cov_file)) out_dist_file = os.path.join(out_dir, os.path.basename(dist_file)) out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file)) \ if thresholds_file and os.path.isfile(thresholds_file) else None if not utils.file_uptodate(out_cov_file, cov_file): utils.copy_plus(cov_file, out_cov_file) utils.copy_plus(dist_file, out_dist_file) utils.copy_plus(thresholds_file, out_thresholds_file) if out_thresholds_file else None return [out_cov_file, out_dist_file] + ([out_thresholds_file] if out_thresholds_file else []) return []
Calculate coverage at different completeness cutoffs for each region in the coverage option.
9,095
def create_dset_to3d(prefix, file_list, file_order='zt', num_slices=None,
                     num_reps=None, TR=None, slice_order='alt+z',
                     only_dicoms=True, sort_filenames=False):
    # NOTE: several literals in this snippet were lost in extraction; the
    # tag-dict keys, notify messages, regex and to3d invocation below are
    # reconstructions from the surviving fragments and should be read as
    # assumptions.
    tags = {
        'rows': (0x0028, 0x0010),   # DICOM Rows
        'reps': (0x0020, 0x0105),   # Number of Temporal Positions
        'TR': (0x0018, 0x0080)      # Repetition Time
    }
    with nl.notify('Creating dataset %s' % prefix):
        if os.path.exists(prefix):
            nl.notify('Error: %s already exists' % prefix,
                      level=nl.level.error)
            return False
        tagvals = {}
        for f in file_list:
            try:
                tagvals[f] = info_for_tags(f, tags.values())
            except:
                pass
        if only_dicoms:
            new_file_list = []
            for f in file_list:
                if f in tagvals and len(tagvals[f][tags['rows']]) > 0:
                    new_file_list.append(f)
            file_list = new_file_list
        if sort_filenames:
            def file_num(fname):
                try:
                    nums = [x.strip() for x in re.findall(r'[0-9.]+', fname)
                            if x.strip() != '.']
                    return float(nums[-1])
                except:
                    return fname
            file_list = sorted(file_list, key=file_num)
        if len(file_list) == 0:
            nl.notify("Error: couldn't find any valid DICOM images",
                      level=nl.level.error)
            return False
        if num_slices is None:
            if num_reps is None:
                nl.notify('Error: trying to guess number of slices and '
                          'reps, but neither was given',
                          level=nl.level.error)
                return False
            num_slices = len(file_list) / num_reps
        if num_reps is None:
            if len(file_list) % num_slices == 0:
                num_reps = len(file_list) / num_slices
            elif len(file_list) == 1 and tags['reps'] in tagvals[file_list[0]]:
                num_reps = tagvals[file_list[0]][tags['reps']]
            else:
                nl.notify("Error: %d files doesn't divide evenly into %d "
                          "(number of slices)" % (len(file_list), num_slices),
                          level=nl.level.error)
                return False
        if TR is None:
            TR = tagvals[file_list[0]][tags['TR']]
        cmd = ['to3d', '-skip_outliers', '-quit_on_err', '-prefix', prefix]
        if num_reps > 1:
            cmd += ['-time:%s' % file_order, str(num_slices), str(num_reps),
                    str(TR), slice_order]
        cmd += ['-@']
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = p.communicate('\n'.join(file_list))
        if not os.path.exists(prefix):
            nl.notify('Error: to3d returned error\nstdout:\n%s\nstderr:\n%s'
                      % (out[0], out[1]), level=nl.level.error)
            return False
manually create dataset by specifying everything (not recommended, but necessary when autocreation fails)

If `num_slices` or `num_reps` is omitted, it will be inferred from the number of images. If both are omitted, it assumes that this is not a time-dependent dataset

:only_dicoms: filter the given list down to readable DICOM images
:sort_filenames: sort the given files by filename using the right-most number in the filename
9,096
def configure_sessionmaker(graph): engine_routing_strategy = getattr(graph, graph.config.sessionmaker.engine_routing_strategy) if engine_routing_strategy.supports_multiple_binds: ScopedFactory.infect(graph, "postgres") class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): return engine_routing_strategy.get_bind(mapper, clause) return sessionmaker(class_=RoutingSession)
Create the SQLAlchemy session class.
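The `get_bind` hook is the core of this factory: SQLAlchemy asks the session for an engine per operation, so a custom `Session` subclass can route statements across engines. A minimal standalone sketch of the same pattern (the engine URLs and routing rule are illustrative, not this library's strategy):

from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

engines = {
    "primary": create_engine("postgresql:///app"),
    "replica": create_engine("postgresql:///app_replica"),
}

class RoutingSession(Session):
    def get_bind(self, mapper=None, clause=None):
        # Send writes (flushes) to the primary engine, reads to a replica.
        if self._flushing:
            return engines["primary"]
        return engines["replica"]

make_session = sessionmaker(class_=RoutingSession)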
9,097
def generate_variants(unresolved_spec): for resolved_vars, spec in _generate_variants(unresolved_spec): assert not _unresolved_values(spec) yield format_vars(resolved_vars), spec
Generates variants from a spec (dict) with unresolved values. There are two types of unresolved values: Grid search: These define a grid search over values. For example, the following grid search values in a spec will produce six distinct variants in combination: "activation": grid_search(["relu", "tanh"]) "learning_rate": grid_search([1e-3, 1e-4, 1e-5]) Lambda functions: These are evaluated to produce a concrete value, and can express dependencies or conditional distributions between values. They can also be used to express random search (e.g., by calling into the `random` or `np` module). "cpu": lambda spec: spec.config.num_workers "batch_size": lambda spec: random.uniform(1, 1000) Finally, to support defining specs in plain JSON / YAML, grid search and lambda functions can also be defined alternatively as follows: "activation": {"grid_search": ["relu", "tanh"]} "cpu": {"eval": "spec.config.num_workers"}
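A usage sketch built from the docstring's own examples (the spec shape is illustrative):

# Two grid-searched keys produce 2 * 3 = 6 resolved variants.
spec = {
    "config": {
        "activation": {"grid_search": ["relu", "tanh"]},
        "learning_rate": {"grid_search": [1e-3, 1e-4, 1e-5]},
    }
}
for variant_name, resolved_spec in generate_variants(spec):
    print(variant_name,
          resolved_spec["config"]["activation"],
          resolved_spec["config"]["learning_rate"])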
9,098
def _create_opt_rule(self, rulename):
    optname = rulename + '_opt'

    def optrule(self, p):
        p[0] = p[1]

    # The docstring carries the grammar for ply.yacc: the optional rule
    # matches either 'empty' or the underlying rule.
    optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
    optrule.__name__ = 'p_%s' % optname
    setattr(self.__class__, optrule.__name__, optrule)
Given a rule name, creates an optional ply.yacc rule for it. The name of the optional rule is <rulename>_opt
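For instance (the rule name is illustrative), one call installs the optional rule on the parser class:

self._create_opt_rule('declaration_list')
# ...which is equivalent to hand-writing:
#     def p_declaration_list_opt(self, p):
#         '''declaration_list_opt : empty
#            | declaration_list'''
#         p[0] = p[1]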
9,099
def view_set(method_name):
    def setter(value, context, **_params):
        # Look up the named method on the context's view and delegate to it.
        method = getattr(context["view"], method_name)
        return _set(method, context["key"], value, (), {})
    return setter
Creates a setter that will call the view method with the context's key as first parameter and the value as second parameter. @param method_name: the name of a method belonging to the view. @type method_name: str
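A hedged usage sketch grounded in the docstring (the `DummyView` class is illustrative, and the exact behavior of the module-level `_set` helper is an assumption):

class DummyView:
    def set_option(self, key, value):
        print('set %s = %r' % (key, value))

setter = view_set('set_option')
# The context supplies the view instance and the key; the new value comes first.
setter(42, {'view': DummyView(), 'key': 'width'})   # prints: set width = 42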