Dataset schema (from the viewer header): "Unnamed: 0" — int64, values 0 to 389k; "code" — string, lengths 26 to 79.6k; "docstring" — string, lengths 1 to 46.9k.
13,300
def write_file_to_zip_with_neutral_metadata(zfile, filename, content): info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0)) info.compress_type = zipfile.ZIP_DEFLATED info.comment = "".encode() info.create_system = 0 zfile.writestr(info, content)
Write the string `content` to `filename` in the open ZipFile `zfile`. Args: zfile (ZipFile): open ZipFile to write the content into filename (str): the file path within the zip file to write into content (str): the content to write into the zip Returns: None
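A minimal usage sketch: the fixed date_time and empty comment make the archive byte-for-byte reproducible across builds. The output filename "bundle.zip" is hypothetical.

import zipfile

# Two members written with identical, neutral metadata -> deterministic archive.
with zipfile.ZipFile("bundle.zip", "w") as zf:
    write_file_to_zip_with_neutral_metadata(zf, "a.txt", "alpha")
    write_file_to_zip_with_neutral_metadata(zf, "b.txt", "beta")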
13,301
def next(cls): try: with db.session.begin_nested(): obj = cls() db.session.add(obj) except IntegrityError: with db.session.begin_nested(): cls._set_sequence(cls.max()) obj = cls() db.session.add(obj) return obj.recid
Return next available record identifier.
13,302
def upload_html(destination, html, name=None): [project, path, n] = parse_destination(destination) try: dxfile = dxpy.upload_string(html, media_type="text/html", project=project, folder=path, hidden=True, name=name or None) return dxfile.get_id() except dxpy.DXAPIError as ex: parser.error("Could not upload HTML report to DNAnexus server! ({ex})".format(ex=ex))
Uploads the HTML to a file on the server
13,303
def call_for_each_tower( towers, func, devices=None, use_vs=None): ret = [] if devices is not None: assert len(devices) == len(towers) if use_vs is not None: assert len(use_vs) == len(towers) tower_names = ['tower{}'.format(idx) for idx in range(len(towers))] for idx, t in enumerate(towers): device = devices[idx] if devices is not None else '/gpu:{}'.format(t) usevs = use_vs[idx] if use_vs is not None else False reuse = not usevs and idx > 0 with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext( tower_names[idx], vs_name=tower_names[idx] if usevs else '', index=idx, total=len(towers)): if len(str(device)) < 10: logger.info("Building graph for tower {} on device {} ...".format(tower_names[idx], device)) with override_to_local_variable(enable=usevs): ret.append(func()) return ret
Run `func` on all GPUs (towers) and return the results. Args: towers (list[int]): a list of GPU id. func: a lambda to be called inside each tower devices: a list of devices to be used. By default will use '/gpu:{tower}' use_vs (list[bool]): list of use_vs to passed to TowerContext Returns: List of outputs of ``func``, evaluated on each tower.
13,304
def split_and_strip_without(string, exclude, separator_regexp=None): result = split_and_strip(string, separator_regexp) if not exclude: return result return [x for x in result if x not in exclude]
Split a string into items, and trim any excess spaces Any items in exclude are not in the returned list >>> split_and_strip_without('fred, was, here ', ['was']) ['fred', 'here']
13,305
def dfs_postorder(self, reverse=False): stack = deque() stack.append(self) visited = set() while stack: node = stack.pop() if node in visited: yield node else: visited.add(node) stack.append(node) if hasattr(node, "childs"): if reverse: stack.extend(node.childs) else: stack.extend(node.childs[::-1])
Generator that yields each element of the tree in post-order. Keyword arguments: reverse -- if true, the traversal is done from right to left.
13,306
def match(self, pattern): m = pattern.match(self._string, self._index) if m: self._index = m.end() return m
Perform regex match at index.
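This is the classic scanner/tokenizer pattern: the cursor (`self._index`) advances only on a successful anchored match. A standalone sketch of the same idea, using only the stdlib:

import re

class Scanner:
    def __init__(self, string):
        self._string, self._index = string, 0

    def match(self, pattern):
        # Anchored match at the cursor; advance only on success.
        m = pattern.match(self._string, self._index)
        if m:
            self._index = m.end()
        return m

s = Scanner("abc123")
assert s.match(re.compile(r"[a-z]+")).group() == "abc"
assert s.match(re.compile(r"\d+")).group() == "123"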
13,307
def tredparse(args): p = OptionParser(tredparse.__doc__) p.add_option("--maxinsert", default=300, type="int", help="Maximum number of repeats") add_simulate_options(p) opts, args, iopts = p.set_image_options(args, figsize="10x10") if len(args) != 0: sys.exit(not p.print_help()) depth = opts.depth max_insert = opts.maxinsert fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2, figsize=(iopts.w, iopts.h)) plt.tight_layout(pad=3) lobstr_results = parse_results("lobstr_results_homo-20x-150bp-500bp.txt") tredparse_results = parse_results("tredparse_results_homo-20x-150bp-500bp.txt") title = SIMULATED_HAPLOID + r" (Depth=$%s\times$)" % depth plot_compare(ax1, title, tredparse_results, lobstr_results, max_insert=max_insert) lobstr_results = parse_results("lobstr_results_het-20x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-20x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % depth plot_compare(ax2, title, tredparse_results, lobstr_results, max_insert=max_insert) lobstr_results = parse_results("lobstr_results_het-5x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-5x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 5 plot_compare(ax3, title, tredparse_results, lobstr_results, max_insert=max_insert) lobstr_results = parse_results("lobstr_results_het-80x-150bp-500bp.txt", exclude=20) tredparse_results = parse_results("tredparse_results_het-80x-150bp-500bp.txt", exclude=20) title = SIMULATED_DIPLOID + r" (Depth=$%s\times$)" % 80 plot_compare(ax4, title, tredparse_results, lobstr_results, max_insert=max_insert) for ax in (ax1, ax2, ax3, ax4): ax.set_xlim(0, max_insert) ax.set_ylim(0, max_insert) root = fig.add_axes([0, 0, 1, 1]) pad = .03 panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"), (pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D"))) normalize_axes(root) image_name = "tredparse." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
%prog tredparse Compare performances of various variant callers on simulated STR datasets. Adds coverage comparisons as panel C and D.
13,308
def _common_query_parameters(self, doc_type, includes, owner, promulgated_only, series, sort): queries = [] if includes is not None: queries.extend([('include', include) for include in includes]) if doc_type is not None: queries.append(('type', doc_type)) if promulgated_only: queries.append(('promulgated', 1)) if owner is not None: queries.append(('owner', owner)) if series is not None: if type(series) is list: series = ','.join(series) queries.append(('series', series)) if sort is not None: queries.append(('sort', sort)) return queries
Extract common query parameters between search and list into a list of tuples. @param includes What metadata to return in results (e.g. charm-config). @param doc_type Filter to this type: bundle or charm. @param promulgated_only Whether to filter to only promulgated charms. @param sort Sorting the result based on the sort string provided, which can be name, author, series, with - in front for descending. @param owner Optional owner. If provided, search results will only include entities that owner can view. @param series The series to filter; can be a list of series or a single series.
13,309
def divrank_scipy(G, alpha=0.25, d=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight='weight', dangling=None): import scipy.sparse N = len(G) if N == 0: return {} nodelist = G.nodes() M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, dtype=float) S = scipy.array(M.sum(axis=1)).flatten() S[S != 0] = 1.0 / S[S != 0] Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr') M = Q * M M = scipy.sparse.lil_matrix(M) M.setdiag(0.0) M = alpha * M M.setdiag(1.0 - alpha) x = scipy.repeat(1.0 / N, N) if personalization is None: p = scipy.repeat(1.0 / N, N) else: missing = set(nodelist) - set(personalization) if missing: raise NetworkXError('Personalization dictionary must have a value for every node. Missing nodes %s' % missing) p = scipy.array([personalization[n] for n in nodelist], dtype=float) p = p / p.sum() if dangling is None: dangling_weights = p else: missing = set(nodelist) - set(dangling) if missing: raise NetworkXError('Dangling node dictionary must have a value for every node. Missing nodes %s' % missing) dangling_weights = scipy.array([dangling[n] for n in nodelist], dtype=float) dangling_weights /= dangling_weights.sum() is_dangling = scipy.where(S == 0)[0] for _ in range(max_iter): xlast = x D_t = M * x x = ( d * (x / D_t * M * x + sum(x[is_dangling]) * dangling_weights) + (1.0 - d) * p ) err = scipy.absolute(x - xlast).sum() if err < N * tol: return dict(zip(nodelist, map(float, x))) raise NetworkXError('divrank_scipy: power iteration failed to converge in %d iterations.' % max_iter)
Returns the DivRank (Diverse Rank) of the nodes in the graph. This code is based on networkx.pagerank_scipy
13,310
def compare(self, statement_a, statement_b): document_a = self.nlp(statement_a.text.lower()) document_b = self.nlp(statement_b.text.lower()) statement_a_lemmas = set([ token.lemma_ for token in document_a if not token.is_stop ]) statement_b_lemmas = set([ token.lemma_ for token in document_b if not token.is_stop ]) numerator = len(statement_a_lemmas.intersection(statement_b_lemmas)) denominator = float(len(statement_a_lemmas.union(statement_b_lemmas))) ratio = numerator / denominator return ratio
Return the calculated similarity of two statements based on the Jaccard index.
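The same Jaccard computation over plain token sets, without the spaCy pipeline (a sketch; the method above additionally lemmatizes and removes stop words via the loaded `self.nlp` model):

def jaccard(a: str, b: str) -> float:
    # |A intersect B| / |A union B| over lowercase word sets.
    sa, sb = set(a.lower().split()), set(b.lower().split())
    return len(sa & sb) / float(len(sa | sb))

print(jaccard("the cat sat", "the cat ran"))  # 0.5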
13,311
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,), error_callback=None, custom_log=None, retry=0): self._check_started() cmd_output = io.StringIO() channel = self._get_channel() cmd = self._prepare_cmd(cmd, sudo=sudo) if not custom_log: custom_log = cmd LOG.info("%s run '%s'" % (self.description, custom_log)) channel.exec_command(cmd) while True: received = None rl, _, _ = select.select([channel], [], [], 30) if rl: received = channel.recv(1024).decode('UTF-8', 'ignore').strip() if received: LOG.debug(received) cmd_output.write(received) if channel.exit_status_ready() and not received: break cmd_output = cmd_output.getvalue() exit_status = channel.exit_status try: return self._evaluate_run_result( exit_status, cmd_output, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log) except (paramiko.ssh_exception.SSHException, socket.error) as e: if not retry: raise e else: return self.run( cmd, sudo=sudo, ignore_error=ignore_error, success_status=success_status, error_callback=error_callback, custom_log=custom_log, retry=(retry - 1))
Run a command on the remote host. The command is run on the remote host; if there is a redirected host then the command will be run on that redirected host. See __init__. :param cmd: the command to run :type cmd: str :param sudo: True if the command should be run with sudo; this parameter disables the use of environment files. :type sudo: bool :param success_status: the list of the possible success status :type success_status: list :param error_callback: if provided, the callback to call in case of a failure. It will be called with two args, the output of the command and the returned error code. :return: the tuple (output of the command, returned code) :rtype: tuple :param custom_log: an optional string to record in the log instead of the command. This is useful for example if you want to hide a password. :type custom_log: str
13,312
def plot(result_pickle_file_path, show, plot_save_file): import pandas as pd from .plot import plot_result result_dict = pd.read_pickle(result_pickle_file_path) plot_result(result_dict, show, plot_save_file)
[sys_analyser] draw result DataFrame
13,313
def generate_config_set(self, config): if isinstance(config, dict): self.config = [(config, 1.0)] elif isinstance(config, list): total_weight = 0. self.config = [] for params in config: weight = params['weight'] total_weight += params['weight'] self.config.append((params, weight)) if fabs(total_weight - 1.0) > 1E-7: raise ValueError('MFD config weights do not sum to 1.0 for source %s' % self.id) else: raise ValueError('MFD config must be input as dictionary or list!')
Generates a list of magnitude frequency distributions and renders as a tuple :param dict/list config: Configuration parameters of the magnitude frequency distribution
13,314
def parse_on_condition(self, node): try: test = node.lattrib['test'] except KeyError: self.raise_error('<OnCondition> must specify a test.') event_handler = OnCondition(test) self.current_regime.add_event_handler(event_handler) self.current_event_handler = event_handler self.process_nested_tags(node) self.current_event_handler = None
Parses <OnCondition> @param node: Node containing the <OnCondition> element @type node: xml.etree.Element
13,315
def timebinlc_worker(task): lcfile, binsizesec, kwargs = task try: binnedlc = timebinlc(lcfile, binsizesec, **kwargs) LOGINFO('%s binned using %s sec -> %s OK' % (lcfile, binsizesec, binnedlc)) return binnedlc except Exception as e: LOGEXCEPTION('failed to bin %s using binsizesec = %s' % (lcfile, binsizesec)) return None
This is a parallel worker for the function below. Parameters ---------- task : tuple This is of the form:: task[0] = lcfile task[1] = binsizesec task[2] = {'outdir','lcformat','lcformatdir', 'timecols','magcols','errcols','minbinelems'} Returns ------- str The output pickle file with the binned LC if successful. None otherwise.
13,316
def T11(word, rules): WORD = word offset = 0 for vvv in precedence_sequences(WORD): i = vvv.start(1) + (1 if vvv.group(1)[-1] in 'uy' else 2) + offset WORD = WORD[:i] + '.' + WORD[i:] offset += 1 rules += ' T11' if word != WORD else '' return WORD, rules
If a VVV sequence contains a /u,y/-final diphthong, insert a syllable boundary between the diphthong and the third vowel.
13,317
def asterisk_to_min_max(field, time_filter, search_engine_endpoint, actual_params=None): if actual_params: raise NotImplementedError("actual_params") start, end = parse_solr_time_range_as_pair(time_filter) if start == '*' or end == '*': params_stats = { "q": "*:*", "rows": 0, "stats.field": field, "stats": "true", "wt": "json" } res_stats = requests.get(search_engine_endpoint, params=params_stats) if res_stats.ok: stats_date_field = res_stats.json()["stats"]["stats_fields"][field] date_min = stats_date_field["min"] date_max = stats_date_field["max"] if start != '*': date_min = start if end != '*': date_max = end time_filter = "[{0} TO {1}]".format(date_min, date_max) return time_filter
Translate [* TO *] to something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE] :param field: map the stats to this field. :param time_filter: this is the value to be translated, e.g. "[* TO 2000]" :param search_engine_endpoint: solr core :param actual_params: (not implemented) to merge with other params. :return: translated time filter
13,318
def populate_branch(self, editor, root_item, tree_cache=None): if tree_cache is None: tree_cache = {} for _l in list(tree_cache.keys()): if _l >= editor.get_line_count(): if _l in tree_cache: remove_from_tree_cache(tree_cache, line=_l) ancestors = [(root_item, 0)] cell_ancestors = [(root_item, 0)] previous_item = None previous_level = None prev_cell_level = None prev_cell_item = None oe_data = editor.get_outlineexplorer_data() for block_nb in range(editor.get_line_count()): line_nb = block_nb+1 data = oe_data.get(block_nb) level = None if data is None else data.fold_level citem, clevel, _d = tree_cache.get(line_nb, (None, None, "")) if level is None: if citem is not None: remove_from_tree_cache(tree_cache, line=line_nb) continue not_class_nor_function = data.is_not_class_nor_function() if not not_class_nor_function: class_name = data.get_class_name() if class_name is None: func_name = data.get_function_name() if func_name is None: if citem is not None: remove_from_tree_cache(tree_cache, line=line_nb) continue if not_class_nor_function and not data.is_comment(): if citem is not None: remove_from_tree_cache(tree_cache, line=line_nb) continue if citem is not None: cname = to_text_string(citem.text(0)) cparent = citem.parent if (data is not None and data.def_type == data.CELL and self.group_cells): preceding = (root_item if previous_item is None else previous_item) cell_level = data.cell_level if prev_cell_level is not None: if cell_level == prev_cell_level: pass elif cell_level > prev_cell_level: cell_ancestors.append((prev_cell_item, prev_cell_level)) else: while (len(cell_ancestors) > 1 and cell_level <= prev_cell_level): cell_ancestors.pop(-1) _item, prev_cell_level = cell_ancestors[-1] parent, _level = cell_ancestors[-1] if citem is not None: if data.text == cname and level == clevel: previous_level = clevel previous_item = citem continue else: remove_from_tree_cache(tree_cache, line=line_nb) item = CellItem(data.def_name, line_nb, parent, preceding) item.setup() debug = "%s -- %s/%s" % (str(item.line).rjust(6), to_text_string(item.parent().text(0)), to_text_string(item.text(0))) tree_cache[line_nb] = (item, level, debug) ancestors = [(item, 0)] prev_cell_level = cell_level prev_cell_item = item previous_item = item continue if previous_level is not None: if level == previous_level: pass elif level > previous_level: ancestors.append((previous_item, previous_level)) else: while len(ancestors) > 1 and level <= previous_level: ancestors.pop(-1) _item, previous_level = ancestors[-1] parent, _level = ancestors[-1] preceding = root_item if previous_item is None else previous_item if not_class_nor_function and data.is_comment(): if not self.show_comments: if citem is not None: remove_from_tree_cache(tree_cache, line=line_nb) continue if citem is not None: if data.text == cname and level == clevel: previous_level = clevel previous_item = citem continue else: remove_from_tree_cache(tree_cache, line=line_nb) if data.def_type == data.CELL: item = CellItem(data.def_name, line_nb, parent, preceding) else: item = CommentItem(data.text, line_nb, parent, preceding) elif class_name is not None: if citem is not None: if (class_name == cname and level == clevel and parent is cparent): previous_level = clevel previous_item = citem continue else: remove_from_tree_cache(tree_cache, line=line_nb) item = ClassItem(class_name, line_nb, parent, preceding) else: if citem is not None: if (func_name == cname and level == clevel and parent is cparent): previous_level = clevel previous_item = citem continue else: 
remove_from_tree_cache(tree_cache, line=line_nb) item = FunctionItem(func_name, line_nb, parent, preceding) item.setup() debug = "%s -- %s/%s" % (str(item.line).rjust(6), to_text_string(item.parent().text(0)), to_text_string(item.text(0))) tree_cache[line_nb] = (item, level, debug) previous_level = level previous_item = item return tree_cache
Generates an outline of the editor's content and stores the result in a cache.
13,319
def from_filename(cls, filename): if not filename: logger.error('Err: no filename given') return None if not os.path.exists(filename): logger.error("Err: File '%s' does not exist", filename) return None if os.path.isdir(filename): logger.error("Err: File '%s' is a directory", filename) return None try: audiofile = eyed3.load(filename) except Exception as error: print(type(error), error) return None if audiofile is None: return None tags = audiofile.tag album = tags.album title = tags.title lyrics = '\n'.join([l.text for l in tags.lyrics]) artist = tags.album_artist if not artist: artist = tags.artist song = cls(artist, title, album, lyrics) song.filename = filename return song
Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title.
13,320
def run(self, N=100): th = self.proposal.rvs(size=N) self.X = ThetaParticles(theta=th, lpost=None) self.X.lpost = self.model.logpost(th) lw = self.X.lpost - self.proposal.logpdf(th) self.wgts = rs.Weights(lw=lw) self.norm_cst = rs.log_mean_exp(lw)
Parameters ---------- N: int number of particles Returns ------- wgts: Weights object The importance weights (with attributes lw, W, and ESS) X: ThetaParticles object The N particles (with attributes theta, logpost) norm_cst: float Estimate of the normalising constant of the target
13,321
def do_forceescape(value): if hasattr(value, '__html__'): value = value.__html__() return escape(text_type(value))
Enforce HTML escaping. This will probably double escape variables.
13,322
async def rset(self, timeout: DefaultNumType = _default) -> SMTPResponse: await self._ehlo_or_helo_if_needed() async with self._command_lock: response = await self.execute_command(b"RSET", timeout=timeout) if response.code != SMTPStatus.completed: raise SMTPResponseException(response.code, response.message) return response
Send an SMTP RSET command, which resets the server's envelope (the envelope contains the sender, recipient, and mail data). :raises SMTPResponseException: on unexpected server response code
13,323
def format_output(self, rendered_widgets): ret = [u'<ul>'] for i, field in enumerate(self.fields): label = self.format_label(field, i) help_text = self.format_help_text(field, i) ret.append(u'<li>%s %s %s</li>' % ( label, rendered_widgets[i], field.help_text and help_text)) ret.append(u'</ul>') return u''.join(ret)
This output will yield all widgets grouped in an unordered list
13,324
def get_events(self, start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = True, restrict_to_calendars = []): es = [] calendar_ids = restrict_to_calendars or self.calendar_ids for calendar_id in calendar_ids: now = datetime.now(tz = self.timezone) events = [] page_token = None while True: events = self.service.events().list(pageToken=page_token, maxResults = 250, calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute() for event in events['items']: dt = None nb = DeepNonStrictNestedBunch(event) assert(not(nb._event)) nb._event = event if (not ignore_cancelled) or (nb.status != 'cancelled'): if nb.recurrence: if get_recurring_events_as_instances: es += self.get_recurring_events(calendar_id, nb.id, start_time, end_time) else: es.append(nb) elif nb.start.dateTime: dt = dateutil.parser.parse(nb.start.dateTime) elif nb.start.date: dt = dateutil.parser.parse(nb.start.date) dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) if dt: nb.datetime_o = dt nb.calendar_id = calendar_id es.append(nb) page_token = events.get('nextPageToken') if not page_token: break es.sort(key=lambda x: x.datetime_o) return es
A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are: description, end, htmlLink, location, organizer, start, summary Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
13,325
def _auth_req_callback_func(self, context, internal_request): state = context.state state[STATE_KEY] = {"requester": internal_request.requester} try: state_dict = context.state[consent.STATE_KEY] except KeyError: state_dict = context.state[consent.STATE_KEY] = {} finally: state_dict.update({ "filter": internal_request.attributes or [], "requester_name": internal_request.requester_name, }) satosa_logging(logger, logging.INFO, "Requesting provider: {}".format(internal_request.requester), state) if self.request_micro_services: return self.request_micro_services[0].process(context, internal_request) return self._auth_req_finish(context, internal_request)
This function is called by a frontend module when an authorization request has been processed. :type context: satosa.context.Context :type internal_request: satosa.internal.InternalData :rtype: satosa.response.Response :param context: The request context :param internal_request: request processed by the frontend :return: response
13,326
def rename(self, name, **kwargs): return self._rename(self._dxid, {"project": self._proj, "name": name}, **kwargs)
:param name: New name for the object :type name: string Renames the remote object. The name is changed on the copy of the object in the project associated with the handler.
13,327
def getIncludeAndRuntime(): include_dirs, library_dirs = [], [] py_include = distutils.sysconfig.get_python_inc() plat_py_include = distutils.sysconfig.get_python_inc(plat_specific=1) include_dirs.append(py_include) if plat_py_include != py_include: include_dirs.append(plat_py_include) if os.name == : library_dirs.append(os.path.join(sys.exec_prefix, )) include_dirs.append(os.path.join(sys.exec_prefix, )) MSVC_VERSION = int(distutils.msvccompiler.get_build_version()) if MSVC_VERSION == 14: library_dirs.append(os.path.join(sys.exec_prefix, , , )) elif MSVC_VERSION == 9: suffix = if PLATFORM == else PLATFORM[4:] new_lib = os.path.join(sys.exec_prefix, ) if suffix: new_lib = os.path.join(new_lib, suffix) library_dirs.append(new_lib) elif MSVC_VERSION == 8: library_dirs.append(os.path.join(sys.exec_prefix, , , )) elif MSVC_VERSION == 7: library_dirs.append(os.path.join(sys.exec_prefix, , )) else: library_dirs.append(os.path.join(sys.exec_prefix, , )) if os.name == : library_dirs.append(os.path.join(sys.exec_prefix, )) is_cygwin = sys.platform[:6] == is_atheos = sys.platform[:6] == is_shared = distutils.sysconfig.get_config_var() is_linux = sys.platform.startswith() is_gnu = sys.platform.startswith() is_sunos = sys.platform.startswith() if is_cygwin or is_atheos: if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): library_dirs.append(os.path.join(sys.prefix, "lib", BASENAME, "config")) else: library_dirs.append(os.getcwd()) if (is_linux or is_gnu or is_sunos) and is_shared: if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")): library_dirs.append(distutils.sysconfig.get_config_var()) else: library_dirs.append(os.getcwd()) user_include = os.path.join(site.USER_BASE, "include") user_lib = os.path.join(site.USER_BASE, "lib") if os.path.isdir(user_include): include_dirs.append(user_include) if os.path.isdir(user_lib): library_dirs.append(user_lib) ret_object = (include_dirs, library_dirs) _filter_non_existing_dirs(ret_object) return ret_object
A function from distutils' build_ext.py that was updated and changed to ACTUALLY WORK
13,328
def versions_request(self): ret = self.handle_api_exceptions(, , api_ver=) return [str_dict(x) for x in ret.json()]
List Available REST API Versions
13,329
def compare_digest(a, b): py_version = sys.version_info[0] if py_version >= 3: return _compare_digest_py3(a, b) return _compare_digest_py2(a, b)
Compare two hash digests.
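On any supported Python this kind of wrapper normally delegates to the stdlib's constant-time comparison, which avoids leaking where two digests first differ:

import hmac

a = "5d41402abc4b2a76b9719d911017c592"
b = "5d41402abc4b2a76b9719d911017c592"
# Constant-time comparison; a plain == can leak timing information.
assert hmac.compare_digest(a, b)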
13,330
def extract_docs(): methods = [] def _key(entry): return sorted_entries = sorted(Client.__dict__.items(), key=lambda x: x[0]) tree = {} meta_key = for attr_name, attr_value in sorted_entries: if not hasattr(attr_value, meta_key): continue func = attr_value meta = getattr(func, meta_key) arg_spec = inspect.getargspec(func) if arg_spec[0] and arg_spec[0][0] in (, ): del arg_spec[0][0] func_name = func.__name__ func_spec = func_name + inspect.formatargspec(*arg_spec) doc = textwrap.dedent((func.__doc__ or )) doc = indent(doc, n=3) func_desc_lines = [] for i, line in enumerate(func.__doc__.splitlines()): if i == 0: continue func_desc_lines.append(line.strip()) if not line: break func_desc = .join(func_desc_lines) doc = textwrap.dedent( ).format( func_spec=func_spec, func_name_line= * len(func_name), func_name=func_name, func_desc=func_desc, doc=doc, ) categories = meta[] for category in categories: tree.setdefault(category, []).append(doc) directory = for file in os.listdir(directory): if file.endswith(): os.unlink(os.path.join(directory, file)) for category, func_docs in sorted(tree.items(), key=lambda x: x[0]): category = category or file_path = os.path.join(directory, category) + with open(file_path, ) as docfile: docfile.write( % category + ) title = .format(category=category.capitalize()) docfile.write( % title) docfile.write( % (len(title) * )) docfile.write() for func_doc in func_docs: docfile.write(func_doc + )
Parses the nano.rpc.Client for methods that have a __doc_meta__ attribute and saves generated docs
13,331
def parse_eggs_list(path): with open(path, ) as script: data = script.readlines() start = 0 end = 0 for counter, line in enumerate(data): if not start: if in line: start = counter + 1 if counter >= start and not end: if in line: end = counter script_eggs = tidy_eggs_list(data[start:end]) return script_eggs
Parse eggs list from the script at the given path
13,332
def run_powerflow_onthefly(components, components_data, grid, export_pypsa_dir=None, debug=False): scenario = cfg_ding0.get("powerflow", "test_grid_stability_scenario") start_hour = cfg_ding0.get("powerflow", "start_hour") end_hour = cfg_ding0.get("powerflow", "end_hour") temp_id_set = 1 timesteps = 2 start_time = datetime(1970, 1, 1, 00, 00, 0) resolution = 'H' if debug: data_integrity(components, components_data) timerange = DatetimeIndex(freq=resolution, periods=timesteps, start=start_time) network, snapshots = create_powerflow_problem(timerange, components) for key in ['Load', 'Generator']: for attr in ['p_set', 'q_set']: if not components_data[key].empty: series = transform_timeseries4pypsa(components_data[key][ attr].to_frame(), timerange, column=attr) import_series_from_dataframe(network, series, key, attr) series = transform_timeseries4pypsa(components_data['Bus'] ['v_mag_pu_set'].to_frame(), timerange, column='v_mag_pu_set') import_series_from_dataframe(network, series, 'Bus', 'v_mag_pu_set') network.pf(snapshots) bus_data, line_data = process_pf_results(network) assign_bus_results(grid, bus_data) assign_line_results(grid, line_data) if export_pypsa_dir: export_to_dir(network, export_dir=export_pypsa_dir)
Run powerflow to test grid stability Two cases are defined to be tested here: i) load case ii) feed-in case Parameters ---------- components: dict of pandas.DataFrame components_data: dict of pandas.DataFrame export_pypsa_dir: str Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to. Export is omitted if argument is empty.
13,333
def is_spontaneous(gene, custom_id=None): spont = re.compile("[Ss](_|)0001") if spont.match(gene.id): return True elif gene.id == custom_id: return True else: return False
Input a COBRApy Gene object and check if the ID matches a spontaneous ID regex. Args: gene (Gene): COBRApy Gene custom_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: bool: If gene ID matches spontaneous ID
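A quick sketch of how the regex behaves (the gene IDs here are hypothetical):

import re

spont = re.compile("[Ss](_|)0001")
assert spont.match("s0001") and spont.match("S_0001")
assert not spont.match("b0001")  # only s/S-prefixed IDs count as spontaneous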
13,334
def run(self): try: self.owner.start_event() while True: while not self.incoming: time.sleep(0.01) while self.incoming: command = self.incoming.popleft() if command is None: raise StopIteration() command() except StopIteration: pass self.owner.stop_event()
The actual event loop. Calls the ``owner``'s :py:meth:`~Component.start_event` method, then calls its :py:meth:`~Component.new_frame_event` and :py:meth:`~Component.new_config_event` methods as required until :py:meth:`~Component.stop` is called. Finally the ``owner``'s :py:meth:`~Component.stop_event` method is called before the thread terminates.
13,335
def home_mode_set_state(self, state, **kwargs): if state not in (HOME_MODE_ON, HOME_MODE_OFF): raise ValueError('invalid Home Mode state') api = self._api_info['home_mode'] payload = dict({ 'api': api['name'], 'method': 'Switch', 'version': api['version'], 'on': state, '_sid': self._sid, }, **kwargs) response = self._get_json_with_retry(api['url'], payload) if response['success']: return True return False
Set the state of Home Mode
13,336
def keys(self): result = [] if self.fresh_index is not None: result += self.fresh_index.keys() if self.opt_index is not None: result += self.opt_index.keys() return result
Return ids of all indexed documents.
13,337
def mean_return_by_quantile(factor_data, by_date=False, by_group=False, demeaned=True, group_adjust=False): if group_adjust: grouper = [factor_data.index.get_level_values('date')] + ['group'] factor_data = utils.demean_forward_returns(factor_data, grouper) elif demeaned: factor_data = utils.demean_forward_returns(factor_data) else: factor_data = factor_data.copy() grouper = ['factor_quantile', factor_data.index.get_level_values('date')] if by_group: grouper.append('group') group_stats = factor_data.groupby(grouper)[ utils.get_forward_returns_columns(factor_data.columns)] \ .agg(['mean', 'std', 'count']) mean_ret = group_stats.T.xs('mean', level=1).T if not by_date: grouper = [mean_ret.index.get_level_values('factor_quantile')] if by_group: grouper.append(mean_ret.index.get_level_values('group')) group_stats = mean_ret.groupby(grouper)\ .agg(['mean', 'std', 'count']) mean_ret = group_stats.T.xs('mean', level=1).T std_error_ret = group_stats.T.xs('std', level=1).T \ / np.sqrt(group_stats.T.xs('count', level=1).T) return mean_ret, std_error_ret
Computes mean returns for factor quantiles across provided forward returns columns. Parameters ---------- factor_data : pd.DataFrame - MultiIndex A MultiIndex DataFrame indexed by date (level 0) and asset (level 1), containing the values for a single alpha factor, forward returns for each period, the factor quantile/bin that factor value belongs to, and (optionally) the group the asset belongs to. - See full explanation in utils.get_clean_factor_and_forward_returns by_date : bool If True, compute quantile bucket returns separately for each date. by_group : bool If True, compute quantile bucket returns separately for each group. demeaned : bool Compute demeaned mean returns (long short portfolio) group_adjust : bool Returns demeaning will occur on the group level. Returns ------- mean_ret : pd.DataFrame Mean period wise returns by specified factor quantile. std_error_ret : pd.DataFrame Standard error of returns by specified quantile.
13,338
async def unset_lock(self, resource, lock_identifier): try: with await self.connect() as redis: await redis.eval( self.unset_lock_script, keys=[resource], args=[lock_identifier] ) except aioredis.errors.ReplyError as exc: self.log.debug('Can not unset lock %s on %s', resource, repr(self)) raise LockError('Can not unset lock') from exc except (aioredis.errors.RedisError, OSError) as exc: self.log.error('Can not unset lock %s on %s: %s', resource, repr(self), repr(exc)) raise LockError('Can not unset lock') from exc except asyncio.CancelledError: self.log.debug('Unset lock %s is cancelled on %s', resource, repr(self)) raise except Exception as exc: self.log.exception('Can not unset lock %s on %s', resource, repr(self)) raise else: self.log.debug('Lock %s is unset on %s', resource, repr(self))
Unlock this instance :param resource: redis key to set :param lock_identifier: unique id of lock :raises: LockError if the lock resource was acquired with a different lock_identifier
13,339
def registIssue(self, CorpNum, taxinvoice, writeSpecification=False, forceIssue=False, dealInvoiceMgtKey=None, memo=None, emailSubject=None, UserID=None): if writeSpecification: taxinvoice.writeSpecification = True if forceIssue: taxinvoice.forceIssue = True if dealInvoiceMgtKey != None and dealInvoiceMgtKey != '': taxinvoice.dealInvoiceMgtKey = dealInvoiceMgtKey if memo != None and memo != '': taxinvoice.memo = memo if emailSubject != None and emailSubject != '': taxinvoice.emailSubject = emailSubject postData = self._stringtify(taxinvoice) return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "ISSUE")
즉시 발행 args CorpNum : 팝빌회원 사업자번호 taxinvoice : 세금계산서 객체 writeSpecification : 거래명세서 동시작성 여부 forceIssue : 지연발행 강제여부 dealInvoiceMgtKey : 거래명세서 문서관리번호 memo : 메모 emailSubject : 메일제목, 미기재시 기본제목으로 전송 UsreID : 팝빌회원 아이디 return 검색결과 정보 raise PopbillException
13,340
def get_border_phase(self, idn=0, idr=0): assert idn in [-1, 0, 1] assert idr in [-1, 0, 1] n = self.sphere_index + self.dn * idn r = self.radius + self.dr * idr idn += 1 idr += 1 if self._n_border[idn, idr] == n and self._r_border[idn, idr] == r: if self.verbose > 3: print("Using cached border phase (n{}, r{})".format(idn, idr)) pha = self._border_pha[(idn, idr)] else: if self.verbose > 3: print("Computing border phase (n{}, r{})".format(idn, idr)) kwargs = self.model_kwargs.copy() kwargs["radius"] = r kwargs["sphere_index"] = n kwargs["center"] = [self.posx_offset, self.posy_offset] tb = time.time() pha = self.sphere_method(**kwargs).pha if self.verbose > 2: print("Border phase computation time:", self.sphere_method.__module__, time.time() - tb) self._border_pha[(idn, idr)] = pha self._n_border[idn, idr] = n self._r_border[idn, idr] = r return pha
Return one of nine border fields Parameters ---------- idn: int Index for refractive index. One of -1 (left), 0 (center), 1 (right) idr: int Index for radius. One of -1 (left), 0 (center), 1 (right)
13,341
def helical_turbulent_fd_Mori_Nakayama(Re, Di, Dc): term = (Re*(Di/Dc)**2)**-0.2 return 0.3*(Dc/Di)**-0.5*term*(1. + 0.112*term)
r'''Calculates Darcy friction factor for a fluid flowing inside a curved pipe such as a helical coil under turbulent conditions, using the method of Mori and Nakayama [1]_, also shown in [2]_ and [3]_. .. math:: f_{curv} = 0.3\left(\frac{D_i}{D_c}\right)^{0.5} \left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\left[1 + 0.112\left[Re\left(\frac{D_i}{D_c}\right)^2\right]^{-0.2}\right] Parameters ---------- Re : float Reynolds number with `D=Di`, [-] Di : float Inner diameter of the coil, [m] Dc : float Diameter of the helix/coil measured from the center of the tube on one side to the center of the tube on the other side, [m] Returns ------- fd : float Darcy friction factor for a curved pipe [-] Notes ----- Valid from the transition to turbulent flow up to :math:`Re=6.5\times 10^{5}\sqrt{D_i/D_c}`. Does not use a straight pipe correlation, and so will not converge on the straight pipe result at very low curvature. Examples -------- >>> helical_turbulent_fd_Mori_Nakayama(1E4, 0.01, .2) 0.037311802071379796 References ---------- .. [1] Mori, Yasuo, and Wataru Nakayama. "Study of Forced Convective Heat Transfer in Curved Pipes (2nd Report, Turbulent Region)." International Journal of Heat and Mass Transfer 10, no. 1 (January 1, 1967): 37-59. doi:10.1016/0017-9310(67)90182-2. .. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and Correlations for Convection Heat Transfer and Pressure Losses in Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0, no. 0 (June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693. .. [3] Ali, Shaukat. "Pressure Drop Correlations for Flow through Regular Helical Coil Tubes." Fluid Dynamics Research 28, no. 4 (April 2001): 295-310. doi:10.1016/S0169-5983(00)00034-4.
13,342
def verbose_message(self): if self.threshold is None: return return '%s is %s than %s' % (self.value, self.adjective, self.threshold)
return more complete message
13,343
def validate_path(ctx, param, value): client = ctx.obj if value is None: from renku.models.provenance import ProcessRun activity = client.process_commit() if not isinstance(activity, ProcessRun): raise click.BadParameter() return activity.path return value
Detect a workflow path if it is not passed.
13,344
def macro_body(self, node, frame, children=None): frame = self.function_scoping(node, frame, children) frame.require_output_check = False args = frame.arguments self.indent() self.buffer(frame) self.pull_locals(frame) self.blockvisit(node.body, frame) self.return_buffer_contents(frame) self.outdent() return frame
Dump the function def of a macro or call block.
13,345
def srcnode(self): srcdir_list = self.dir.srcdir_list() if srcdir_list: srcnode = srcdir_list[0].Entry(self.name) srcnode.must_be_same(self.__class__) return srcnode return self
If this node is in a build path, return the node corresponding to its source file. Otherwise, return ourself.
13,346
def get(self, metric_id=None, **kwargs): path = 'metrics' if metric_id is not None: path += '/%s' % metric_id return self.paginate_get(path, data=kwargs)
Get metrics :param int metric_id: Metric ID :return: Metrics data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-metrics .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
13,347
async def _deploy(self, charm_url, application, series, config, constraints, endpoint_bindings, resources, storage, channel=None, num_units=None, placement=None, devices=None): log.info('Deploying %s', charm_url) config = {k: str(v) for k, v in config.items()} config = yaml.dump({application: config}, default_flow_style=False) app_facade = client.ApplicationFacade.from_connection( self.connection()) app = client.ApplicationDeploy( charm_url=charm_url, application=application, series=series, channel=channel, config_yaml=config, constraints=parse_constraints(constraints), endpoint_bindings=endpoint_bindings, num_units=num_units, resources=resources, storage=storage, placement=placement, devices=devices, ) result = await app_facade.Deploy([app]) errors = [r.error.message for r in result.results if r.error] if errors: raise JujuError('\n'.join(errors)) return await self._wait_for_new('application', application)
Logic shared between `Model.deploy` and `BundleHandler.deploy`.
13,348
def get_code(self, timestamp=None): return generate_twofactor_code_for_time(b64decode(self.shared_secret), self.get_time() if timestamp is None else timestamp)
:param timestamp: time to use for code generation :type timestamp: int :return: two factor code :rtype: str
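The Steam code is derived from a shared secret and the current time, much like TOTP. A generic stdlib sketch of the time-based idea (this is not Steam's exact alphabet or truncation, just the RFC 4226/6238-style mechanism):

import base64, hmac, struct, time

def totp_like(shared_secret_b64: str, timestamp: int, interval: int = 30) -> int:
    # HMAC the time counter with the shared secret, then dynamically truncate.
    key = base64.b64decode(shared_secret_b64)
    counter = struct.pack(">Q", timestamp // interval)
    digest = hmac.new(key, counter, "sha1").digest()
    offset = digest[-1] & 0x0F
    return (struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7FFFFFFF) % 10**6

print(totp_like(base64.b64encode(b"secret").decode(), int(time.time())))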
13,349
def retrieve(self, session, lookup_keys, *args, **kwargs): model = self._get_model(lookup_keys, session) return self.serialize_model(model)
Retrieves a model using the lookup keys provided. Only one model should be returned by the lookup_keys or else the manager will fail. :param Session session: The SQLAlchemy session to use :param dict lookup_keys: A dictionary mapping the fields and their expected values :return: The dictionary of keys and values for the retrieved model. The only values returned will be those specified by the fields attribute on the class :rtype: dict :raises: NotFoundException
13,350
def share_file(comm, path): localrank, _ = get_local_rank_size(comm) if comm.Get_rank() == 0: with open(path, 'rb') as fh: data = fh.read() comm.bcast(data) else: data = comm.bcast(None) if localrank == 0: os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'wb') as fh: fh.write(data) comm.Barrier()
Copies the file from rank 0 to all other ranks Puts it in the same place on all machines
13,351
def iter_islast(iterable): it = iter(iterable) prev = next(it) for item in it: yield prev, False prev = item yield prev, True
Generate (item, islast) pairs for an iterable. Generates pairs where the first element is an item from the iterable source and the second element is a boolean flag indicating if it is the last item in the sequence.
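Typical use: special-casing the last element without knowing the length up front, e.g. joining with a different final separator. A self-contained sketch:

def iter_islast(iterable):
    it = iter(iterable)
    prev = next(it)
    for item in it:
        yield prev, False
        prev = item
    yield prev, True

# Comma after every item except the last.
parts = [w + ("" if last else ", ") for w, last in iter_islast(["a", "b", "c"])]
print("".join(parts))  # a, b, c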
13,352
def path_size(path, total=False, ext='', level=None, verbosity=0): dict_of_path_sizes = dict((d['path'], d['size']) for d in util.find_files(path, ext=ext, level=level, verbosity=0)) if total: return reduce(lambda tot, size: tot + size, dict_of_path_sizes.values(), 0) return dict_of_path_sizes
Walk the file tree and query the file.stat object(s) to compute their total (or individual) size in bytes Returns: dict: {relative_path: file_size_in_bytes, ...} Examples: >>> all(d >= 0 for d in path_size(__file__).values()) True >>> sum(path_size(os.path.dirname(__file__)).values()) == path_size(os.path.dirname(__file__), total=True) True >>> path_size(__file__, total=True) > 10000 True >>> len(path_size('.')) >= 2 True
13,353
def _writeXputMaps(self, session, directory, mapCards, name=None, replaceParamFile=None): if self.mapType in self.MAP_TYPES_SUPPORTED: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip() filename = self._replaceNewFilename(filename, name) self._invokeWrite(fileIO=RasterMapFile, session=session, directory=directory, filename=filename, replaceParamFile=replaceParamFile) else: for card in self.projectCards: if (card.name in mapCards) and self._noneOrNumValue(card.value): filename = card.value.strip() fileExtension = filename.split('.')[1] if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS: filename = self._replaceNewFilename(filename, name) self._invokeWrite(fileIO=RasterMapFile, session=session, directory=directory, filename=filename, replaceParamFile=replaceParamFile) log.error('Could not write map files. MAP_TYPE {0} not supported.'.format(self.mapType))
GSSHAPY Project Write Map Files to File Method
13,354
def header(self, sheet, name): header = sheet.row(0) for i, column in enumerate(self.headers[name]): header.write(i, self.headers[name][i])
Write sheet header. Args: sheet: (xlwt.Worksheet.Worksheet) instance of xlwt sheet. name: (unicode) name of sheet.
13,355
def select(self, *features): for feature_name in features: feature_module = importlib.import_module(feature_name) try: feature_spec_module = importlib.import_module( feature_name + '.feature' ) if not hasattr(feature_spec_module, 'select'): raise CompositionError( 'Feature module of feature %s contains no select function!' % ( feature_name ) ) args, varargs, keywords, defaults = inspect.getargspec( feature_spec_module.select ) if varargs or keywords or defaults or len(args) != 1: raise CompositionError( 'select function of feature %s must take exactly one argument!' % ( feature_name ) ) feature_spec_module.select(self) except ImportError: raise
selects the features given as string e.g passing 'hello' and 'world' will result in imports of 'hello' and 'world'. Then, if possible 'hello.feature' and 'world.feature' are imported and select is called in each feature module.
13,356
def commit(name, repository, tag='latest', message=None, author=None): if not isinstance(repository, six.string_types): repository = six.text_type(repository) if not isinstance(tag, six.string_types): tag = six.text_type(tag) time_started = time.time() response = _client_wrapper( 'commit', name, repository=repository, tag=tag, message=message, author=author) ret = {'Time_Elapsed': time.time() - time_started} _clear_context() image_id = None for id_ in ('Id', 'id', 'ID'): if id_ in response: image_id = response[id_] break if image_id is None: raise CommandExecutionError('No image ID was returned in the API response') ret['Id'] = image_id return ret
.. versionchanged:: 2018.3.0 The repository and tag must now be passed separately using the ``repository`` and ``tag`` arguments, rather than together in the (now deprecated) ``image`` argument. Commits a container, thereby promoting it to an image. Equivalent to running the ``docker commit`` Docker CLI command. name Container name or ID to commit repository Repository name for the image being committed .. versionadded:: 2018.3.0 tag : latest Tag name for the image .. versionadded:: 2018.3.0 image .. deprecated:: 2018.3.0 Use both ``repository`` and ``tag`` instead message Commit message (Optional) author Author name (Optional) **RETURN DATA** A dictionary containing the following keys: - ``Id`` - ID of the newly-created image - ``Image`` - Name of the newly-created image - ``Time_Elapsed`` - Time in seconds taken to perform the commit CLI Example: .. code-block:: bash salt myminion docker.commit mycontainer myuser/myimage mytag
13,357
def unindent_selection(self, cursor): doc = self.editor.document() tab_len = self.editor.tab_length nb_lines = len(cursor.selection().toPlainText().splitlines()) if nb_lines == 0: nb_lines = 1 block = doc.findBlock(cursor.selectionStart()) assert isinstance(block, QtGui.QTextBlock) i = 0 _logger().debug('unindent %d lines', nb_lines) while i < nb_lines: txt = block.text()[self.min_column:] _logger().debug('line to unindent: %r', txt) _logger().debug('use spaces instead of tabs: %r', self.editor.use_spaces_instead_of_tabs) if self.editor.use_spaces_instead_of_tabs: indentation = len(txt) - len(txt.lstrip()) else: indentation = len(txt) - len(txt.replace('\t', '')) _logger().debug('line %d: indentation=%d, min_column=%d', i, indentation, self.min_column) if indentation > 0: c = QtGui.QTextCursor(block) c.movePosition(c.StartOfLine, cursor.MoveAnchor) c.movePosition(c.Right, cursor.MoveAnchor, indentation + self.min_column) max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(c, max_spaces) for _ in range(spaces): c.deletePreviousChar() block = block.next() i += 1 return cursor
Un-indents selected text :param cursor: QTextCursor
13,358
def get_table_list(self, cursor): "Returns a list of table names in the current database." result = [TableInfo(SfProtectName(x['name']), 't') for x in self.table_list_cache['sobjects']] return result
Returns a list of table names in the current database.
13,359
def object_to_json(obj, indent=2): instance_json = json.dumps(obj, indent=indent, ensure_ascii=False, cls=DjangoJSONEncoder) return instance_json
Transform an object to JSON.
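A hedged sketch of the same pattern outside Django, using the stdlib encoder (DjangoJSONEncoder additionally handles datetimes, Decimals, and lazy strings):

import json

def object_to_json(obj, indent=2):
    # ensure_ascii=False keeps non-ASCII text readable in the output.
    return json.dumps(obj, indent=indent, ensure_ascii=False)

print(object_to_json({"name": "café", "id": 1}))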
13,360
def _DecodeUnrecognizedFields(message, pair_type): new_values = [] codec = _ProtoJsonApiTools.Get() for unknown_field in message.all_unrecognized_fields(): value, _ = message.get_unrecognized_field_info(unknown_field) value_type = pair_type.field_by_name('value') if isinstance(value_type, messages.MessageField): decoded_value = DictToMessage(value, pair_type.value.message_type) else: decoded_value = codec.decode_field( pair_type.value, value) try: new_pair_key = str(unknown_field) except UnicodeEncodeError: new_pair_key = protojson.ProtoJson().decode_field( pair_type.key, unknown_field) new_pair = pair_type(key=new_pair_key, value=decoded_value) new_values.append(new_pair) return new_values
Process unrecognized fields in message.
13,361
def get_salic_url(item, prefix, df_values=None): url_keys = { : , : , : , : , : , : , } if df_values: values = [item[v] for v in df_values] url_values = dict( zip(url_keys.keys(), values) ) else: url_values = { "pronac": item["idPronac"], "uf": item["UfItem"], "product": item["idProduto"], "county": item["cdCidade"], "item_id": item["idPlanilhaItens"], "stage": item["cdEtapa"], } item_data = [(value, url_values[key]) for key, value in url_keys.items()] url = prefix for k, v in item_data: url += f return url
Mount a salic url for the given item.
13,362
def diff(self, container): return self._result( self._get(self._url("/containers/{0}/changes", container)), True )
Inspect changes on a container's filesystem. Args: container (str): The container to diff Returns: (str) Raises: :py:class:`docker.errors.APIError` If the server returns an error.
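A hedged usage sketch with docker-py (the container name is hypothetical); each change record carries Kind 0 = modified, 1 = added, 2 = deleted:

import docker

client = docker.APIClient()
for change in client.diff("my-container"):  # hypothetical container name
    print(change["Kind"], change["Path"])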
13,363
def RemoveMethod(self, function): self.added_methods = [dm for dm in self.added_methods if dm.method is not function]
Removes the specified function's MethodWrapper from the added_methods list, so we don't re-bind it when making a clone.
13,364
def capture_heroku_database(self): self.print_message("Capturing database backup for app %s" % self.args.source_app) args = [ "heroku", "pg:backups:capture", "--app=%s" % self.args.source_app, ] if self.args.use_pgbackups: args = [ "heroku", "pgbackups:capture", "--app=%s" % self.args.source_app, "--expire", ] subprocess.check_call(args)
Capture Heroku database backup.
13,365
def trim_decimals(s, precision=-3): encoded = s.encode(, ) str_val = "" if six.PY3: str_val = str(encoded, encoding=, errors=)[:precision] else: if precision == 0: str_val = str(encoded) else: str_val = str(encoded)[:precision] if len(str_val) > 0: return float(str_val) else: return 0
Convert from scientific notation using precision
13,366
def _compile_mapping(self, schema, invalid_msg=None): invalid_msg = invalid_msg or all_required_keys = set(key for key in schema if key is not Extra and ((self.required and not isinstance(key, (Optional, Remove))) or isinstance(key, Required))) all_default_keys = set(key for key in schema if isinstance(key, Required) or isinstance(key, Optional)) _compiled_schema = {} for skey, svalue in iteritems(schema): new_key = self._compile(skey) new_value = self._compile(svalue) _compiled_schema[skey] = (new_key, new_value) candidates = list(_iterate_mapping_candidates(_compiled_schema)) additional_candidates = [] candidates_by_key = {} for skey, (ckey, cvalue) in candidates: if type(skey) in primitive_types: candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue))) elif isinstance(skey, Marker) and type(skey.schema) in primitive_types: candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue))) else: additional_candidates.append((skey, (ckey, cvalue))) def validate_mapping(path, iterable, out): required_keys = all_required_keys.copy() key_value_map = type(out)() for key, value in iterable: key_value_map[key] = value for key in all_default_keys: if not isinstance(key.default, Undefined) and \ key.schema not in key_value_map: key_value_map[key.schema] = key.default() error = None errors = [] for key, value in key_value_map.items(): key_path = path + [key] remove_key = False relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates) for skey, (ckey, cvalue) in relevant_candidates: try: new_key = ckey(key_path, key) except er.Invalid as e: if len(e.path) > len(key_path): raise if not error or len(e.path) > len(error.path): error = e continue exception_errors = [] is_remove = new_key is Remove try: cval = cvalue(key_path, value) for key in required_keys: msg = key.msg if hasattr(key, ) and key.msg else errors.append(er.RequiredFieldInvalid(msg, path + [key])) if errors: raise er.MultipleInvalid(errors) return out return validate_mapping
Create validator for given mapping.
13,367
def _parse_hextet(self, hextet_str): if not self._HEX_DIGITS.issuperset(hextet_str): raise ValueError if len(hextet_str) > 4: raise ValueError hextet_int = int(hextet_str, 16) if hextet_int > 0xFFFF: raise ValueError return hextet_int
Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF].
13,368
def code_almost_equal(a, b): split_a = split_and_strip_non_empty_lines(a) split_b = split_and_strip_non_empty_lines(b) if len(split_a) != len(split_b): return False for (index, _) in enumerate(split_a): if ''.join(split_a[index].split()) != ''.join(split_b[index].split()): return False return True
Return True if code is similar. Ignore whitespace when comparing specific line.
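A usage sketch showing what "whitespace-insensitive per line" means here; the helper is an assumption modeled on its name:

def split_and_strip_non_empty_lines(text):
    # Assumed helper: non-empty lines, stripped of surrounding whitespace.
    return [line.strip() for line in text.splitlines() if line.strip()]

a = "x = 1\ny  =  2\n"
b = "x=1\ny = 2\n"
lines_a, lines_b = split_and_strip_non_empty_lines(a), split_and_strip_non_empty_lines(b)
same = len(lines_a) == len(lines_b) and all(
    "".join(p.split()) == "".join(q.split()) for p, q in zip(lines_a, lines_b))
print(same)  # True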
13,369
def add_fields(self, fields=None, **kwargs): if fields is not None: for field in fields: self.fields.append(field)
Add the fields into the list of fields.
13,370
def get_keypair_name(): username = get_username() assert '-' not in username, "username must not contain -, change $USER" validate_aws_name(username) assert len(username) < 30 return get_prefix() + '-' + username
Returns current keypair name.
13,371
def encode_network(root): def fix_values(obj): if isinstance(obj, Container): obj.update((k, get_ref(v)) for (k, v) in obj.items() if k != ) fixed_obj = obj elif isinstance(obj, Dictionary): fixed_obj = obj.__class__(dict( (get_ref(field), get_ref(value)) for (field, value) in obj.value.items() )) elif isinstance(obj, dict): fixed_obj = dict( (get_ref(field), get_ref(value)) for (field, value) in obj.items() ) elif isinstance(obj, list): fixed_obj = [get_ref(field) for field in obj] elif isinstance(obj, Form): fixed_obj = obj.__class__(**dict( (field, get_ref(value)) for (field, value) in obj.value.items() )) elif isinstance(obj, ContainsRefs): fixed_obj = obj.__class__([get_ref(field) for field in obj.value]) else: return obj fixed_obj._made_from = obj return fixed_obj objects = [] def get_ref(obj, objects=objects): obj = PythonicAdapter(Pass)._encode(obj, None) if isinstance(obj, (FixedObject, Container)): if getattr(obj, , None): index = obj._index else: objects.append(None) obj._index = index = len(objects) objects[index - 1] = fix_values(obj) return Ref(index) else: return obj get_ref(root) for obj in objects: if getattr(obj, , None): del obj._index return objects
Yield ref-containing obj table entries from object network
13,372
def Append(self, **kw): kw = copy_non_reserved_keywords(kw) for key, val in kw.items(): update_dict = orig.update except AttributeError: try: self._dict[key] = orig + val except (KeyError, TypeError): try: add_to_orig = orig.append except AttributeError: if orig: val.insert(0, orig) self._dict[key] = val else: tmp = [] for (k, v) in orig.items(): if v is not None: tmp.append((k, v)) else: tmp.append((k,)) orig = tmp orig += val self._dict[key] = orig else: for v in val: orig[v] = None else: try: update_dict(val) except (AttributeError, TypeError, ValueError): if SCons.Util.is_Dict(val): for k, v in val.items(): orig[k] = v else: orig[val] = None self.scanner_map_delete(kw)
Append values to existing construction variables in an Environment.
13,373
def patch(self, patched_value): try: if self.getter: setattr(self.getter_class, self.attr_name, patched_value) else: setattr(self.orig_object, self.attr_name, patched_value) except TypeError: proxy_name = % ( self.orig_object.__module__, self.orig_object.__name__, patched_value.__class__.__name__ ) self.proxy_object = type(proxy_name, (self.orig_object,), {self.attr_name: patched_value}) mod = sys.modules[self.orig_object.__module__] setattr(mod, self.orig_object.__name__, self.proxy_object)
Set a new value for the attribute of the object.
13,374
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux, time, med): self.lam[b] = 10 ** log_lam scatter = [None for i in range(len(masks))] for i in range(len(masks)): model = self.cv_compute(b, *pre_v[i]) try: gpm, _ = gp.predict(flux - model - med, time[masks[i]]) except ValueError: return 1.e30 fdet = (flux - model)[masks[i]] - gpm scatter[i] = 1.e6 * (1.4826 * np.nanmedian(np.abs(fdet / med - np.nanmedian(fdet / med))) / np.sqrt(len(masks[i]))) return np.max(scatter)
Computes the scatter in the validation set.
13,375
def remove_connection(self, id_interface, back_or_front): msg_err = u'Parameter %s is invalid. Value: %s.' if not is_valid_0_1(back_or_front): raise InvalidParameterError( msg_err % ('back_or_front', back_or_front)) if not is_valid_int_param(id_interface): raise InvalidParameterError( msg_err % ('id_interface', id_interface)) url = 'interface/%s/%s/' % (str(id_interface), str(back_or_front)) code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml)
Remove a connection between two interfaces :param id_interface: One side of relation :param back_or_front: This side of relation is back(0) or front(1) :return: None :raise InterfaceInvalidBackFrontError: Front or Back of interfaces not match to remove connection :raise InvalidParameterError: Interface id or back or front indicator is none or invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
13,376
def txn_data2schema_key(self, txn: dict) -> SchemaKey: rv = None if self == Protocol.V_13: rv = SchemaKey(txn['identifier'], txn['data']['name'], txn['data']['version']) else: txn_txn = txn.get('txn', None) or txn rv = SchemaKey( txn_txn['metadata']['from'], txn_txn['data']['data']['name'], txn_txn['data']['data']['version']) return rv
Return schema key from ledger transaction data. :param txn: get-schema transaction (by sequence number) :return: schema key identified
13,377
def _assign_name(self, obj, name, shaders):
    if self._is_global(obj):
        assert name not in self._global_ns
        self._global_ns[name] = obj
    else:
        for shader in shaders:
            ns = self._shader_ns[shader]
            assert name not in ns
            ns[name] = obj
    self._object_names[obj] = name
Assign *name* to *obj* in *shaders*.
13,378
def authorize(self, ip_protocol=None, from_port=None, to_port=None,
              cidr_ip=None, src_group=None):
    if src_group:
        cidr_ip = None
        src_group_name = src_group.name
        src_group_owner_id = src_group.owner_id
    else:
        src_group_name = None
        src_group_owner_id = None
    status = self.connection.authorize_security_group(self.name,
                                                      src_group_name,
                                                      src_group_owner_id,
                                                      ip_protocol,
                                                      from_port,
                                                      to_port,
                                                      cidr_ip)
    if status:
        self.add_rule(ip_protocol, from_port, to_port, src_group_name,
                      src_group_owner_id, cidr_ip)
    return status
Add a new rule to this security group. You need to pass in either src_group_name OR ip_protocol, from_port, to_port, and cidr_ip. In other words, either you are authorizing another group or you are authorizing some ip-based rule. :type ip_protocol: string :param ip_protocol: Either tcp | udp | icmp :type from_port: int :param from_port: The beginning port number you are enabling :type to_port: int :param to_port: The ending port number you are enabling :type cidr_ip: string :param cidr_ip: The CIDR block you are providing access to. See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or :class:`boto.ec2.securitygroup.GroupOrCIDR` :param src_group: The Security Group you are granting access to. :rtype: bool :return: True if successful.
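A hedged usage sketch of both authorization styles, assuming boto2 and valid AWS credentials; the group names are illustrative.
# Hypothetical usage; requires boto2 and AWS credentials.
import boto

conn = boto.connect_ec2()
web = conn.create_security_group('web', 'web servers')
# ip-based rule
web.authorize(ip_protocol='tcp', from_port=80, to_port=80, cidr_ip='0.0.0.0/0')
# group-based rule
db = conn.create_security_group('db', 'database servers')
db.authorize(src_group=web)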
13,379
def push(self, *args, **kwargs):
    # NOTE: the string literals in this exchange reference were lost in
    # extraction; the values below follow the routing-key layout described
    # in the docstring and are assumed.
    ref = {
        'exchange': 'push',
        'name': 'push',
        'routingKey': [
            {
                'constant': 'primary',
                'multipleWords': False,
                'name': 'routingKeyKind',
            },
            {
                'multipleWords': False,
                'name': 'organization',
            },
            {
                'multipleWords': False,
                'name': 'repository',
            },
        ],
        'schema': 'v1/github-push-message.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
GitHub push Event When a GitHub push event is posted it will be broadcast on this exchange with the designated `organization` and `repository` in the routing-key along with event specific metadata in the payload. This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required) * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
13,380
def textContent(self, text: str) -> None:
    self._set_text_content(text)
    if self.connected:
        self._set_text_content_web(text)
Set textContent both on this node and related browser node.
13,381
def get_instances(feature_name):
    feats = []
    for ft in AncillaryFeature.features:
        if ft.feature_name == feature_name:
            feats.append(ft)
    return feats
Return all instances that compute `feature_name`
13,382
def call_fset(self, obj, value) -> None:
    vars(obj)[self.name] = self.fset(obj, value)
Store the given custom value and call the setter function.
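Storing the setter's return value in the instance dict makes later reads plain dict lookups. A minimal standalone descriptor sketch of the same pattern (not this library's class):
class CachedProperty:
    def __init__(self, fset):
        self.fset = fset
        self.name = fset.__name__

    def __set__(self, obj, value):
        # store whatever the setter returns, keyed by the property name
        vars(obj)[self.name] = self.fset(obj, value)

    def __get__(self, obj, objtype=None):
        return vars(obj)[self.name]

class Point:
    @CachedProperty
    def x(self, value):
        return float(value)  # normalize on assignment

p = Point()
p.x = '3'
print(p.x)  # 3.0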
13,383
def handle_stream(self, stream, address):
    # NOTE: the log message literals were lost in extraction; placeholder
    # messages are used below. The 'head'/'body' frame keys follow Salt's
    # standard message framing and are assumed.
    log.trace('Stream connect from %s', address)
    self.clients.append((stream, address))
    unpacker = msgpack.Unpacker()
    try:
        while True:
            wire_bytes = yield stream.read_bytes(4096, partial=True)
            unpacker.feed(wire_bytes)
            for framed_msg in unpacker:
                if six.PY3:
                    framed_msg = salt.transport.frame.decode_embedded_strs(
                        framed_msg
                    )
                header = framed_msg['head']
                self.io_loop.spawn_callback(self.message_handler,
                                            stream, header, framed_msg['body'])
    except StreamClosedError:
        log.trace('Stream from %s closed', address)
        self.clients.remove((stream, address))
    except Exception as e:
        log.trace('Other stream exception: %s', e)
        self.clients.remove((stream, address))
        stream.close()
Handle incoming streams and add messages to the incoming queue
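The feed-then-iterate pattern exists because a single read_bytes() call can return a partial message or several messages at once. A self-contained msgpack illustration; the 'head'/'body' keys mirror the framing assumed in the reconstruction above.
import msgpack

unpacker = msgpack.Unpacker(raw=False)
frame = msgpack.packb({'head': {'id': 1}, 'body': 'payload'})
# feed the wire bytes in two arbitrary chunks, as a socket might deliver them
unpacker.feed(frame[:5])
unpacker.feed(frame[5:])
for msg in unpacker:
    print(msg['head'], msg['body'])  # {'id': 1} payload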
13,384
def open(self):
    if self._status == "opened":
        return

    self.reset()
    self._loading = True
    self._status = "opened"

    path = self._topology_file()
    if not os.path.exists(path):
        self._loading = False
        return
    try:
        shutil.copy(path, path + ".backup")
    except OSError:
        pass
    try:
        project_data = load_topology(path)

        keys_to_load = [
            "auto_start",
            "auto_close",
            "auto_open",
            "scene_height",
            "scene_width",
            "zoom",
            "show_layers",
            "snap_to_grid",
            "show_grid",
            "show_interface_labels"
        ]

        for key in keys_to_load:
            val = project_data.get(key, None)
            if val is not None:
                setattr(self, key, val)

        topology = project_data["topology"]
        for compute in topology.get("computes", []):
            yield from self.controller.add_compute(**compute)
        for node in topology.get("nodes", []):
            compute = self.controller.get_compute(node.pop("compute_id"))
            name = node.pop("name")
            node_id = node.pop("node_id", str(uuid.uuid4()))
            yield from self.add_node(compute, name, node_id, dump=False, **node)
        for link_data in topology.get("links", []):
            # NOTE: the key literal in this guard was lost in extraction;
            # "link_id" is assumed since it is what the next line reads.
            if "link_id" not in link_data.keys():
                continue
            link = yield from self.add_link(link_id=link_data["link_id"])
            if "filters" in link_data:
                yield from link.update_filters(link_data["filters"])
            for node_link in link_data["nodes"]:
                node = self.get_node(node_link["node_id"])
                port = node.get_port(node_link["adapter_number"], node_link["port_number"])
                if port is None:
                    log.warning("Port {}/{} for {} not found".format(
                        node_link["adapter_number"], node_link["port_number"], node.name))
                    continue
                if port.link is not None:
                    log.warning("Port {}/{} is already connected to link ID {}".format(
                        node_link["adapter_number"], node_link["port_number"], port.link.id))
                    continue
                yield from link.add_node(node, node_link["adapter_number"],
                                         node_link["port_number"],
                                         label=node_link.get("label"), dump=False)
            if len(link.nodes) != 2:
                # incomplete link, drop it
                yield from self.delete_link(link.id, force_delete=True)

        for drawing_data in topology.get("drawings", []):
            yield from self.add_drawing(dump=False, **drawing_data)

        self.dump()
    except Exception as e:
        # roll back: close the project on the computes and restore the backup
        for compute in list(self._project_created_on_compute):
            try:
                yield from compute.post("/projects/{}/close".format(self._id))
            except (ComputeError, aiohttp.web.HTTPNotFound,
                    aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError):
                pass
        try:
            if os.path.exists(path + ".backup"):
                shutil.copy(path + ".backup", path)
        except (PermissionError, OSError):
            pass
        self._status = "closed"
        self._loading = False
        if isinstance(e, ComputeError):
            raise aiohttp.web.HTTPConflict(text=str(e))
        else:
            raise e
    try:
        os.remove(path + ".backup")
    except OSError:
        pass

    self._loading = False
    if self._auto_start:
        asyncio.async(self.start_all())
Load topology elements
13,385
def get_previous_character(self):
    cursor = self.textCursor()
    cursor.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor)
    return cursor.selectedText()
Returns the character before the cursor. :return: Previous cursor character. :rtype: QString
13,386
def build(self):
    for detail_view in self.detail_views:
        view = self._get_view(detail_view)
        view().build_object(self)
    self._build_extra()
    self._build_related()
Iterates through the views pointed to by self.detail_views, runs build_object with `self`, and calls _build_extra() and _build_related().
13,387
def _nonzero_counter_hook(module, inputs, output):
    if not hasattr(module, "__counter_nonzero__"):
        raise ValueError("register_counter_nonzero was not called for this network")

    if module.training:
        return

    size = module.__counter_nonzero__.get("input", 0)
    size += sum([torch.nonzero(i).size(0) for i in inputs])
    module.__counter_nonzero__["input"] = size

    size = module.__counter_nonzero__.get("output", 0)
    size += torch.nonzero(output).size(0)
    module.__counter_nonzero__["output"] = size

    for name, param in module._parameters.items():
        if param is None:
            continue
        size = module.__counter_nonzero__.get(name, 0)
        size += torch.nonzero(param.data).size(0)
        module.__counter_nonzero__[name] = size

    for name, buffer in module._buffers.items():
        if buffer is None:
            continue
        size = module.__counter_nonzero__.get(name, 0)
        size += torch.nonzero(buffer).size(0)
        module.__counter_nonzero__[name] = size
Module hook used to count the number of nonzero floating point values from all the tensors used by the given network during inference. This hook will be called every time before :func:`forward` is invoked. See :func:`torch.nn.Module.register_forward_hook`
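A hedged sketch of wiring the hook into a network, assuming PyTorch; the register_counter_nonzero helper mentioned in the error message is approximated by setting the counter dict manually, which is an assumption about what it does.
# Hypothetical wiring; requires PyTorch.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
model.__counter_nonzero__ = {}  # what register_counter_nonzero presumably sets up
model.register_forward_hook(_nonzero_counter_hook)

model.eval()  # the hook is a no-op in training mode
with torch.no_grad():
    model(torch.randn(8, 4))
print(model.__counter_nonzero__)  # nonzero counts for input/output/weight/bias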
13,388
def histogram_info(self) -> dict:
    # NOTE: the dictionary keys were lost in extraction; the names below
    # mirror the attribute names and are assumed.
    return {
        'support_atoms': self.support_atoms,
        'atom_delta': self.atom_delta,
        'vmin': self.vmin,
        'vmax': self.vmax,
        'atoms': self.atoms
    }
Return extra information about histogram
13,389
def list_member_topics(self, member_id):
    # NOTE: most string literals in this method were lost in extraction.
    # The titles, URL pattern and field names below are assumptions, except
    # the 'json' key, which the docstring confirms.
    title = '%s.list_member_topics' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'member_id': member_id
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    if not member_id:
        raise IndexError('%s requires member_id argument.' % title)

    # construct request fields
    url = '%s/members/%s' % (self.endpoint, str(member_id))
    params = {
        'fields': 'topics'
    }

    # send request and construct response
    response_details = self._get_request(url, params=params)
    member_topics = {
        'json': []
    }
    for key, value in response_details.items():
        if not key == 'json':
            member_topics[key] = value
    if response_details['json']:
        if 'topics' in response_details['json'].keys():
            for topic in response_details['json']['topics']:
                member_topics['json'].append(self.objects.topic.ingest(**topic))

    return member_topics
a method to retrieve a list of topics a member follows :param member_id: integer with meetup member id :return: dictionary with list of topic details inside [json] key topic_details = self.objects.topic.schema
13,390
def animate(self, duration=None, easing=None, on_complete=None,
            on_update=None, round=False, **kwargs):
    scene = self.get_scene()
    if scene:
        return scene.animate(self, duration, easing, on_complete,
                             on_update, round, **kwargs)
    else:
        for key, val in kwargs.items():
            setattr(self, key, val)
        return None
Request parent Scene to Interpolate attributes using the internal tweener. Specify sprite's attributes that need changing. `duration` defaults to 0.4 seconds and `easing` to cubic in-out (for others see pytweener.Easing class). Example:: # tween some_sprite to coordinates (50,100) using default duration and easing self.animate(x = 50, y = 100)
13,391
def diff_files(left, right, diff_options=None, formatter=None):
    return _diff(etree.parse, left, right,
                 diff_options=diff_options, formatter=formatter)
Takes two filenames or streams, and diffs the XML in those files
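A hedged usage sketch, assuming this helper is the one shipped in the xmldiff package; file-like objects work because etree.parse accepts them.
# Hypothetical usage; requires the xmldiff package.
from io import StringIO
from xmldiff.main import diff_files

left = StringIO('<root><a>1</a></root>')
right = StringIO('<root><a>2</a></root>')
print(diff_files(left, right))  # e.g. a list of update actions for /root/a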
13,392
def unicode_decode(data, encoding_list):
    # NOTE: the assertion message literal was lost in extraction; a
    # placeholder is used here.
    assert encoding_list, 'encoding_list must not be empty'
    xs = distinct(encoding_list if isinstance(encoding_list, list) else [encoding_list])
    first_exp = None
    for i, encoding in enumerate(xs):
        try:
            return data.decode(encoding)
        except UnicodeDecodeError as e:
            if i == 0:
                first_exp = e
    raise first_exp
Decode string data with one or more encodings, trying sequentially :param data: bytes: encoded string data :param encoding_list: list[string] or string: encoding names :return: string: decoded string
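A quick illustration of the fallback order (this assumes the module's distinct() helper is in scope); note that the first encoding's error is the one re-raised when every attempt fails.
data = 'héllo'.encode('utf-8')
print(unicode_decode(data, ['ascii', 'utf-8']))  # 'héllo' (ascii fails, utf-8 succeeds)
try:
    unicode_decode(data, ['ascii'])
except UnicodeDecodeError as e:
    print(e.encoding)  # 'ascii' -- the first attempt's error is re-raised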
13,393
def build_keyjar(key_conf, kid_template="", keyjar=None, owner=''):
    # NOTE: the default for `owner` was lost in extraction; the empty
    # string matches the "default owner" convention in the docstring.
    if keyjar is None:
        keyjar = KeyJar()

    tot_kb = build_key_bundle(key_conf, kid_template)

    keyjar.add_kb(owner, tot_kb)

    return keyjar
Builds a :py:class:`oidcmsg.key_jar.KeyJar` instance or adds keys to an existing KeyJar based on a key specification. An example of such a specification:: keys = [ {"type": "RSA", "key": "cp_keys/key.pem", "use": ["enc", "sig"]}, {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"}, {"type": "EC", "crv": "P-256", "use": ["enc"], "kid": "ec.2"} ] Keys in this specification are: type The type of key. Presently only 'rsa' and 'ec' supported. key A name of a file where a key can be found. Only works with PEM encoded RSA keys use What the key should be used for crv The elliptic curve that should be used. Only applies to elliptic curve keys :-) kid Key ID, can only be used with one usage type is specified. If there are more the one usage type specified 'kid' will just be ignored. :param key_conf: The key configuration :param kid_template: A template by which to build the key IDs. If no kid_template is given then the built-in function add_kid() will be used. :param keyjar: If an KeyJar instance the new keys are added to this key jar. :param owner: The default owner of the keys in the key jar. :return: A KeyJar instance
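A hedged usage sketch based on the docstring's own specification format; it assumes the oidcmsg package is installed and that KeyJar exposes get_signing_key, which is an assumption not confirmed by this snippet.
# Hypothetical usage; requires oidcmsg.
key_conf = [
    {"type": "EC", "crv": "P-256", "use": ["sig"], "kid": "ec.1"},
]
keyjar = build_keyjar(key_conf, owner='https://issuer.example.com')
print(keyjar.get_signing_key('EC', owner='https://issuer.example.com'))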
13,394
def from_plugin_classname(plugin_classname, exclude_lines_regex=None, **kwargs):
    klass = globals()[plugin_classname]

    # Make sure the class is a plugin before instantiating it.
    if not issubclass(klass, BasePlugin):
        raise TypeError

    try:
        instance = klass(exclude_lines_regex=exclude_lines_regex, **kwargs)
    except TypeError:
        # NOTE: the log message literal was lost in extraction; a
        # placeholder is used here.
        log.warning(
            'Unable to initialize plugin!',
        )
        raise

    return instance
Initializes a plugin class, given a classname and kwargs. :type plugin_classname: str :param plugin_classname: subclass of BasePlugin. :type exclude_lines_regex: str|None :param exclude_lines_regex: optional regex for ignored lines.
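The lookup relies on the plugin class being importable via this module's globals(). A standalone sketch of the same name-to-instance pattern; the plugin name here is illustrative.
class BasePlugin:
    pass

class HexKeywordDetector(BasePlugin):  # illustrative plugin name
    def __init__(self, exclude_lines_regex=None):
        self.exclude_lines_regex = exclude_lines_regex

def from_classname(name, **kwargs):
    klass = globals()[name]
    if not issubclass(klass, BasePlugin):
        raise TypeError
    return klass(**kwargs)

plugin = from_classname('HexKeywordDetector', exclude_lines_regex=r'^\s*#')
print(plugin.exclude_lines_regex)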
13,395
def find_objects(config=None, config_path=None, regex=None, saltenv='base'):
    # NOTE: the default for `saltenv` was lost in extraction; 'base' is
    # restored from the docstring. The in-function usage-example string was
    # garbled by extraction and has been dropped; the docstring carries it.
    ccp = _get_ccp(config=config, config_path=config_path, saltenv=saltenv)
    lines = ccp.find_objects(regex)
    return lines
Return all the line objects that match the expression in the ``regex`` argument. .. warning:: This function is mostly valuable when invoked from other Salt components (i.e., execution modules, states, templates etc.). For CLI usage, please consider using :py:func:`ciscoconfparse.find_lines <salt.ciscoconfparse_mod.find_lines>` config The configuration sent as text. .. note:: This argument is ignored when ``config_path`` is specified. config_path The absolute or remote path to the file with the configuration to be parsed. This argument supports the usual Salt filesystem URIs, e.g., ``salt://``, ``https://``, ``ftp://``, ``s3://``, etc. regex The regular expression to match the lines against. saltenv: ``base`` Salt fileserver environment from which to retrieve the file. This argument is ignored when ``config_path`` is not a ``salt://`` URL. Usage example: .. code-block:: python objects = __salt__['ciscoconfparse.find_objects'](config_path='salt://path/to/config.txt', regex='Gigabit') for obj in objects: print(obj.text)
13,396
def register(self, cmd: Type[Command]) -> None:
    self.commands[cmd.command] = cmd
Register a new IMAP command. Args: cmd: The new command type.
13,397
def get(url, params={}):
    request_url = url
    if len(params):
        request_url = "{}?{}".format(url, urlencode(params))
    try:
        # NOTE: the header literal was lost in extraction; a User-Agent
        # header is assumed here.
        req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})
        response = json.loads(urlopen(req).read().decode("utf-8"))
        return response
    except HTTPError as err:
        raise MtgException(err.read())
Invoke an HTTP GET request on a url Args: url (string): URL endpoint to request params (dict): Dictionary of url parameters Returns: dict: JSON response as a dictionary
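A hedged usage sketch; the endpoint is the public Magic: The Gathering API this SDK appears to target (an assumption), and a live network connection is required.
# Hypothetical call; performs a live HTTP request.
cards = get('https://api.magicthegathering.io/v1/cards', {'name': 'Black Lotus'})
for card in cards.get('cards', []):
    print(card['name'], card.get('set'))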
13,398
def remove_all(self, item):
    item = self.ref(item)
    while list.__contains__(self, item):
        list.remove(self, item)
Remove all occurrences of the parameter. :param item: Value to delete from the WeakList.
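The item must be wrapped with self.ref before comparison because the list stores weak references, which compare equal when their referents are the same live object. A standalone sketch of that idea:
import weakref

class Thing:
    pass

a, b = Thing(), Thing()
refs = [weakref.ref(a), weakref.ref(b), weakref.ref(a)]

target = weakref.ref(a)  # a new ref to the same object compares equal
while target in refs:
    refs.remove(target)
print(len(refs))  # 1 -- only the reference to b remains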
13,399
def gtlike_spectrum_to_vectors(spectrum):
    parameters = pyLike.ParameterVector()
    spectrum.getParams(parameters)

    npar = max(parameters.size(), 10)
    # NOTE: the dict keys and the dtype for the name array were lost in
    # extraction; 'S32' and the key names below are assumed.
    o = {'param_names': np.zeros(npar, dtype='S32'),
         'param_values': np.empty(npar, dtype=float) * np.nan,
         'param_errors': np.empty(npar, dtype=float) * np.nan,
         }

    for i, p in enumerate(parameters):
        o['param_names'][i] = p.getName()
        o['param_values'][i] = p.getTrueValue()
        perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
        o['param_errors'][i] = perr
    return o
Convert a pyLikelihood object to a python dictionary which can be easily saved to a file.