Columns: "Unnamed: 0" — int64 (0 to ~389k); "code" — string (lengths 26 to 79.6k); "docstring" — string (lengths 1 to 46.9k)
379,100
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None): logger.debug("Parameters for get_response:") logger.debug("\t - endpoint: %s", endpoint) logger.debug("\t - method: %s", method) logger.debug("\t - headers: %s", headers) logger.debug("\t - json: %s", json) logger.debug("\t - params: %s", params) logger.debug("\t - data: %s", data) url = self.get_url(endpoint) try: response = self.session.request(method=method, url=url, headers=headers, json=json, params=params, data=data, proxies=self.proxies, timeout=self.timeout) logger.debug("response headers: %s", response.headers) logger.debug("response content: %s", response.content) except RequestException as e: response = {"_status": "ERR", "_error": {"message": e, "code": BACKEND_ERROR}, "_issues": {"message": e, "code": BACKEND_ERROR}} raise BackendException(code=BACKEND_ERROR, message=e, response=response) else: return response
Returns the response from the requested endpoint with the requested method :param method: str. one of the methods accepted by Requests ('POST', 'GET', ...) :param endpoint: str. the relative endpoint to access :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :return: Requests.response
379,101
def convert_to_ns(self, value): parsed = self.parse_uri(value) try: rtn_val = "%s_%s" % (self.uri_dict[parsed[0]], parsed[1]) except KeyError: rtn_val = self.pyhttp(value) return rtn_val
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert
379,102
def get_by_id(self, webhook, params={}, **options): path = "/webhooks/%s" % (webhook) return self.client.get(path, params, **options)
Returns the full record for the given webhook. Parameters ---------- webhook : {Id} The webhook to get. [params] : {Object} Parameters for the request
379,103
def get_huisnummer_by_id(self, id):
    def creator():
        res = crab_gateway_request(self.client, 'GetHuisnummerWithStatusByHuisnummerId', id)
        if res == None:
            raise GatewayResourceNotFoundException()
        return Huisnummer(
            res.HuisnummerId,
            res.StatusHuisnummer,
            res.Huisnummer,
            res.StraatnaamId,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['long'].is_configured:
        key = 'GetHuisnummerByHuisnummerId#%s' % (id)
        huisnummer = self.caches['long'].get_or_create(key, creator)
    else:
        huisnummer = creator()
    huisnummer.set_gateway(self)
    return huisnummer
Retrieve a `huisnummer` by the Id. :param integer id: the Id of the `huisnummer` :rtype: :class:`Huisnummer`
379,104
def flatter(x, k=1):
    if k == 0:
        return x
    x = x.toarray() if sps.issparse(x) else np.asarray(x)
    if len(x.shape) - abs(k) < 2:
        return x.flatten()
    k += np.sign(k)
    if k > 0:
        return np.reshape(x, (-1,) + x.shape[k:])
    else:
        return np.reshape(x, x.shape[:k] + (-1,))
flatter(x) yields a numpy array equivalent to x but whose first dimension has been flattened. flatter(x, k) yields a numpy array whose first k dimensions have been flattened; if k is negative, the last k dimensions are flattened. If np.inf or -np.inf is passed, then this is equivalent to flattest(x). Note that flatter(x) is equivalent to flatter(x,1). flatter(x, 0) yields x.
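Editor's usage sketch (an added illustration, not a dataset row; it calls flatter() from the row above and assumes numpy as np is importable and the input is dense, so the scipy.sparse branch is skipped):

    import numpy as np
    x = np.zeros((2, 3, 4))
    flatter(x).shape      # (6, 4)   -- first two dimensions merged
    flatter(x, -1).shape  # (2, 12)  -- last two dimensions merged
    flatter(x, 0).shape   # (2, 3, 4) -- returned unchanged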
379,105
def op_nodes(self, op=None): nodes = [] for node in self._multi_graph.nodes(): if node.type == "op": if op is None or isinstance(node.op, op): nodes.append(node) return nodes
Get the list of "op" nodes in the dag. Args: op (Type): Instruction subclass op nodes to return. if op=None, return all op nodes. Returns: list[DAGNode]: the list of node ids containing the given op.
379,106
def raw(request):
    foos = foobar_models.Foo.objects.all()
    return HttpResponse(tree.xml(foos), mimetype='text/xml')
shows untransformed hierarchical xml output
379,107
def is_valid(self, tree):
    conano_plaintext = etree.tostring(tree, encoding='utf8', method='text')
    token_str_list = conano_plaintext.split()
    for i, plain_token in enumerate(token_str_list):
        graph_token = self.node[self.tokens[i]][self.ns+':token']
        if ensure_unicode(plain_token) != graph_token:
            sys.stderr.write(
                "Conano tokenizations don't match: {0} vs. {1} "
                "({2})".format(plain_token, graph_token))
            return False
    return True
Returns True iff the order of the tokens in the graph is the same as in the Conano file (converted to plain text).
379,108
def write_to_disk( manifest_root_dir: Optional[Path] = None, manifest_name: Optional[str] = None, prettify: Optional[bool] = False, ) -> Manifest: return _write_to_disk(manifest_root_dir, manifest_name, prettify)
Write the active manifest to disk Defaults - Writes manifest to cwd unless Path is provided as manifest_root_dir. - Writes manifest with a filename of Manifest[version].json unless a desired manifest name (which must end in json) is provided as manifest_name. - Writes the minified manifest version to disk unless prettify is set to True.
379,109
def build(self, builder): params = dict(OID=self.oid, Name=self.name, DataType=self.datatype.value) if self.sas_format_name is not None: params["SASFormatName"] = self.sas_format_name builder.start("CodeList", params) for item in self.codelist_items: item.build(builder) for alias in self.aliases: alias.build(builder) builder.end("CodeList")
Build XML by appending to builder
379,110
def get_imgid(self, img): imgid = img.filename() hdr = self.get_header(img) if in hdr: return hdr[] if in hdr: return hdr[] if not imgid: imgid = repr(img) return imgid
Obtain a unique identifier of the image. Parameters ---------- img : astropy.io.fits.HDUList Returns ------- str: Identification of the image
379,111
def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block: return cls(filter_assert_nodes(nodes, min_line_number), LineType._assert)
Assert block is all nodes that are after the Act node. Note: The filtering is *still* running off the line number of the Act node, when instead it should be using the last line of the Act block.
379,112
def from_indra_pickle(path: str, name: Optional[str] = None, version: Optional[str] = None, description: Optional[str] = None, authors: Optional[str] = None, contact: Optional[str] = None, license: Optional[str] = None, copyright: Optional[str] = None, disclaimer: Optional[str] = None):
    with open(path, 'rb') as f:
        statements = load(f)
    return from_indra_statements(
        stmts=statements,
        name=name,
        version=version,
        description=description,
        authors=authors,
        contact=contact,
        license=license,
        copyright=copyright,
        disclaimer=disclaimer,
    )
Import a model from :mod:`indra`. :param path: Path to pickled list of :class:`indra.statements.Statement` :param name: The name for the BEL graph :param version: The version of the BEL graph :param description: The description of the graph :param authors: The authors of this graph :param contact: The contact email for this graph :param license: The license for this graph :param copyright: The copyright for this graph :param disclaimer: The disclaimer for this graph :rtype: pybel.BELGraph
379,113
def write(self, oprot):
    oprot.write_struct_begin('DbfDatabaseConfiguration')
    if self.pp_images_dir_path is not None:
        oprot.write_field_begin(name='pp_images_dir_path', type=11, id=None)
        oprot.write_string(self.pp_images_dir_path)
        oprot.write_field_end()
    if self.pp_install_dir_path is not None:
        oprot.write_field_begin(name='pp_install_dir_path', type=11, id=None)
        oprot.write_string(self.pp_install_dir_path)
        oprot.write_field_end()
    if self.pp_objects_dbf_file_path is not None:
        oprot.write_field_begin(name='pp_objects_dbf_file_path', type=11, id=None)
        oprot.write_string(self.pp_objects_dbf_file_path)
        oprot.write_field_end()
    oprot.write_field_stop()
    oprot.write_struct_end()
    return self
Write this object to the given output protocol and return self. :type oprot: thryft.protocol._output_protocol._OutputProtocol :rtype: pastpy.gen.database.impl.dbf.dbf_database_configuration.DbfDatabaseConfiguration
379,114
def _get_value(self): x, y = self._point.x, self._point.y self._px, self._py = self._item_point.canvas.get_matrix_i2i(self._item_point, self._item_target).transform_point(x, y) return self._px, self._py
Return two delegating variables. Each variable should contain a value attribute with the real value.
379,115
def prune_influence_map_subj_obj(self): def get_rule_info(r): result = {} for ann in self.model.annotations: if ann.subject == r: if ann.predicate == : result[] = ann.object elif ann.predicate == : result[] = ann.object return result im = self.get_im() rules = im.nodes() edges_to_prune = [] for r1, r2 in itertools.permutations(rules, 2): if (r1, r2) not in im.edges(): continue r1_info = get_rule_info(r1) r2_info = get_rule_info(r2) if not in r1_info or not in r2_info: continue if r1_info[] != r2_info[]: logger.info("Removing edge %s --> %s" % (r1, r2)) edges_to_prune.append((r1, r2)) im.remove_edges_from(edges_to_prune)
Prune influence map to include only edges where the object of the upstream rule matches the subject of the downstream rule.
379,116
def translation(language): global _translations if language not in _translations: _translations[language] = Translations(language) return _translations[language]
Return a translation object in the default 'django' domain.
379,117
def ServiceWorker_inspectWorker(self, versionId):
    assert isinstance(versionId, (str,)), \
        "Argument must be of type str. Received type: %s" % type(versionId)
    subdom_funcs = self.synchronous_command('ServiceWorker.inspectWorker', versionId=versionId)
    return subdom_funcs
Function path: ServiceWorker.inspectWorker Domain: ServiceWorker Method name: inspectWorker Parameters: Required arguments: 'versionId' (type: string) -> No description No return value.
379,118
def RegisterParser(cls, parser_class):
    parser_name = parser_class.NAME.lower()
    if parser_name in cls._parser_classes:
        raise KeyError('Parser class already set for name: {0:s}.'.format(parser_class.NAME))
    cls._parser_classes[parser_name] = parser_class
Registers a parser class. The parser classes are identified based on their lower case name. Args: parser_class (type): parser class (subclass of BaseParser). Raises: KeyError: if parser class is already set for the corresponding name.
379,119
def is_cnpj(numero, estrito=False): try: cnpj(digitos(numero) if not estrito else numero) return True except NumeroCNPJError: pass return False
A convenient version for use in conditional tests. It simply returns True or False depending on whether the argument validates. :param bool estrito: Defaults to ``False``; indicates whether only the digits of the number should be considered. If true, potential mask-forming characters will be removed before validation is performed.
379,120
def is_any_type_set(sett: Set[Type]) -> bool: return len(sett) == 1 and is_any_type(min(sett))
Helper method to check if a set of types is the {AnyObject} singleton :param sett: :return:
379,121
def purge(**kwargs): * ret = {: [], : True} for name in list_(show_all=True, return_yaml=False): if name == : continue if name.startswith(): continue if in kwargs and kwargs[]: ret[] = True ret[].append(.format(name)) else: persist = True if in kwargs: persist = kwargs[] try: eventer = salt.utils.event.get_event(, opts=__opts__) res = __salt__[]({: name, : , : persist}, ) if res: event_ret = eventer.get_event(tag=, wait=30) if event_ret and event_ret[]: _schedule_ret = event_ret[] if name not in _schedule_ret: ret[] = True ret[].append(.format(name)) else: ret[].append(.format(name)) ret[] = True except KeyError: return ret
Purge all the jobs currently scheduled on the minion CLI Example: .. code-block:: bash salt '*' schedule.purge
379,122
def _tokenize_latex(self, exp): tokens = [] prevexp = "" while exp: t, exp = self._get_next_token(exp) if t.strip() != "": tokens.append(t) if prevexp == exp: break prevexp = exp return tokens
Internal method to tokenize latex
379,123
def to_json(self): capsule = {} capsule["Hierarchy"] = [] for ( dying, (persistence, surviving, saddle), ) in self.merge_sequence.items(): capsule["Hierarchy"].append( { "Dying": dying, "Persistence": persistence, "Surviving": surviving, "Saddle": saddle, } ) capsule["Partitions"] = [] base = np.array([None, None] * len(self.Y)).reshape(-1, 2) for (min_index, max_index), items in self.base_partitions.items(): base[items, :] = [min_index, max_index] capsule["Partitions"] = base.tolist() return json.dumps(capsule)
Writes the complete Morse-Smale merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all minima and maxima.
379,124
def get_tagged_version(self):
    tags = list(self.get_tags())
    if 'tip' in tags and not self.is_modified():
        tags = self.get_parent_tags()
    versions = self.__versions_from_tags(tags)
    return self.__best_version(versions)
Get the version of the local working set as a StrictVersion or None if no viable tag exists. If the local working set is itself the tagged commit and the tip and there are no local modifications, use the tag on the parent changeset.
379,125
def sed(regexpr, repl, force=False, recursive=False, dpath_list=None, fpath_list=None, verbose=None, include_patterns=None, exclude_patterns=[]): if include_patterns is None: include_patterns = [, , , , , , , , , , ] if dpath_list is None: dpath_list = [os.getcwd()] if verbose is None: verbose = ut.NOT_QUIET if fpath_list is None: greater_exclude_dirs = get_standard_exclude_dnames() exclude_dirs = [] fpath_generator = matching_fpaths( dpath_list, include_patterns, exclude_dirs, greater_exclude_dirs=greater_exclude_dirs, recursive=recursive, exclude_patterns=exclude_patterns) else: fpath_generator = fpath_list if verbose: print( % (dpath_list,)) print( % (regexpr,)) print( % (repl,)) print( % (include_patterns,)) print( % (recursive,)) print( % (force,)) from utool import util_str print( % (util_str.repr3(fpath_list),)) regexpr = extend_regex(regexpr) num_changed = 0 num_files_checked = 0 fpaths_changed = [] for fpath in fpath_generator: num_files_checked += 1 changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose) if changed_lines is not None: fpaths_changed.append(fpath) num_changed += len(changed_lines) import utool as ut print( % (num_files_checked,)) print( % (ut.repr3(sorted(fpaths_changed)),)) print( % (num_changed,))
Python implementation of sed. NOT FINISHED searches and replaces text in files Args: regexpr (str): regx patterns to find repl (str): text to replace force (bool): recursive (bool): dpath_list (list): directories to search (defaults to cwd)
379,126
def from_list(cls, l):
    if len(l) == 3:
        x, y, z = map(float, l)
        return cls(x, y, z)
    elif len(l) == 2:
        x, y = map(float, l)
        return cls(x, y)
    else:
        raise AttributeError
Return a Point instance from a given list
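Editor's usage sketch (added illustration; Point here is a hypothetical minimal host class for the classmethod above — the real class presumably stores more state):

    class Point:
        def __init__(self, x, y, z=0.0):
            self.x, self.y, self.z = x, y, z

        @classmethod
        def from_list(cls, l):
            if len(l) in (2, 3):
                return cls(*map(float, l))
            raise AttributeError

    p = Point.from_list([1, 2, 3])
    print(p.x, p.y, p.z)  # 1.0 2.0 3.0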
379,127
def add_formats_by_name(self, rfmt_list): for fmt in rfmt_list: if fmt == "json": self.add_report_format(JSONReportFormat) elif fmt in ("txt", "text"): self.add_report_format(TextReportFormat) elif fmt in ("htm", "html"): self.add_report_format(CheetahReportFormat)
adds formats by short label descriptors, such as 'txt', 'json', or 'html'
379,128
def view_indexes(self, done=None): ret = [] if done is None: done = set() idx = 0 while idx < self.count(): if not idx in done: break idx += 1 while idx < self.count(): w = self.wp(idx) if idx in done: if w.x != 0 or w.y != 0: ret.append(idx) break done.add(idx) if w.command == mavutil.mavlink.MAV_CMD_DO_JUMP: idx = int(w.param1) w = self.wp(idx) if w.x != 0 or w.y != 0: ret.append(idx) continue if (w.x != 0 or w.y != 0) and self.is_location_command(w.command): ret.append(idx) idx += 1 return ret
Return a list of waypoint indexes in view order.
379,129
def cell_fate(data, groupby=, disconnected_groups=None, self_transitions=False, n_neighbors=None, copy=False): adata = data.copy() if copy else data logg.info(, r=True) n_neighbors = 10 if n_neighbors is None else n_neighbors _adata = adata.copy() vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True, n_recurse_neighbors=1) vgraph.compute_cosines() _adata.uns[] = vgraph.graph _adata.uns[] = vgraph.graph_neg T = transition_matrix(_adata, self_transitions=self_transitions) I = np.eye(_adata.n_obs) fate = np.linalg.inv(I - T) if issparse(T): fate = fate.A cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)]) if disconnected_groups is not None: idx = _adata.obs[groupby].isin(disconnected_groups) cell_fates[idx] = _adata.obs[groupby][idx] adata.obs[] = cell_fates adata.obs[] = fate.max(1) / fate.sum(1) strings_to_categoricals(adata) logg.info(, time=True, end= if settings.verbosity > 2 else ) logg.hint( cell_fate\ cell_fate_confidence\) return adata if copy else None
Computes individual cell endpoints Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. groupby: `str` (default: `'clusters'`) Key to which to assign the fates. disconnected_groups: list of `str` (default: `None`) Which groups to treat as disconnected for fate assignment. n_neighbors: `int` (default: `None`) Number of neighbors to restrict transitions to. copy: `bool` (default: `False`) Return a copy instead of writing to `adata`. Returns ------- Returns or updates `adata` with the attributes cell_fate: `.obs` most likely cell fate for each individual cell cell_fate_confidence: `.obs` confidence of transitioning to the assigned fate
379,130
def _add_document(self, doc_id, conn=None, nosave=False, score=1.0, payload=None, replace=False, partial=False, language=None, **fields):
    if conn is None:
        conn = self.redis
    if partial:
        replace = True
    args = [self.ADD_CMD, self.index_name, doc_id, score]
    if nosave:
        args.append('NOSAVE')
    if payload is not None:
        args.append('PAYLOAD')
        args.append(payload)
    if replace:
        args.append('REPLACE')
    if partial:
        args.append('PARTIAL')
    if language:
        args += ['LANGUAGE', language]
    args.append('FIELDS')
    args += list(itertools.chain(*fields.items()))
    return conn.execute_command(*args)
Internal add_document used for both batch and single doc indexing
379,131
def info_authn(self):
    authz_header = request.headers.get('Authorization', '')
    if not authz_header.startswith('Bearer '):
        return False
    token = authz_header[7:]
    return self.access_token_valid(token, "info_authn: Authorization header")
Check to see if user if authenticated for info.json. Must have Authorization header with value that has the form "Bearer TOKEN", where TOKEN is an appropriate and valid access token.
379,132
def get_queryset(self): try: date = ElectionDay.objects.get(date=self.kwargs["date"]) except Exception: raise APIException( "No elections on {}.".format(self.kwargs["date"]) ) division_ids = [] normal_elections = date.elections.filter() if len(normal_elections) > 0: for election in date.elections.all(): if election.division.level.name == DivisionLevel.STATE: division_ids.append(election.division.uid) elif election.division.level.name == DivisionLevel.DISTRICT: division_ids.append(election.division.parent.uid) return Division.objects.filter(uid__in=division_ids)
Returns a queryset of all states holding a non-special election on a date.
379,133
def _iter_path(pointer): _check_status(pointer.status) data = pointer.data num_data = pointer.num_data points_per_type = PATH_POINTS_PER_TYPE position = 0 while position < num_data: path_data = data[position] path_type = path_data.header.type points = () for i in range(points_per_type[path_type]): point = data[position + i + 1].point points += (point.x, point.y) yield (path_type, points) position += path_data.header.length
Take a cairo_path_t * pointer and yield ``(path_operation, coordinates)`` tuples. See :meth:`Context.copy_path` for the data structure.
379,134
def store_text_cursor_anchor(self): self.__text_cursor_anchor = (self.textCursor(), self.horizontalScrollBar().sliderPosition(), self.verticalScrollBar().sliderPosition()) return True
Stores the document cursor anchor. :return: Method success. :rtype: bool
379,135
def chi_squareds(self, p=None): if len(self._set_xdata)==0 or len(self._set_ydata)==0: return None if p is None: p = self.results[0] rs = self.studentized_residuals(p) if rs == None: return None cs = [] for r in rs: cs.append(sum(r**2)) return cs
Returns a list of chi squared for each data set. Also uses ydata_massaged. p=None means use the fit results
379,136
def _post_login_page(self, login_url): data = {"login": self.username, "_58_password": self.password} try: raw_res = yield from self._session.post(login_url, data=data, timeout=self._timeout, allow_redirects=False) except OSError: raise PyHydroQuebecError("Can not submit login form") if raw_res.status != 302: raise PyHydroQuebecError("Login error: Bad HTTP status code. " "Please check your username/password.") return True
Login to HydroQuebec website.
379,137
def get_datastream_data(self, datastream, options): response_format=None if options and in options and options[] is not None: response_format = options[] options[] = None url = + str(datastream) + response = self.http.downstream(url, response_format) return response
Get input data for the datastream :param datastream: string :param options: dict
379,138
def get_source(self, name):
    path = self.get_filename(name)
    try:
        source_bytes = self.get_data(path)
    except OSError as exc:
        e = _ImportError('source not available through get_data()', name=name)
        e.__cause__ = exc
        raise e
    return decode_source(source_bytes)
Concrete implementation of InspectLoader.get_source.
379,139
def get_active_services(): with win32.OpenSCManager( dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE ) as hSCManager: return [ entry for entry in win32.EnumServicesStatusEx(hSCManager, dwServiceType = win32.SERVICE_WIN32, dwServiceState = win32.SERVICE_ACTIVE) \ if entry.ProcessId ]
Retrieve a list of all active system services. @see: L{get_services}, L{start_service}, L{stop_service}, L{pause_service}, L{resume_service} @rtype: list( L{win32.ServiceStatusProcessEntry} ) @return: List of service status descriptors.
379,140
def _decode_datetime(obj):
    if '__datetime__' in obj:
        obj = datetime.datetime.strptime(obj['as_str'].decode(), "%Y%m%dT%H:%M:%S.%f")
    return obj
Decode a msgpack'ed datetime.
379,141
def copy(self, extra=None): if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra)
Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
379,142
def _consolidate_auth(ssh_password=None, ssh_pkey=None, ssh_pkey_password=None, allow_agent=True, host_pkey_directories=None, logger=None): ssh_loaded_pkeys = SSHTunnelForwarder.get_keys( logger=logger, host_pkey_directories=host_pkey_directories, allow_agent=allow_agent ) if isinstance(ssh_pkey, string_types): ssh_pkey_expanded = os.path.expanduser(ssh_pkey) if os.path.exists(ssh_pkey_expanded): ssh_pkey = SSHTunnelForwarder.read_private_key_file( pkey_file=ssh_pkey_expanded, pkey_password=ssh_pkey_password or ssh_password, logger=logger ) elif logger: logger.warning( .format(ssh_pkey)) if isinstance(ssh_pkey, paramiko.pkey.PKey): ssh_loaded_pkeys.insert(0, ssh_pkey) if not ssh_password and not ssh_loaded_pkeys: raise ValueError() return (ssh_password, ssh_loaded_pkeys)
Get sure authentication information is in place. ``ssh_pkey`` may be of classes: - ``str`` - in this case it represents a private key file; public key will be obtained from it - ``paramiko.Pkey`` - it will be transparently added to loaded keys
379,143
def get_mac_acl_for_intf_input_interface_type(self, **kwargs):
    config = ET.Element("config")
    get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
    config = get_mac_acl_for_intf
    input = ET.SubElement(get_mac_acl_for_intf, "input")
    interface_type = ET.SubElement(input, "interface-type")
    interface_type.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
379,144
def adjust_worker_number_by_load(self): if self.interrupted: logger.debug("Trying to adjust worker number. Ignoring because we are stopping.") return to_del = [] logger.debug("checking worker count." " Currently: %d workers, min per module : %d, max per module : %d", len(self.workers), self.min_workers, self.max_workers) for mod in self.q_by_mod: todo = max(0, self.min_workers - len(self.q_by_mod[mod])) for _ in range(todo): try: self.create_and_launch_worker(module_name=mod) except NotWorkerMod: to_del.append(mod) break for mod in to_del: logger.warning("The module %s is not a worker one, I remove it from the worker list.", mod) del self.q_by_mod[mod]
Try to create the minimum workers specified in the configuration :return: None
379,145
def clone(self, into=None): chroot_clone = self._chroot.clone(into=into) clone = self.__class__( chroot=chroot_clone, interpreter=self._interpreter, pex_info=self._pex_info.copy(), preamble=self._preamble, copy=self._copy) clone.set_shebang(self._shebang) clone._distributions = self._distributions.copy() return clone
Clone this PEX environment into a new PEXBuilder. :keyword into: (optional) An optional destination directory to clone this PEXBuilder into. If not specified, a temporary directory will be created. Clones PEXBuilder into a new location. This is useful if the PEXBuilder has been frozen and rendered immutable. .. versionchanged:: 0.8 The temporary directory created when ``into`` is not specified is now garbage collected on interpreter exit.
379,146
def pdf_case_report(institute_id, case_name): institute_obj, case_obj = institute_and_case(store, institute_id, case_name) data = controllers.case_report_content(store, institute_obj, case_obj) if current_app.config.get(): data[] = controllers.coverage_report_contents(store, institute_obj, case_obj, request.url_root) if case_obj.get() is not None: with open(os.path.join(cases_bp.static_folder, ), ) as temp_madeline: temp_madeline.write(case_obj[]) html_report = render_template(, institute=institute_obj, case=case_obj, format=, **data) return render_pdf(HTML(string=html_report), download_filename=case_obj[]++datetime.datetime.now().strftime("%Y-%m-%d")+)
Download a pdf report for a case
379,147
def get_user_shell():
    try:
        pw_shell = pwd.getpwuid(os.geteuid()).pw_shell
    except KeyError:
        pw_shell = None
    return pw_shell or '/bin/sh'
For commands executed directly via an SSH command-line, SSH looks up the user's shell via getpwuid() and only defaults to /bin/sh if that field is missing or empty.
379,148
def _ige(message, key, iv, operation="decrypt"):
    message = bytes(message)
    if len(key) != 32:
        raise ValueError("key must be 32 bytes long (was " + str(len(key)) + " bytes)")
    if len(iv) != 32:
        raise ValueError("iv must be 32 bytes long (was " + str(len(iv)) + " bytes)")
    cipher = AES.new(key, AES.MODE_ECB, iv)
    blocksize = cipher.block_size
    if len(message) % blocksize != 0:
        raise ValueError("message must be a multiple of 16 bytes (try adding " + str(16 - len(message) % 16) + " bytes of padding)")
    ivp = iv[0:blocksize]
    ivp2 = iv[blocksize:]
    ciphered = bytes()
    for i in range(0, len(message), blocksize):
        indata = message[i:i+blocksize]
        if operation == "decrypt":
            xored = strxor(indata, ivp2)
            decrypt_xored = cipher.decrypt(xored)
            outdata = strxor(decrypt_xored, ivp)
            ivp = indata
            ivp2 = outdata
        elif operation == "encrypt":
            xored = strxor(indata, ivp)
            encrypt_xored = cipher.encrypt(xored)
            outdata = strxor(encrypt_xored, ivp2)
            ivp = outdata
            ivp2 = indata
        else:
            raise ValueError("operation must be either 'encrypt' or 'decrypt'")
        ciphered += outdata
    return ciphered
Given a key, an iv, and a message, perform the requested operation. The operation field is checked for the strings "decrypt" and "encrypt". Returns the message encrypted/decrypted. message must be a multiple of 16 bytes (it is processed in 16-byte blocks), key must be 32 bytes, iv must be 32 bytes (it's not used internally by AES-256-ECB, but it's needed for IGE).
379,149
def add_line(self, p1, p2, char_length):
    p1_id = self.get_point_id(p1, char_length)
    p2_id = self.get_point_id(p2, char_length)
    self.Lines.append((p1_id, p2_id))
    return len(self.Lines)
Add a line to the list. Check if the nodes already exist, and add them if not. Return the line index (1-indexed, starting with 1).
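Editor's behavioral sketch (added illustration; get_point_id here is a hypothetical stand-in that deduplicates points — the real implementation presumably also records char_length for meshing):

    class Geometry:
        def __init__(self):
            self.Lines = []
            self._points = {}

        def get_point_id(self, p, char_length):
            # Reuse an existing point id, or assign the next one (1-indexed).
            return self._points.setdefault(tuple(p), len(self._points) + 1)

        def add_line(self, p1, p2, char_length):
            p1_id = self.get_point_id(p1, char_length)
            p2_id = self.get_point_id(p2, char_length)
            self.Lines.append((p1_id, p2_id))
            return len(self.Lines)

    g = Geometry()
    print(g.add_line((0, 0), (1, 0), 0.1))  # 1
    print(g.add_line((1, 0), (1, 1), 0.1))  # 2 -- shared point (1, 0) is reused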
379,150
def enumerate_device_serials(vid=FT232H_VID, pid=FT232H_PID): try: ctx = None ctx = ftdi.new() device_list = None count, device_list = ftdi.usb_find_all(ctx, vid, pid) if count < 0: raise RuntimeError(.format(count, ftdi.get_error_string(self._ctx))) devices = [] while device_list is not None: ret, manufacturer, description, serial = ftdi.usb_get_strings(ctx, device_list.dev, 256, 256, 256) if serial is not None: devices.append(serial) device_list = device_list.next return devices finally: if device_list is not None: ftdi.list_free(device_list) if ctx is not None: ftdi.free(ctx)
Return a list of all FT232H device serial numbers connected to the machine. You can use these serial numbers to open a specific FT232H device by passing it to the FT232H initializer's serial parameter.
379,151
def install_napps(cls, napps): mgr = NAppsManager() for napp in napps: mgr.set_napp(*napp) LOG.info(, mgr.napp_id) if not mgr.is_installed(): try: cls.install_napp(mgr) if not mgr.is_enabled(): cls.enable_napp(mgr) napp_dependencies = mgr.dependencies() if napp_dependencies: LOG.info() cls.install_napps(napp_dependencies) else: LOG.warning() except KytosException: continue else: LOG.warning()
Install local or remote NApps. This method is recursive: it will install each NApp and its dependencies.
379,152
def _get_vars(self): if self.vars is None: self.vars = {} if type(self.vars) not in [dict, list]: raise errors.AnsibleError(" section must contain only key/value pairs") vars = {} if type(self.vars) == list: for item in self.vars: if getattr(item, , None) is None: raise errors.AnsibleError("expecting a key-value pair in section") k, v = item.items()[0] vars[k] = v else: vars.update(self.vars) if type(self.playbook.extra_vars) == dict: vars.update(self.playbook.extra_vars) if type(self.vars_prompt) == list: for var in self.vars_prompt: if not in var: raise errors.AnsibleError(" item is missing ") vname = var[] prompt = var.get("prompt", vname) default = var.get("default", None) private = var.get("private", True) confirm = var.get("confirm", False) encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) if vname not in self.playbook.extra_vars: vars[vname] = self.playbook.callbacks.on_vars_prompt ( vname, private, prompt, encrypt, confirm, salt_size, salt, default ) elif type(self.vars_prompt) == dict: for (vname, prompt) in self.vars_prompt.iteritems(): prompt_msg = "%s: " % prompt if vname not in self.playbook.extra_vars: vars[vname] = self.playbook.callbacks.on_vars_prompt( varname=vname, private=False, prompt=prompt_msg, default=None ) else: raise errors.AnsibleError(" section is malformed, see docs") results = self.playbook.extra_vars.copy() results.update(vars) return results
load the vars section from a play, accounting for all sorts of variable features including loading from yaml files, prompting, and conditional includes of the first file found in a list.
379,153
def _format_info(self): info = % ( self.info.get(), self.info.get(), ) if self.info.get(, None): info += % ( self.info.get(), self.info.get(), ) else: info += if self.info.get(, None): info += % ( self.info.get(), self.info.get(), self.info.get() ) return info
Generate info line for GNTP Message :return string:
379,154
def as_text(self):
    from leonardo.templatetags.leonardo_tags import _render_content
    request = get_anonymous_request(self)
    content = ''
    try:
        for region in [region.key for region in self._feincms_all_regions]:
            content += ''.join(
                _render_content(content, request=request, context={})
                for content in getattr(self.content, region))
    except PermissionDenied:
        pass
    except Exception as e:
        LOG.exception(e)
    return content
Fetch and render all regions For search and test purposes just a prototype
379,155
def get_index(table, field_name, op, value):
    counter = 0
    for row in table:
        dict_row = convert_to_dict(row)
        if do_op(dict_row.get(field_name, None), op, value):
            return counter
        counter += 1
    return None
Returns the index of the first list entry that matches. If no matches are found, it returns None NOTE: it is not returning a list. It is returning an integer in range 0..LEN(target) NOTE: both 'None' and 0 evaluate as False in python. So, if you are checking for a None being returned, be explicit. "if myindex==None:" not simply "if not myindex:"
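Editor's usage sketch (added illustration; it calls get_index() from the row above, with convert_to_dict and do_op as hypothetical stand-ins — note the explicit None check the docstring asks for, since a matching index of 0 is falsy):

    import operator

    def convert_to_dict(row):
        return row  # rows are already dicts in this sketch

    def do_op(field_value, op, value):
        ops = {'==': operator.eq, '>': operator.gt, '<': operator.lt}
        return field_value is not None and ops[op](field_value, value)

    table = [{'age': 30}, {'age': 42}, {'age': 55}]
    idx = get_index(table, 'age', '>', 40)
    if idx == None:
        print('no match')
    else:
        print(idx)  # 1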
379,156
def infer_shape(self, node, input_shapes): if isinstance(self.operator, Functional): return [()] else: return [tuple(native(si) for si in self.operator.range.shape)]
Return a list of output shapes based on ``input_shapes``. This method is optional. It allows to compute the shape of the output without having to evaluate. Parameters ---------- node : `theano.gof.graph.Apply` The node of this Op in the computation graph. input_shapes : 1-element list of `theano.compile.ops.Shape` Symbolic shape of the input. Returns ------- output_shapes : 1-element list of tuples Fixed shape of the output determined by `odl_op`.
379,157
def update_kwargs(kwargs, *updates):
    for update in updates:
        if not update:
            continue
        for key, val in six.iteritems(update):
            u_item = resolve_value(val)
            if u_item is None:
                continue
            if key in ('command', 'entrypoint'):
                kwargs[key] = u_item
            elif isinstance(u_item, (tuple, list)):
                kw_item = kwargs.get(key)
                u_list = map(resolve_value, u_item)
                if isinstance(kw_item, list):
                    merge_list(kw_item, u_list)
                elif isinstance(kw_item, tuple):
                    new_list = list(kw_item)
                    merge_list(new_list, u_list)
                    kwargs[key] = new_list
                else:
                    kwargs[key] = list(u_list)
            elif isinstance(u_item, dict):
                kw_item = kwargs.get(key)
                u_dict = {u_k: resolve_value(u_v) for u_k, u_v in six.iteritems(u_item)}
                if isinstance(kw_item, dict):
                    kw_item.update(u_dict)
                else:
                    kwargs[key] = u_dict
            else:
                kwargs[key] = u_item
Utility function for merging multiple keyword arguments, depending on their type: * Non-existent keys are added. * Existing lists or tuples are extended, but not duplicating entries. The keywords ``command`` and ``entrypoint`` are however simply overwritten. * Nested dictionaries are updated, overriding previous key-value assignments. * Other items are simply overwritten (just like in a regular dictionary update) unless the updating value is ``None``. Lists/tuples and dictionaries are (shallow-)copied before adding and late resolving values are looked up. This function does not recurse. :param kwargs: Base keyword arguments. This is modified in-place. :type kwargs: dict :param updates: Dictionaries to update ``kwargs`` with. :type updates: dict
379,158
def _get_audio_sample_bit(self, audio_abs_path): sample_bit = int( subprocess.check_output( ( ).format(audio_abs_path, "Precision"), shell=True, universal_newlines=True).rstrip()) return sample_bit
Parameters ---------- audio_abs_path : str Returns ------- sample_bit : int
379,159
def get_slack_channels(self, token): ret = salt.utils.slack.query( function=, api_key=token, return channels
Get all channel names from Slack
379,160
def duration(self, value):
    if value == self._defaults['duration'] and 'duration' in self._values:
        del self._values['duration']
    else:
        self._values['duration'] = value
The duration property. Args: value (string). the property value.
379,161
def _on_cluster_discovery(self, future): LOGGER.debug(, future) common.maybe_raise_exception(future) nodes = future.result() for node in nodes: name = .format(node.ip, node.port) if name in self._cluster: LOGGER.debug(, node.ip, node.port) self._cluster[name].set_slots(node.slots) self._cluster[name].set_read_only( in node.flags) else: self._create_cluster_connection(node) self._discovery = True
Invoked when the Redis server has responded to the ``CLUSTER_NODES`` command. :param future: The future containing the response from Redis :type future: tornado.concurrent.Future
379,162
def get_plat_specifier():
    import setuptools
    import distutils
    plat_name = distutils.util.get_platform()
    plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
    if hasattr(sys, 'gettotalrefcount'):
        plat_specifier += '-pydebug'
    return plat_specifier
Standard platform specifier used by distutils
379,163
def _get_asset_content(self, asset_id, asset_content_type_str=None, asset_content_id=None): rm = self.my_osid_object._get_provider_manager() if in self.my_osid_object._my_map: if self.my_osid_object._proxy is not None: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0]), self.my_osid_object._proxy) else: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0])) elif in self.my_osid_object._my_map: if self.my_osid_object._proxy is not None: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0]), self.my_osid_object._proxy) else: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0])) elif in self.my_osid_object._my_map: if self.my_osid_object._proxy is not None: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0]), self.my_osid_object._proxy) else: als = rm.get_asset_lookup_session_for_repository( Id(self.my_osid_object._my_map[][0])) else: raise KeyError if asset_content_id is not None: ac_list = als.get_asset(asset_id).get_asset_contents() for ac in ac_list: if str(ac.ident) == str(asset_content_id): return ac if not asset_content_type_str: return next(als.get_asset(asset_id).get_asset_contents()) else: if isinstance(asset_content_type_str, Type): asset_content_type_str = str(asset_content_type_str) for ac in als.get_asset(asset_id).get_asset_contents(): if ac.get_genus_type() == Type(asset_content_type_str): return ac raise NotFound()
stub
379,164
def ROC_AUC_analysis(adata,groupby,group=None, n_genes=100): if group is None: pass name_list = list() for j, k in enumerate(adata.uns[]): if j >= n_genes: break name_list.append(adata.uns[][j][group]) groups = groups_order, groups_masks = utils.select_groups( adata, groups, groupby) imask = group mask = groups_masks[group] fpr={} tpr={} thresholds={} roc_auc={} y_true=mask for i, j in enumerate(name_list): vec=adata[:,[j]].X if issparse(vec): y_score = vec.todense() else: y_score = vec fpr[name_list[i]], tpr[name_list[i]], thresholds[name_list[i]] = metrics.roc_curve(y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=False) roc_auc[name_list[i]]=metrics.auc(fpr[name_list[i]],tpr[name_list[i]]) adata.uns[ +groupby+ str(group)] = fpr adata.uns[ +groupby+ str(group)] = tpr adata.uns[ +groupby+ str(group)] = thresholds adata.uns[ + groupby + str(group)] = roc_auc
Calculate ROC curves and AUC values. Calculates ROC/AUC for top-ranked genes stored in sample annotation using rank_genes_groups.py Parameters ---------- adata : :class:`~anndata.AnnData` Annotated data matrix. groupby : `str` The key of the sample grouping to consider. group : `str`, int, optional (default: None) Group name or index for which ROC/AUC for top-ranked genes should be calculated. If no parameter is passed, ROC/AUC is calculated for all groups n_genes : `int`, optional (default: 100) For how many genes to calculate ROC and AUC. If no parameter is passed, calculation is done for all stored top ranked genes.
379,165
def authorize(self, callback=None, state=None, **kwargs): params = dict(self.request_token_params) or {} params.update(**kwargs) if self.request_token_url: token = self.generate_request_token(callback)[0] url = % ( self.expand_url(self.authorize_url), url_quote(token) ) if params: url += + url_encode(params) else: assert callback is not None, client = self.make_client() if in params: scope = params.pop() else: scope = None if isinstance(scope, str): scope = _encode(scope, self.encoding) if in params: if not state: state = params.pop() else: params.pop() if callable(state): state = state() session[ % self.name] = callback url = client.prepare_request_uri( self.expand_url(self.authorize_url), redirect_uri=callback, scope=scope, state=state, **params ) return redirect(url)
Returns a redirect response to the remote authorization URL with the signed callback given. :param callback: a redirect url for the callback :param state: an optional value to embed in the OAuth request. Use this if you want to pass around application state (e.g. CSRF tokens). :param kwargs: add optional key/value pairs to the query string
379,166
def flush(self): messages = [] while self._packets: p = self._packets.popleft() try: msg = decode(p) except ProtocolError: self._messages = messages raise messages.append(msg) return messages
flush() -> List of decoded messages. Decodes the packets in the internal buffer. This enables the continuation of the processing of received packets after a :exc:`ProtocolError` has been handled. :return: A (possibly empty) list of decoded messages from the buffered packets. :rtype: list(bytes) :raises ProtocolError: An invalid byte sequence has been detected.
379,167
def Close(self): if not self._connection: raise RuntimeError() self._connection.commit() self._connection.close() self._connection = None self._cursor = None self.filename = None self.read_only = None
Closes the database file. Raises: RuntimeError: if the database is not opened.
379,168
def graphviz_parser(preprocessor, tag, markup): m = DOT_BLOCK_RE.search(markup) if m: code = m.group() program = m.group().strip() output = run_graphviz(program, code) return % base64.b64encode(output).decode() else: raise ValueError( .format(SYNTAX))
Simple Graphviz parser
379,169
def _set_minimum_links(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), default=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "uint32", : , }) self.__minimum_links = t if hasattr(self, ): self._set()
Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_minimum_links is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_minimum_links() directly. YANG Description: The least number of operationally 'UP' links to indicate port-channel being UP.
379,170
def disconnect(self, si, logger, vcenter_data_model, vm_uuid, network_name=None, vm=None): logger.debug("Disconnect Interface VM: Network: ...".format(vm_uuid, network_name or "ALL")) if vm is None: vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid) if not vm: return "Warning: failed to locate vm {0} in vCenter".format(vm_uuid) if network_name: network = self.pyvmomi_service.vm_get_network_by_name(vm, network_name) if network is None: raise KeyError(.format(network_name)) else: network = None network_full_name = VMLocation.combine( [vcenter_data_model.default_datacenter, vcenter_data_model.holding_network]) default_network = self.pyvmomi_service.get_network_by_full_name(si, network_full_name) if network: return self.port_group_configurer.disconnect_network(vm, network, default_network, vcenter_data_model.reserved_networks, logger=logger) else: return self.port_group_configurer.disconnect_all_networks(vm, default_network, vcenter_data_model.reserved_networks, logger=logger)
disconnect network adapter of the vm. If 'network_name' = None - disconnect ALL interfaces :param <str> si: :param logger: :param VMwarevCenterResourceModel vcenter_data_model: :param <str> vm_uuid: the uuid of the vm :param <str | None> network_name: the name of the specific network to disconnect :param <pyvmomi vm object> vm: If the vm obj is None will use vm_uuid to fetch the object :return: Started Task
379,171
def traverse_pagination(response, endpoint, content_filter_query, query_params):
    results = response.get('results', [])
    page = 1
    while response.get('next'):
        page += 1
        response = endpoint().post(content_filter_query, **dict(query_params, page=page))
        results += response.get('results', [])
    return results
Traverse a paginated API response and extracts and concatenates "results" returned by API. Arguments: response (dict): API response object. endpoint (Slumber.Resource): API endpoint object. content_filter_query (dict): query parameters used to filter catalog results. query_params (dict): query parameters used to paginate results. Returns: list: all the results returned by the API.
379,172
def configure_api(app): from heman.api.empowering import resources as empowering_resources from heman.api.cch import resources as cch_resources from heman.api.form import resources as form_resources from heman.api import ApiCatchall for resource in empowering_resources: api.add_resource(*resource) for resource in cch_resources: api.add_resource(*resource) for resource in form_resources: api.add_resource(*resource) api.add_resource(ApiCatchall, ) api.init_app(app)
Configure API Endpoints.
379,173
def end_of_month(val):
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    if val.month == 12:
        return start_of_month(val).replace(year=val.year + 1, month=1) \
            - timedelta(microseconds=1)
    else:
        return start_of_month(val).replace(month=val.month + 1) \
            - timedelta(microseconds=1)
Return a new datetime.datetime object with values that represent a end of a month. :param val: Date to ... :type val: datetime.datetime | datetime.date :rtype: datetime.datetime
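Editor's usage sketch (added illustration; it calls end_of_month() from the row above and assumes the companion start_of_month() helper simply resets the day and time fields):

    from datetime import date, datetime, timedelta

    def start_of_month(val):
        return val.replace(day=1, hour=0, minute=0, second=0, microsecond=0)

    print(end_of_month(date(2024, 2, 10)))
    # 2024-02-29 23:59:59.999999  (leap year handled by the month arithmetic)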
379,174
def _get_labels(self, y): y = np.asarray(y) assert y.ndim == 1 labels = np.unique(y).tolist() oh = np.zeros((y.size, len(labels)), dtype=float) for i, label in enumerate(y): oh[i, labels.index(label)] = 1. return oh
Construct pylearn2 dataset labels. Parameters ---------- y : array_like, optional Labels.
379,175
def decode(data_url):
    metadata, data = data_url.rsplit(',', 1)
    _, metadata = metadata.split(':', 1)
    parts = metadata.split(';')
    if parts[-1] == 'base64':
        data = b64decode(data)
    else:
        data = unquote(data)
    for part in parts:
        if part.startswith("charset="):
            data = data.decode(part[8:])
    return data
Decode DataURL data
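Editor's round-trip check (added illustration; it calls decode() from the row above and assumes the separators reconstructed there are right):

    from base64 import b64encode

    url = 'data:text/plain;charset=utf-8;base64,' + b64encode(b'hi').decode()
    print(decode(url))  # hi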
379,176
def http(self):
    if self._use_cached_http and hasattr(self._local, 'http'):
        return self._local.http
    if self._http_replay is not None:
        http = self._http_replay
    else:
        http = _build_http()
    authorized_http = google_auth_httplib2.AuthorizedHttp(
        self._credentials, http=http)
    if self._use_cached_http:
        self._local.http = authorized_http
    return authorized_http
A thread local instance of httplib2.Http. Returns: httplib2.Http: An Http instance authorized by the credentials.
379,177
def os_version(self, value):
    if value == self._defaults['os_version'] and 'os_version' in self._values:
        del self._values['os_version']
    else:
        self._values['os_version'] = value
The os_version property. Args: value (string). the property value.
379,178
def get_allowed_operations(resource, subresource_path):
    uri = get_subresource_path_by(resource, subresource_path)
    response = resource._conn.get(path=uri)
    return response.headers['Allow']
Helper function to get the HTTP allowed methods. :param resource: ResourceBase instance from which the path is loaded. :param subresource_path: JSON field to fetch the value from. Either a string, or a list of strings in case of a nested field. :returns: A list of allowed HTTP methods.
379,179
def convert_to_dataset(obj, *, group="posterior", coords=None, dims=None): inference_data = convert_to_inference_data(obj, group=group, coords=coords, dims=dims) dataset = getattr(inference_data, group, None) if dataset is None: raise ValueError( "Can not extract {group} from {obj}! See {filename} for other " "conversion utilities.".format(group=group, obj=obj, filename=__file__) ) return dataset
Convert a supported object to an xarray dataset. This function is idempotent, in that it will return xarray.Dataset objects unchanged. Raises `ValueError` if the desired group can not be extracted. Note this goes through an InferenceData object. See `convert_to_inference_data` for more details. Parameters ---------- obj : dict, str, np.ndarray, xr.Dataset, pystan fit, pymc3 trace A supported object to convert to InferenceData: InferenceData: returns unchanged str: Attempts to load the netcdf dataset from disk pystan fit: Automatically extracts data pymc3 trace: Automatically extracts data xarray.Dataset: adds to InferenceData as only group dict: creates an xarray dataset as the only group numpy array: creates an xarray dataset as the only group, gives the array an arbitrary name group : str If `obj` is a dict or numpy array, assigns the resulting xarray dataset to this group. coords : dict[str, iterable] A dictionary containing the values that are used as index. The key is the name of the dimension, the values are the index values. dims : dict[str, List(str)] A mapping from variables to a list of coordinate names for the variable Returns ------- xarray.Dataset
379,180
def thresholdForIdentity(identity, colors):
    for threshold, _ in colors:
        if identity >= threshold:
            return threshold
    raise ValueError()
Get the best identity threshold for a specific identity value. @param identity: A C{float} nucleotide identity. @param colors: A C{list} of (threshold, color) tuples, where threshold is a C{float} and color is a C{str} to be used as a cell background. This is as returned by C{parseColors}. @return: The first C{float} threshold that the given identity is at least as big as.
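Editor's usage sketch (added illustration; it calls thresholdForIdentity() from the row above, with colors ordered from highest threshold to lowest as parseColors is said to produce — a 0.0 entry guarantees a match):

    colors = [(0.9, 'green'), (0.7, 'yellow'), (0.0, 'white')]
    print(thresholdForIdentity(0.95, colors))  # 0.9
    print(thresholdForIdentity(0.75, colors))  # 0.7
    print(thresholdForIdentity(0.10, colors))  # 0.0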
379,181
def datetime_to_time(date, time): if (255 in date) or (255 in time): raise RuntimeError("specific date and time required") time_tuple = ( date[0]+1900, date[1], date[2], time[0], time[1], time[2], 0, 0, -1, ) return _mktime(time_tuple)
Take the date and time 4-tuples and return the time in seconds since the epoch as a floating point number.
379,182
def del_character(self, name): self.query.del_character(name) self.del_graph(name) del self.character[name]
Remove the Character from the database entirely. This also deletes all its history. You'd better be sure.
379,183
def truncate_table(self, tablename): self.get(tablename).remove() self.db.commit()
SQLite3 doesn't support direct truncate, so we just use delete here
379,184
def lookup(parser, var, context, resolve=True, apply_filters=True):
    if resolve:
        try:
            return Variable(var).resolve(context)
        except VariableDoesNotExist:
            if apply_filters and var.find('|') > -1:
                return parser.compile_filter(var).resolve(context)
            return Constant(var)
        except TypeError:
            return var
    return var
Try to resolve the varialbe in a context If ``resolve`` is ``False``, only string variables are returned
379,185
def from_value(value): if isinstance(value, PagingParams): return value if isinstance(value, AnyValueMap): return PagingParams.from_map(value) map = AnyValueMap.from_value(value) return PagingParams.from_map(map)
Converts specified value into PagingParams. :param value: value to be converted :return: a newly created PagingParams.
379,186
def try_log_part(self, context=None, with_start_message=True): if context is None: context = {} self.__counter += 1 if time.time() - self.__begin_time > self.__part_log_time_seconds: self.__begin_time = time.time() context[] = self.__counter if self.__total: self.__percent_done = int(self.__counter * 100 / self.__total) context[] = self.__percent_done context[] = self.__total self.__log.info(msg=self.__log_message, context=context) return True elif self.__counter == 1: if with_start_message: self.__log.info(u"Начали цикл: " + self.__log_message) return True return False
Write a log entry if the interval from part_log_time_minutes has elapsed. :return: boolean Returns True if the log entry was written
379,187
def set_sample_weight(pipeline_steps, sample_weight=None):
    sample_weight_dict = {}
    if not isinstance(sample_weight, type(None)):
        for (pname, obj) in pipeline_steps:
            if inspect.getargspec(obj.fit).args.count('sample_weight'):
                step_sw = pname + '__sample_weight'
                sample_weight_dict[step_sw] = sample_weight
    if sample_weight_dict:
        return sample_weight_dict
    else:
        return None
Recursively iterates through all objects in the pipeline and sets sample weight. Parameters ---------- pipeline_steps: array-like List of (str, obj) tuples from a scikit-learn pipeline or related object sample_weight: array-like List of sample weight Returns ------- sample_weight_dict: A dictionary of sample_weight
379,188
def File(self, name, directory = None, create = 1): return self._lookup(name, directory, File, create)
Look up or create a File node with the specified name. If the name is a relative path (begins with ./, ../, or a file name), then it is looked up relative to the supplied directory node, or to the top level directory of the FS (supplied at construction time) if no directory is supplied. This method will raise TypeError if a directory is found at the specified path.
379,189
def mmatch(expr, delimiter, greedy, search_type, regex_match=False, exact_match=False, opts=None):
    if not opts:
        opts = __opts__
    ckminions = salt.utils.minions.CkMinions(opts)
    return ckminions._check_cache_minions(expr, delimiter, greedy, search_type,
                                          regex_match=regex_match, exact_match=exact_match)
Helper function to search for minions in master caches If 'greedy' return accepted minions that matched by the condition or absent in the cache. If not 'greedy' return the only minions have cache data and matched by the condition.
379,190
def flavor_list(request):
    try:
        return api.nova.flavor_list(request)
    except Exception:
        exceptions.handle(request, _('Unable to retrieve instance flavors.'))
        return []
Utility method to retrieve a list of flavors.
379,191
def getDetailInfo(self, CorpNum, ItemCode, MgtKey):
    if MgtKey == None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    if ItemCode == None or ItemCode == "":
        raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
    return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '?Detail', CorpNum)
Check the detailed information of an electronic statement. args CorpNum : Popbill member business registration number ItemCode : statement type code [121 - transaction statement], [122 - bill], [123 - estimate], [124 - purchase order], [125 - deposit slip], [126 - receipt] MgtKey : partner document management key return document detail information object raise PopbillException
379,192
def find_file(self, folder_id, basename, limit=500):
    search_folder = self.client.folder(folder_id=folder_id)
    offset = 0
    search_items = search_folder.get_items(limit=limit, offset=offset)
    found_files = []
    while len(search_items) > 0:
        files = [(f['id'], f['name']) for f in search_items
                 if f['name'].startswith(basename) and f['type'] == 'file']
        files.sort()
        for f_id, f_name in files:
            assert(f_name == basename or
                   (f_name.startswith(basename) and
                    f_name[len(basename):len(basename)+5] == '.part'))
        found_files.extend(files)
        offset += limit
        search_items = search_folder.get_items(limit=limit, offset=offset)
    return [f[0] for f in found_files]
Finds a file based on a box path Returns a list of file IDs Returns multiple file IDs if the file was split into parts with the extension '.partN' (where N is an integer)
379,193
def fetch_session_start_times(data_dir, pivot, session_dates): session_start_times = SessionStartTimesDataset() df = session_start_times.fetch(pivot, session_dates) save_to_csv(df, data_dir, "session-start-times") log.info("Dates requested:", len(session_dates)) found = pd.to_datetime(df[], format="%Y-%m-%d %H:%M:%S").dt.date.unique() log.info("Dates found:", len(found)) return df
:param data_dir: (str) directory in which the output file will be saved :param pivot: (int) congressperson document to use as a pivot for scraping the data :param session_dates: (list) datetime objects to fetch the start times for
379,194
def generate_sinusoidal_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={'period': sps.uniform(loc=0.04, scale=500.0),
                    'fourierorder': [2, 10],
                    'amplitude': sps.uniform(loc=0.1, scale=0.9),
                    'phioffset': 0.0},
        magsarefluxes=False
):
    if mags is None:
        mags = np.full_like(times, 0.0)
    if errs is None:
        errs = np.full_like(times, 0.0)
    epoch = npr.random()*(times.max() - times.min()) + times.min()
    period = paramdists['period'].rvs(size=1)
    fourierorder = npr.randint(paramdists['fourierorder'][0],
                               high=paramdists['fourierorder'][1])
    amplitude = paramdists['amplitude'].rvs(size=1)
    if magsarefluxes and amplitude < 0.0:
        amplitude = -amplitude
    elif not magsarefluxes and amplitude > 0.0:
        amplitude = -amplitude
    ampcomps = [abs(amplitude/2.0)/float(x) for x in range(1, fourierorder+1)]
    phacomps = [paramdists['phioffset']*float(x) for x in range(1, fourierorder+1)]
    modelmags, phase, ptimes, pmags, perrs = sinusoidal.sine_series_sum(
        [period, epoch, ampcomps, phacomps], times, mags, errs
    )
    timeind = np.argsort(ptimes)
    mtimes = ptimes[timeind]
    mmags = modelmags[timeind]
    merrs = perrs[timeind]
    mphase = phase[timeind]
    modeldict = {
        'vartype': 'sinusoidal',
        'params': {x: y for x, y in zip(['period', 'epoch', 'amplitude',
                                         'fourierorder', 'fourieramps',
                                         'fourierphases'],
                                        [period, epoch, amplitude,
                                         fourierorder, ampcomps, phacomps])},
        'times': mtimes,
        'mags': mmags,
        'errs': merrs,
        'phase': mphase,
        'varperiod': period,
        'varamplitude': amplitude
    }
    return modeldict
This generates fake sinusoidal light curves. This can be used for a variety of sinusoidal variables, e.g. RRab, RRc, Cepheids, Miras, etc. The functions that generate these model LCs below implement the following table:: ## FOURIER PARAMS FOR SINUSOIDAL VARIABLES # # type fourier period [days] # order dist limits dist # RRab 8 to 10 uniform 0.45--0.80 uniform # RRc 3 to 6 uniform 0.10--0.40 uniform # HADS 7 to 9 uniform 0.04--0.10 uniform # rotator 2 to 5 uniform 0.80--120.0 uniform # LPV 2 to 5 uniform 250--500.0 uniform FIXME: for better model LCs, figure out how scipy.signal.butter works and low-pass filter using scipy.signal.filtfilt. Parameters ---------- times : np.array This is an array of time values that will be used as the time base. mags,errs : np.array These arrays will have the model added to them. If either is None, `np.full_like(times, 0.0)` will used as a substitute and the model light curve will be centered around 0.0. paramdists : dict This is a dict containing parameter distributions to use for the model params, containing the following keys :: {'period', 'fourierorder', 'amplitude', 'phioffset'} The values of these keys should all be 'frozen' scipy.stats distribution objects, e.g.: https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions The variability epoch will be automatically chosen from a uniform distribution between `times.min()` and `times.max()`. The `amplitude` will be flipped automatically as appropriate if `magsarefluxes=True`. magsarefluxes : bool If the generated time series is meant to be a flux time-series, set this to True to get the correct sign of variability amplitude. Returns ------- dict A dict of the form below is returned:: {'vartype': 'sinusoidal', 'params': {'period': generated value of period, 'epoch': generated value of epoch, 'amplitude': generated value of amplitude, 'fourierorder': generated value of fourier order, 'fourieramps': generated values of fourier amplitudes, 'fourierphases': generated values of fourier phases}, 'times': the model times, 'mags': the model mags, 'errs': the model errs, 'varperiod': the generated period of variability == 'period' 'varamplitude': the generated amplitude of variability == 'amplitude'}
379,195
def get_clamav_conf(filename): if os.path.isfile(filename): return ClamavConfig(filename) log.warn(LOG_PLUGIN, "No ClamAV config file found at %r.", filename)
Initialize clamav configuration.
379,196
def waitForSlotEvent(self, flags=0): tmp = 0 (rv, slot) = self.lib.C_WaitForSlotEvent(flags, tmp) if rv != CKR_OK: raise PyKCS11Error(rv) return slot
C_WaitForSlotEvent :param flags: 0 (default) or `CKF_DONT_BLOCK` :type flags: integer :return: slot :rtype: integer
379,197
def unique_id(self): chain = self.parent.parent.id residue = self.parent.id return chain, residue, self.id
Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id)
379,198
def parse_ical(vcal): vcal = vcal.replace(, ).replace(, ) vevents = vcal.split() del(vevents[0]) events = [] for vevent in vevents: event = {} for line in vevent.split(): line = line.split(, 1) key = line[0].lower() if len(line) <= 1 or key == : continue if key.startswith(): event[key] = unix_ts(dateutil.parser.parse(line[1])) continue if not key.startswith(): event[key] = line[1] continue event[] = event.get(, []) attachment = {} for x in [x.split() for x in line[0].split()]: if x[0].lower() in [, ]: attachment[x[0].lower()] = x[1] attachment[] = b64decode(line[1]).decode() event[].append(attachment) events.append(event) return events
Parse Opencast schedule iCalendar file and return events as dict
379,199
def stage_http_request(self, conn_id, version, url, target, method, headers, payload): self._http_request_version = version self._http_request_conn_id = conn_id self._http_request_url = url self._http_request_target = target self._http_request_method = method self._http_request_headers = headers self._http_request_payload = payload
Set request HTTP information including url, headers, etc.