Dataset columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
def deserialize(self, data, columns=None):
    if not data:
        return pd.DataFrame()

    meta = data[0][METADATA] if isinstance(data, list) else data[METADATA]
    index = INDEX in meta
    if columns:
        if index:
            columns = columns[:]
            columns.extend(meta[INDEX])
        if len(columns) > len(set(columns)):
            raise Exception("Duplicate columns specified, cannot de-serialize")

    if not isinstance(data, list):
        df = self.converter.objify(data, columns)
    else:
        df = pd.concat([self.converter.objify(d, columns) for d in data],
                       ignore_index=not index)
    if index:
        df = df.set_index(meta[INDEX])
    if meta[TYPE] == 'series':  # string literal reconstructed; stripped in the source
        return df[df.columns[0]]
    return df
Deserializes SON to a DataFrame

Parameters
----------
data: SON data
columns: None, or list of strings
    optionally you can deserialize a subset of the data in the SON.
    Index columns are ALWAYS deserialized, and should not be specified

Returns
-------
pandas dataframe or series
15,201
def __buildDomainRanges(self, aProp):
    # URIRef strings reconstructed (schema.org domainIncludes/rangeIncludes);
    # the original string literals were stripped in the source.
    domains = chain(
        aProp.rdflib_graph.objects(
            None, rdflib.term.URIRef(u'http://schema.org/domainIncludes')),
        aProp.rdflib_graph.objects(None, rdflib.RDFS.domain))
    ranges = chain(
        aProp.rdflib_graph.objects(
            None, rdflib.term.URIRef(u'http://schema.org/rangeIncludes')),
        aProp.rdflib_graph.objects(None, rdflib.RDFS.range))

    for x in domains:
        if isBlankNode(x):
            aProp.domains += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)]
        else:
            aClass = self.get_class(uri=str(x))
            if aClass:
                aProp.domains += [aClass]
                aClass.domain_of += [aProp]
            else:
                aProp.domains += [OntoClass(x, None, self.namespaces, ext_model=True)]

    # NOTE: the ranges loop was truncated in the source; reconstructed by
    # symmetry with the domains loop above.
    for x in ranges:
        if isBlankNode(x):
            aProp.ranges += [RDF_Entity(x, None, self.namespaces, is_Bnode=True)]
        else:
            aClass = self.get_class(uri=str(x))
            if aClass:
                aProp.ranges += [aClass]
                aClass.range_of += [aProp]
            else:
                aProp.ranges += [OntoClass(x, None, self.namespaces, ext_model=True)]
extract domain/range details and add to Python objects
15,202
def force_bytes(s, encoding='utf-8', errors='strict'):
    # defaults and literals reconstructed; the original strings were stripped
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
Turn ``s`` into a bytes object, similar to django.utils.encoding.force_bytes.
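A brief usage sketch, assuming the reconstructed 'utf-8'/'strict' defaults above:

force_bytes("héllo")                       # -> b'h\xc3\xa9llo'
force_bytes(b"raw")                        # bytes pass through unchanged
force_bytes("héllo", encoding="latin-1")   # -> b'h\xe9llo'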
15,203
def schema(ctx, schema):
    data = yaml.load(schema)
    if not isinstance(data, (list, tuple)):
        data = [data]
    with click.progressbar(data, label=schema.name) as bar:
        for schema in bar:
            # dict key reconstructed; the original string literal was stripped
            ctx.obj['api'].schemata.upsert(schema)
Load schema definitions from a YAML file.
15,204
def check_profile_id(self, profile_name: str) -> Profile:
    profile = None
    with suppress(ProfileNotExistsException):
        profile = Profile.from_username(self.context, profile_name)
    profile_exists = profile is not None
    id_filename = self._get_id_filename(profile_name)
    try:
        with open(id_filename, 'rb') as id_file:  # mode reconstructed
            profile_id = int(id_file.read())
        if (not profile_exists) or \
                (profile_id != profile.userid):
            if profile_exists:
                self.context.log("Profile {0} does not match the stored "
                                 "unique ID {1}.".format(profile_name, profile_id))
            else:
                self.context.log("Trying to find profile {0} using its unique "
                                 "ID {1}.".format(profile_name, profile_id))
            profile_from_id = Profile.from_id(self.context, profile_id)
            newname = profile_from_id.username
            self.context.log("Profile {0} has changed its name to {1}.".format(
                profile_name, newname))
            # format-string keys reconstructed ('profile' / 'target')
            if (format_string_contains_key(self.dirname_pattern, 'profile') or
                    format_string_contains_key(self.dirname_pattern, 'target')):
                os.rename(self.dirname_pattern.format(profile=profile_name.lower(),
                                                      target=profile_name.lower()),
                          self.dirname_pattern.format(profile=newname.lower(),
                                                      target=newname.lower()))
            else:
                os.rename('{0}/{1}'.format(self.dirname_pattern.format(),
                                           profile_name.lower()),
                          '{0}/{1}'.format(self.dirname_pattern.format(),
                                           newname.lower()))
            return profile_from_id
        return profile
    except (FileNotFoundError, ValueError):
        pass
    if profile_exists:
        self.save_profile_id(profile)
        return profile
    raise ProfileNotExistsException("Profile {0} does not exist.".format(profile_name))
Consult the locally stored ID of the profile with the given name; check whether the
ID matches and whether the name has changed, and return the current name of the
profile. Store the profile's ID if it is not stored yet.

:param profile_name: Profile name
:return: Instance of current profile
15,205
def copyPropList(self, cur):
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    ret = libxml2mod.xmlCopyPropList(self._o, cur__o)
    if ret is None:
        # error message reconstructed; the original string literal was stripped
        raise treeError('xmlCopyPropList() failed')
    __tmp = xmlAttr(_obj=ret)
    return __tmp
Do a copy of an attribute list.
15,206
def associate_failure_node(self, parent, child=None, **kwargs):
    # association type reconstructed; the original string literal was stripped
    return self._assoc_or_create('failure_nodes', parent, child, **kwargs)
Add a node to run on failure.

=====API DOCS=====
Add a node to run on failure.

:param parent: Primary key of parent node to associate failure node to.
:type parent: int
:param child: Primary key of child node to be associated.
:type child: int
:param `**kwargs`: Fields used to create child node if ``child`` is not provided.
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict

=====API DOCS=====
15,207
def spl_json(self):
    _splj = {}
    _splj["type"] = self._type
    _splj["value"] = self._value
    return _splj
Private method. May be removed at any time.
15,208
def delete_dependency(self, from_task_name, to_task_name):
    # log message reconstructed; the original string literal was stripped
    logger.debug('Deleting dependency from {0} to {1}'.format(from_task_name,
                                                              to_task_name))
    if not self.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current state: %s"
                           % self.state.status)
    self.delete_edge(from_task_name, to_task_name)
    self.commit()
Delete a dependency between two tasks.
15,209
def cached_value(self, source_file, configuration):
    key = self._create_cache_key(source_file)
    entry = self.__index.get(key)
    if entry is None:
        return None
    configsig = self._create_config_signature(configuration)
    if configsig != entry.configsig:
        return None
    for id_, sig in entry.filesigs:
        if self.__filename_rep.is_file_modified(id_, sig):
            return None
    cachefilename = self._create_cache_filename(source_file)
    decls = self._read_file(cachefilename)
    return decls
Return the cached declarations or None.

:param source_file: Header file name
:type source_file: str
:param configuration: Configuration object
:type configuration: :class:`parser.xml_generator_configuration_t`
:rtype: Cached declarations or None
15,210
def restore(self, fade=False):
    if self.is_coordinator:
        self.device.stop()
Restore the state of a device to that which was previously saved.

For coordinator devices restore everything. For slave devices only restore
volume etc., not transport info (transport info comes from the slave's
coordinator).

Args:
    fade (bool): Whether volume should be faded up on restore.
15,211
def decode_chain_list(in_bytes):
    tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN
    out_strings = []
    for i in range(tot_strings):
        out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:
                         i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN]
        out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE))
    return out_strings
Convert a list of bytes to a list of strings. Each string is of length
mmtf.CHAIN_LEN.

:param in_bytes: the input bytes
:return: the decoded list of strings
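An illustrative sketch, assuming CHAIN_LEN == 4 and NULL_BYTE == '\x00' as in the mmtf-python constants (both are assumptions, not confirmed by this snippet):

buffer = b"A\x00\x00\x00BC\x00\x00\x00\x00\x00\x00"
# decode_chain_list(buffer) -> ['A', 'BC', '']  (three 4-byte slots, nulls stripped)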
15,212
def fit_model(ts, sc=None):
    assert sc is not None, "Missing SparkContext"

    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.ARGARCH.fitModel(
        _py2java(sc, Vectors.dense(ts)))
    return ARGARCHModel(jmodel=jmodel, sc=sc)
Fits an AR(1) + GARCH(1, 1) model to the given time series.

Parameters
----------
ts: the time series to which we want to fit a AR+GARCH model as a Numpy array

Returns an ARGARCH model
15,213
def create_producer(self):
    with self.connection_pool.acquire(block=True) as conn:
        yield self.producer(conn)
Context manager that yields an instance of ``Producer``.
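A hedged usage sketch: the yield implies create_producer is wrapped with contextlib.contextmanager; `publisher` and the publish call are hypothetical names.

with publisher.create_producer() as producer:
    producer.publish({"event": "created"}, routing_key="tasks")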
15,214
def install_vendored(cls, prefix, root=None, expose=None):
    from pex import vendor

    root = cls._abs_root(root)
    vendored_path_items = [spec.relpath for spec in vendor.iter_vendor_specs()]

    installed = list(cls._iter_installed_vendor_importers(prefix, root,
                                                          vendored_path_items))
    # assertion message reconstructed; the original string literal was stripped
    assert len(installed) <= 1, (
        'Unexpected extra vendor importers installed:\n\t{}'.format(
            '\n\t'.join(map(str, installed)))
    )
    if installed:
        vendor_importer = installed[0]
    else:
        vendor_importer = cls.install(uninstallable=True,
                                      prefix=prefix,
                                      path_items=vendored_path_items,
                                      root=root)

    if expose:
        exposed_paths = []
        for path in cls.expose(expose, root):
            sys.path.insert(0, path)
            exposed_paths.append(os.path.relpath(path, root))
        vendor_importer._expose(exposed_paths)
Install an importer for all vendored code with the given import prefix.

All distributions listed in ``expose`` will also be made available for import
in direct, un-prefixed form.

:param str prefix: The import prefix the installed importer will be responsible for.
:param str root: The root path of the distribution containing the vendored code. NB: This is
                 the path to the pex code, which serves as the root under which code is
                 vendored at ``pex/vendor/_vendored``.
:param expose: Optional names of distributions to expose for direct, un-prefixed import.
:type expose: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
15,215
def reference_preprocessing(job, config):
    # log message reconstructed; attribute names inferred from the assignments below
    job.fileStore.logToMaster('Preparing reference files')
    genome_id = config.genome_fasta
    if getattr(config, 'genome_fai', None) is None:
        config.genome_fai = job.addChildJobFn(run_samtools_faidx,
                                              genome_id,
                                              cores=config.cores).rv()
    if getattr(config, 'genome_dict', None) is None:
        config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
                                               genome_id,
                                               cores=config.cores,
                                               memory=config.xmx).rv()
    return config
Creates a genome fasta index and sequence dictionary file if not already
present in the pipeline config.

:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
    Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
15,216
def remove(name, path):
    # ret keys, module names, and messages reconstructed; the original
    # string literals were stripped in the source
    ret = {'name': name,
           'path': path,
           'result': True,
           'changes': {},
           'comment': ''}

    isinstalled = __salt__['alternatives.check_exists'](name, path)
    if isinstalled:
        if __opts__['test']:
            ret['comment'] = ('Alternative for {0} will be removed'
                              .format(name))
            ret['result'] = None
            return ret
        __salt__['alternatives.remove'](name, path)
        current = __salt__['alternatives.show_current'](name)
        if current:
            ret['result'] = True
            ret['comment'] = ('Alternative for {0} removed. Falling back to '
                              'path {1}').format(name, current)
            ret['changes'] = {'path': current}
            return ret

        ret['comment'] = 'Alternative for {0} removed'.format(name)
        ret['changes'] = {}
        return ret

    current = __salt__['alternatives.show_current'](name)
    if current:
        ret['result'] = True
        ret['comment'] = ("Alternative for {0} is set to its default path "
                          "{1}").format(name, current)
        return ret

    ret['result'] = False
    ret['comment'] = "Alternative for {0} doesn't exist".format(name)
    return ret
Removes installed alternative for defined <name> and <path>
or fallback to default alternative, if some defined before.

name
    is the master name for this link group
    (e.g. pager)

path
    is the location of one of the alternative target files.
    (e.g. /usr/bin/less)
15,217
def _call_function(name, returner=None, **kwargs):
    argspec = salt.utils.args.get_function_argspec(__salt__[name])

    # func_kw: keyword arguments (with defaults) accepted by the target function
    func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):],
                       argspec.defaults or []))
    # func_args: positional arguments accepted by the target function
    func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]

    arg_type, kw_to_arg_type, na_type, kw_type = [], {}, {}, False
    # kwargs key reconstructed; the original string literal was stripped
    for funcset in reversed(kwargs.get('func_args') or []):
        if not isinstance(funcset, dict):
            arg_type.append(funcset)
        else:
            for kwarg_key in six.iterkeys(funcset):
                if kwarg_key in func_args:
                    kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
                    continue
                else:
                    func_kw.update(funcset)
    arg_type.reverse()
    for arg in func_args:
        if arg in kw_to_arg_type:
            arg_type.append(kw_to_arg_type[arg])
    # NOTE: the tail of the original function was truncated in the source;
    # this minimal reconstruction just invokes the module function.
    mret = __salt__[name](*arg_type, **func_kw)
    return mret
Calls a function from the specified module.

:param name:
:param kwargs:
:return:
15,218
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
    nrows, ncols = get_terminal_size()

    works_done = []
    for task in self.iflat_tasks(status=status, nids=nids):
        work = task.work

        if work not in works_done:
            works_done.append(work)
            if work.history or full_history:
                cprint(make_banner(str(work), width=ncols, mark="="),
                       **work.status.color_opts)
                print(work.history.to_string(metadata=metadata))

        if task.history or full_history:
            cprint(make_banner(str(task), width=ncols, mark="="),
                   **task.status.color_opts)
            print(task.history.to_string(metadata=metadata))

    if self.history or full_history:
        cprint(make_banner(str(self), width=ncols, mark="="),
               **self.status.color_opts)
        print(self.history.to_string(metadata=metadata))
Print the history of the flow to stdout.

Args:
    status: if not None, only the tasks with this status are selected
    full_history: Print full info set, including nodes with an empty history.
    nids: optional list of node identifiers used to filter the tasks.
    metadata: print history metadata (experimental)
15,219
def unpack_response(file_information_class, buffer):
    structs = smbprotocol.query_info
    resp_structure = {
        FileInformationClass.FILE_DIRECTORY_INFORMATION: structs.FileDirectoryInformation,
        FileInformationClass.FILE_NAMES_INFORMATION: structs.FileNamesInformation,
        FileInformationClass.FILE_BOTH_DIRECTORY_INFORMATION: structs.FileBothDirectoryInformation,
        FileInformationClass.FILE_ID_BOTH_DIRECTORY_INFORMATION: structs.FileIdBothDirectoryInformation,
        FileInformationClass.FILE_FULL_DIRECTORY_INFORMATION: structs.FileFullDirectoryInformation,
        FileInformationClass.FILE_ID_FULL_DIRECTORY_INFORMATION: structs.FileIdFullDirectoryInformation,
    }[file_information_class]

    query_results = []
    current_offset = 0
    is_next = True
    while is_next:
        result = resp_structure()
        result.unpack(buffer[current_offset:])
        query_results.append(result)
        # field name reconstructed; the original string literal was stripped
        current_offset += result['next_entry_offset'].get_value()
        is_next = result['next_entry_offset'].get_value() != 0
    return query_results
Pass in the buffer value from the response object to unpack it and return a
list of query response structures for the request.

:param buffer: The raw bytes value of the SMB2QueryDirectoryResponse buffer field.
:return: List of query_info.* structures based on the FileInformationClass used
    in the initial query request.
15,220
def _float(text):
    # sign characters and the exponent format reconstructed from the
    # doctests below; string literals were stripped in the source
    text = text.strip()
    if text[0] in ('-', '+'):
        text = "%s.%s" % (text[0], text[1:])
    else:
        text = "+.%s" % text

    if "+" in text[1:] or "-" in text[1:]:
        value, exp_sign, expo = (text.rpartition('+') if '+' in text[1:]
                                 else text.rpartition('-'))
        v = float('{value}e{exp_sign}{expo}'.format(value=value,
                                                    exp_sign=exp_sign,
                                                    expo=expo))
    else:
        v = float(text)
    return v
Function to convert the 'decimal point assumed' format of TLE to an actual float

>>> _float('0000+0')
0.0
>>> _float('+0000+0')
0.0
>>> _float('34473-3')
0.00034473
>>> _float('-60129-4')
-6.0129e-05
>>> _float('+45871-4')
4.5871e-05
15,221
def sym(self, nested_scope=None):
    operation = self.children[0].operation()
    expr = self.children[1].sym(nested_scope)
    return operation(expr)
Return the corresponding symbolic number.
15,222
def dns_get_conf(self, domainName, environment):
    response = self.client.service.dns_get_conf(domainName, environment)
    dns_config = CotendoDNS(response)
    return dns_config
Returns the existing domain configuration and token from the ADNS
15,223
def validate(self, value, model=None, context=None):
    regex = self.regex()
    match = regex.match(value)
    if not match:
        return Error(self.not_email)
    return Error()  # an empty Error means the value is valid
Validate

Perform value validation and return result

:param value: value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
15,224
def setProduct(self, cache=False, *args, **kwargs):
    # dict keys reconstructed; the original string literals were stripped
    if cache:
        try:
            prods = self.allmambuproductsclass(*args, **kwargs)
        except AttributeError:
            from .mambuproduct import AllMambuProducts
            self.allmambuproductsclass = AllMambuProducts
            prods = self.allmambuproductsclass(*args, **kwargs)
        for prod in prods:
            if prod['encodedKey'] == self['productTypeKey']:
                self['product'] = prod
        try:
            prods.noinit
        except AttributeError:
            return 1
        return 0

    try:
        product = self.mambuproductclass(entid=self['productTypeKey'],
                                         *args, **kwargs)
    except AttributeError:
        from .mambuproduct import MambuProduct
        self.mambuproductclass = MambuProduct
        product = self.mambuproductclass(entid=self['productTypeKey'],
                                         *args, **kwargs)
    self['product'] = product
    return 1
Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu.
15,225
def _edit_tags(self, tag, items, locked=True, remove=False):
    if not isinstance(items, list):
        items = [items]
    # attribute suffix reconstructed: tag name is pluralized (e.g. 'genre' -> 'genres')
    value = getattr(self, tag + 's')
    existing_cols = [t.tag for t in value if t and remove is False]
    d = tag_helper(tag, existing_cols + items, locked, remove)
    self.edit(**d)
    self.refresh()
Helper to edit and refresh tags.

Parameters:
    tag (str): tag name
    items (list): list of tags to add
    locked (bool): lock this field.
    remove (bool): If this is active remove the tags in items.
15,226
def add_edge(self, source, target):
    edge = Edge(len(self.edges))
    self.edges.append(edge)
    source.out_edges.append(edge.idx)
    target.in_edges.append(edge.idx)
    edge.source = source.idx
    edge.target = target.idx
    return edge
Returns a new edge connecting source and target vertices.

Args:
    source: The source Vertex.
    target: The target Vertex.

Returns:
    A new Edge linking source to target.
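A minimal sketch of the vertex shape this method assumes; the attribute names come from the code above, but the class itself is hypothetical:

class Vertex:
    def __init__(self, idx):
        self.idx = idx
        self.in_edges = []    # indices into graph.edges
        self.out_edges = []   # indices into graph.edges

# add_edge(v0, v1) appends a new Edge to self.edges, records its index on
# v0.out_edges and v1.in_edges, and stores v0.idx / v1.idx on the edge.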
15,227
def encrypt(api_context, request_bytes, custom_headers):
    key = Random.get_random_bytes(_AES_KEY_SIZE)
    iv = Random.get_random_bytes(_BLOCK_SIZE)
    _add_header_client_encryption_key(api_context, key, custom_headers)
    _add_header_client_encryption_iv(iv, custom_headers)
    request_bytes = _encrypt_request_bytes(request_bytes, key, iv)
    _add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers)
    return request_bytes
:type api_context: bunq.sdk.context.ApiContext
:type request_bytes: bytes
:type custom_headers: dict[str, str]

:rtype: bytes
15,228
def absorb(self, trits, offset=0, length=None):
    # Pad the input so it divides evenly into hash-sized chunks.
    pad = ((len(trits) % HASH_LENGTH) or HASH_LENGTH)
    trits += [0] * (HASH_LENGTH - pad)

    if length is None:
        length = len(trits)

    if length < 1:
        # exception message and context keys reconstructed; the original
        # string literals were stripped
        raise with_context(
            exc=ValueError('Invalid length passed to ``absorb``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            },
        )

    while offset < length:
        start = offset
        stop = min(start + HASH_LENGTH, length)
        self._state[0:stop - start] = trits[start:stop]
        self._transform()
        offset += HASH_LENGTH
Absorb trits into the sponge.

:param trits: Sequence of trits to absorb.
:param offset: Starting offset in ``trits``.
:param length: Number of trits to absorb. Defaults to ``len(trits)``.
15,229
def check_api_key(request, key, hproPk):
    if settings.PIAPI_STANDALONE:
        return True

    (_, _, hproject) = getPlugItObject(hproPk)
    if not hproject:
        return False
    if hproject.plugItApiKey is None or hproject.plugItApiKey == '':
        return False
    return hproject.plugItApiKey == key
Check if an API key is valid
15,230
def create(self, project, title, href, **attrs):
    # dict keys reconstructed from the parameter names
    attrs.update({'project': project, 'title': title, 'href': href})
    return self._new_resource(payload=attrs)
Create a new :class:`WikiLink`

:param project: :class:`Project` id
:param title: title of the wiki link
:param href: href for the wiki link
:param attrs: optional attributes for the :class:`WikiLink`
15,231
def s2p(self):
    M_P = 7.28897050  # proton mass excess in MeV
    f = lambda parent, daughter: -parent + daughter + 2 * M_P
    # derived-quantity name reconstructed; the original string literal was stripped
    return self.derived('s2p', (-2, 0), f)
Return the two-proton separation energy.
15,232
def convertDict2Attrs(self, *args, **kwargs):
    for n, l in enumerate(self.attrs):
        try:
            params = self.params
        except AttributeError:
            params = {}
        kwargs.update(params)
        try:
            loan = self.mambuloanclass(urlfunc=None, entid=None, *args, **kwargs)
        except AttributeError:
            self.mambuloanclass = self.itemclass
            loan = self.mambuloanclass(urlfunc=None, entid=None, *args, **kwargs)
        loan.init(l, *args, **kwargs)
        self.attrs[n] = loan
The trick for iterable Mambu Objects comes here:

You iterate over each element of the responded List from Mambu, and create a
Mambu Loan (or your own itemclass) object for each one, initializing them one
at a time, and changing the attrs attribute (which just holds a list of plain
dictionaries) with a MambuLoan (or your own itemclass) just created.

.. todo:: pass a valid (perhaps default) urlfunc, and its corresponding id to
          entid to each itemclass, telling MambuStruct not to connect() by
          default. It's desirable to connect at any other further moment to
          refresh some element in the list.
15,233
def intersect(self, other):
    if not isinstance(other, self.__class__):
        m = "You can only intersect striplogs with each other."
        raise StriplogError(m)
    result = []
    for iv in self:
        for jv in other:
            try:
                result.append(iv.intersect(jv))
            except IntervalError:
                # The intervals don't overlap
                pass
    return Striplog(result)
Makes a striplog of all intersections.

Args:
    Striplog. The striplog instance to intersect with.

Returns:
    Striplog. The result of the intersection.
15,234
def fetchMore(self, index):
    sourceModel = self.sourceModel()
    if not sourceModel:
        return False
    return sourceModel.fetchMore(self.mapToSource(index))
Fetch additional data under *index*.
15,235
def parse_response(response):
    # header name reconstructed; the original string literal was stripped
    if response.headers.get('Content-Type', JSON_TYPE).startswith(JSON_TYPE):
        return ResponseObject(json.loads(response.body))
    else:
        return response.body
Parse a response and return a dictionary if the content type is application/json.

:param response: HTTPRequest
:return: dictionary for JSON content type, otherwise the response body
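A small sketch with a stub response object; the attribute names mirror the code above, and JSON_TYPE is assumed to be 'application/json':

class FakeResponse:
    headers = {"Content-Type": "application/json"}
    body = '{"ok": true}'

parse_response(FakeResponse())   # -> ResponseObject({'ok': True})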
15,236
def rm_r(sftp, path):
    files = sftp.listdir(path)
    for f in files:
        filepath = os.path.join(path, f)
        # log message reconstructed; the original string literal was stripped
        logger.info('removing %s' % (filepath))
        try:
            sftp.remove(filepath)
        except IOError:
            # filepath is a directory; recurse into it
            rm_r(sftp, filepath)
Recursively delete contents of path https://stackoverflow.com/a/23256181
15,237
def shell(command, *args):
    # Python 2 source, kept as such. The LOCALE key is reconstructed;
    # the original string literal was stripped.
    if args:
        command = command.format(*args)
    print LOCALE['shell'].format(command)
    try:
        return subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError, ex:
        return ex
Pass a command into the shell.
15,238
def get_typecast(self):
    # punctuation value reconstructed: '::' is the SQL typecast operator
    midx, marker = self.token_next_by(m=(T.Punctuation, '::'))
    nidx, next_ = self.token_next(midx, skip_ws=False)
    return next_.value if next_ else None
Returns the typecast of this object as a string, or ``None``.
15,239
def cumprod_to_tensor_axis(self, cumprod):
    try:
        return len(self) - 1 - self.cumprod[::-1].index(cumprod)
    except ValueError:
        return None
Maximum tensor axis i such that self.cumprod[i] == cumprod, or None.
15,240
def get_file_info(hash, context=None):
    # SQL statement names reconstructed; the original string literals were stripped
    if context is None:
        stmt = _get_sql('get-file-info.sql')
        args = dict(hash=hash)
    else:
        stmt = _get_sql('get-file-info-in-context.sql')
        id, version = get_id_n_version(context)
        args = dict(hash=hash, id=id, version=version)
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(stmt, args)
            try:
                filename, media_type = cursor.fetchone()
            except TypeError:  # fetchone() returned None
                raise FileNotFound(hash)
    return filename, media_type
Returns information about the file, identified by ``hash``. If the `context` (an ident-hash) is supplied, the information returned will be specific to that context.
15,241
def load_xml(self, xmlfile, **kwargs):
    # keyword names, config keys, and data keys reconstructed; the original
    # string literals were stripped in the source
    extdir = kwargs.get('extdir', self.extdir)
    coordsys = kwargs.get('coordsys', 'CEL')
    if not os.path.isfile(xmlfile):
        xmlfile = os.path.join(fermipy.PACKAGE_DATA, 'catalogs', xmlfile)

    root = ElementTree.ElementTree(file=xmlfile).getroot()
    diffuse_srcs = []
    srcs = []
    ra, dec = [], []

    for s in root.findall('source'):
        src = Source.create_from_xml(s, extdir=extdir)
        if src.diffuse:
            diffuse_srcs += [src]
        else:
            srcs += [src]
            ra += [src['ra']]
            dec += [src['dec']]

    src_skydir = SkyCoord(ra=np.array(ra) * u.deg,
                          dec=np.array(dec) * u.deg)
    radec = np.vstack((src_skydir.ra.deg, src_skydir.dec.deg)).T
    glonlat = np.vstack((src_skydir.galactic.l.deg,
                         src_skydir.galactic.b.deg)).T

    offset = self.skydir.separation(src_skydir).deg
    offset_cel = wcs_utils.sky_to_offset(self.skydir,
                                         radec[:, 0], radec[:, 1], 'CEL')
    offset_gal = wcs_utils.sky_to_offset(self.skydir,
                                         glonlat[:, 0], glonlat[:, 1], 'GAL')

    m0 = get_skydir_distance_mask(src_skydir, self.skydir,
                                  self.config['src_radius'])
    m1 = get_skydir_distance_mask(src_skydir, self.skydir,
                                  self.config['src_roiwidth'],
                                  square=True, coordsys=coordsys)
    m = (m0 & m1)
    srcs = np.array(srcs)[m]
    for i, s in enumerate(srcs):
        s.data['offset'] = offset[m][i]
        s.data['offset_ra'] = offset_cel[:, 0][m][i]
        s.data['offset_dec'] = offset_cel[:, 1][m][i]
        s.data['offset_glon'] = offset_gal[:, 0][m][i]
        s.data['offset_glat'] = offset_gal[:, 1][m][i]
        self.load_source(s, False,
                         merge_sources=self.config['merge_sources'])

    for i, s in enumerate(diffuse_srcs):
        self.load_source(s, False,
                         merge_sources=self.config['merge_sources'])

    self._build_src_index()
    return srcs
Load sources from an XML file.
15,242
def build_kernel_to_data(self, Y, knn=None, bandwidth=None,
                         bandwidth_scale=None):
    if knn is None:
        knn = self.knn
    if bandwidth is None:
        bandwidth = self.bandwidth
    if bandwidth_scale is None:
        bandwidth_scale = self.bandwidth_scale
    if knn > self.data.shape[0]:
        warnings.warn("Cannot set knn ({k}) to be greater than "
                      "n_samples ({n}). Setting knn={n}".format(
                          k=knn, n=self.data.shape[0]))

    Y = self._check_extension_shape(Y)
    tasklogger.log_start("KNN search")
    if self.decay is None or self.thresh == 1:
        # binary connectivity matrix; mode string reconstructed
        K = self.knn_tree.kneighbors_graph(
            Y, n_neighbors=knn, mode='connectivity')
        tasklogger.log_complete("KNN search")
    else:
        # sparse alpha-decaying kernel
        knn_tree = self.knn_tree
        search_knn = min(knn * 20, self.data_nu.shape[0])
        distances, indices = knn_tree.kneighbors(
            Y, n_neighbors=search_knn)
        self._check_duplicates(distances, indices)
        tasklogger.log_complete("KNN search")
        tasklogger.log_start("affinities")
        if bandwidth is None:
            bandwidth = distances[:, knn - 1]
        bandwidth = bandwidth * bandwidth_scale
        radius = bandwidth * np.power(-1 * np.log(self.thresh),
                                      1 / self.decay)
        update_idx = np.argwhere(
            np.max(distances, axis=1) < radius).reshape(-1)
        tasklogger.log_debug("search_knn = {}; {} remaining".format(
            search_knn, len(update_idx)))
        if len(update_idx) > 0:
            distances = [d for d in distances]
            indices = [i for i in indices]
        while len(update_idx) > Y.shape[0] // 10 and \
                search_knn < self.data_nu.shape[0] / 2:
            # widen the knn search for points whose radius was not covered
            search_knn = min(search_knn * 20, self.data_nu.shape[0])
            dist_new, ind_new = knn_tree.kneighbors(
                Y[update_idx], n_neighbors=search_knn)
            for i, idx in enumerate(update_idx):
                distances[idx] = dist_new[i]
                indices[idx] = ind_new[i]
            update_idx = [i for i, d in enumerate(distances)
                          if np.max(d) < (radius if isinstance(
                              bandwidth, numbers.Number) else radius[i])]
            tasklogger.log_debug("search_knn = {}; {} remaining".format(
                search_knn, len(update_idx)))
        if search_knn > self.data_nu.shape[0] / 2:
            # algorithm string reconstructed; the original literal was stripped
            knn_tree = NearestNeighbors(
                search_knn, algorithm='brute',
                n_jobs=self.n_jobs).fit(self.data_nu)
        if len(update_idx) > 0:
            tasklogger.log_debug(
                "radius search on {}".format(len(update_idx)))
            # fall back to a radius search for the remaining points
            dist_new, ind_new = knn_tree.radius_neighbors(
                Y[update_idx, :],
                radius=radius if isinstance(bandwidth, numbers.Number)
                else np.max(radius[update_idx]))
            for i, idx in enumerate(update_idx):
                distances[idx] = dist_new[i]
                indices[idx] = ind_new[i]
        if isinstance(bandwidth, numbers.Number):
            data = np.concatenate(distances) / bandwidth
        else:
            data = np.concatenate([distances[i] / bandwidth[i]
                                   for i in range(len(distances))])
        indices = np.concatenate(indices)
        indptr = np.concatenate(
            [[0], np.cumsum([len(d) for d in distances])])
        K = sparse.csr_matrix((data, indices, indptr),
                              shape=(Y.shape[0], self.data_nu.shape[0]))
        K.data = np.exp(-1 * np.power(K.data, self.decay))
        # handle nan and truncate small affinities
        K.data = np.where(np.isnan(K.data), 1, K.data)
        K.data[K.data < self.thresh] = 0
        K = K.tocoo()
        K.eliminate_zeros()
        K = K.tocsr()
        tasklogger.log_complete("affinities")
    return K
Build a kernel from new input data `Y` to the `self.data`

Parameters
----------
Y: array-like, [n_samples_y, n_features]
    new data for which an affinity matrix is calculated to the existing
    data. `n_features` must match either the ambient or PCA dimensions
knn : `int` or `None`, optional (default: `None`)
    If `None`, defaults to `self.knn`
bandwidth : `float`, `callable`, or `None`, optional (default: `None`)
    If `None`, defaults to `self.bandwidth`
bandwidth_scale : `float`, optional (default : `None`)
    Rescaling factor for bandwidth. If `None`, defaults to self.bandwidth_scale

Returns
-------
K_yx: array-like, [n_samples_y, n_samples]
    kernel matrix where each row represents affinities of a single
    sample in `Y` to all samples in `self.data`.

Raises
------
ValueError: if the supplied data is the wrong shape
15,243
def isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType):
    self.send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType)
    return self.recv_isPartitionMarkedForEvent()
Parameters:
 - db_name
 - tbl_name
 - part_vals
 - eventType
15,244
def create_client_from_env(username=None,
                           api_key=None,
                           endpoint_url=None,
                           timeout=None,
                           auth=None,
                           config_file=None,
                           proxy=None,
                           user_agent=None,
                           transport=None,
                           verify=True):
    settings = config.get_client_settings(username=username,
                                          api_key=api_key,
                                          endpoint_url=endpoint_url,
                                          timeout=timeout,
                                          proxy=proxy,
                                          verify=verify,
                                          config_file=config_file)

    if transport is None:
        # settings keys and the '/rest' marker reconstructed; the original
        # string literals were stripped (a stray paren was also removed)
        url = settings.get('endpoint_url')
        if url is not None and '/rest' in url:
            transport = transports.RestTransport(
                endpoint_url=settings.get('endpoint_url'),
                proxy=settings.get('proxy'),
                timeout=settings.get('timeout'),
                user_agent=user_agent,
                verify=verify,
            )
        else:
            transport = transports.XmlRpcTransport(
                endpoint_url=settings.get('endpoint_url'),
                proxy=settings.get('proxy'),
                timeout=settings.get('timeout'),
                user_agent=user_agent,
                verify=verify,
            )

    return BaseClient(auth=auth, transport=transport)
Creates a SoftLayer API client using your environment.

Settings are loaded via keyword arguments, environmental variables and config file.

:param username: an optional API username if you wish to bypass the package's
                 built-in username
:param api_key: an optional API key if you wish to bypass the package's built-in API key
:param endpoint_url: the API endpoint base URL you wish to connect to.
    Set this to API_PRIVATE_ENDPOINT to connect via SoftLayer's private network.
:param proxy: proxy to be used to make API calls
:param integer timeout: timeout for API requests
:param auth: an object which responds to get_headers() to be inserted into the
    xml-rpc headers. Example: `BasicAuthentication`
:param config_file: A path to a configuration file used to load settings
:param user_agent: an optional User Agent to report when making API calls if
    you wish to bypass the package's built-in User Agent string
:param transport: An object that's callable with this signature:
    transport(SoftLayer.transports.Request)
:param bool verify: decide to verify the server's SSL/TLS cert. DO NOT SET
    TO FALSE WITHOUT UNDERSTANDING THE IMPLICATIONS.

Usage:

    >>> import SoftLayer
    >>> client = SoftLayer.create_client_from_env()
    >>> resp = client.call('Account', 'getObject')
    >>> resp['companyName']
    'Your Company'
15,245
def splice(self, mark, newdata):
    self.jump_to(mark)
    self._data = self._data[:self._offset] + bytearray(newdata)
Replace the data after the marked location with the specified data.
15,246
def _verify_run(out, cmd=None):
    # dict keys and log messages reconstructed; the original string
    # literals were stripped
    if out.get('retcode', 0) and out['stderr']:
        if cmd:
            log.debug('Command: \'%s\'', cmd)
        log.debug('Return code: %s', out.get('retcode'))
        log.debug('Error output:\n%s', out.get('stderr', 'N/A'))
        raise CommandExecutionError(out['stderr'])
Log details and raise if command execution was not successful.
15,247
def boolmask(indices, maxval=None):
    if maxval is None:
        indices = list(indices)
        maxval = max(indices) + 1
    mask = [False] * maxval
    for index in indices:
        mask[index] = True
    return mask
Constructs a list of booleans where an item is True if its position is in
`indices` otherwise it is False.

Args:
    indices (list): list of integer indices
    maxval (int): length of the returned list. If not specified this is
        inferred from `indices`

Note:
    In the future the arg `maxval` may change its name to `shape`

Returns:
    list: mask: list of booleans. mask[idx] is True if idx in indices

Example:
    >>> import ubelt as ub
    >>> indices = [0, 1, 4]
    >>> mask = ub.boolmask(indices, maxval=6)
    >>> assert mask == [True, True, False, False, True, False]
    >>> mask = ub.boolmask(indices)
    >>> assert mask == [True, True, False, False, True]
15,248
def build_image_list(config, image, imagefile, all_local, include_allanchore,
                     dockerfile=None, exclude_file=None):
    # usage messages, dict keys, and context names reconstructed; the
    # original string literals were stripped
    if not image and not (imagefile or all_local):
        raise click.BadOptionUsage('Must specify one of --image, --imagefile, --all')
    if image and imagefile:
        raise click.BadOptionUsage('Only one of --image and --imagefile may be given')

    filter_images = []
    if exclude_file:
        with open(exclude_file) as f:
            for line in f.readlines():
                filter_images.append(line.strip())

    imagelist = {}
    if image:
        imagelist[image] = {'dockerfile': dockerfile}

    if imagefile:
        filelist = anchore_utils.read_kvfile_tolist(imagefile)
        for i in range(len(filelist)):
            l = filelist[i]
            imageId = l[0]
            try:
                dfile = l[1]
            except Exception:
                dfile = None
            imagelist[imageId] = {'dockerfile': dfile}

    if all_local:
        docker_cli = contexts['docker_cli']
        if docker_cli:
            for f in docker_cli.images(all=True, quiet=True,
                                       filters={'dangling': False}):
                if f not in imagelist and f not in filter_images:
                    imagelist[f] = {'dockerfile': None}
        else:
            raise Exception("Could not load any images from local docker host - is docker running?")

    if include_allanchore:
        ret = contexts['anchore_db'].load_all_images().keys()
        if ret and len(ret) > 0:
            for l in list(set(imagelist.keys()) | set(ret)):
                imagelist[l] = {'dockerfile': None}

    for excluded in filter_images:
        docker_cli = contexts['docker_cli']
        if not docker_cli:
            raise Exception("Could not query docker - is docker running?")
        for img in docker_cli.images(name=excluded, quiet=True):
            imagelist.pop(img, None)

    return imagelist
Given option inputs from the cli, construct a list of image ids. Includes all found with no exclusion logic
15,249
def get_bool(self, key, default=UndefinedKey):
    # truthy/falsy tokens reconstructed; the original string literals were stripped
    bool_conversions = {
        None: None,
        'true': True, 'yes': True, 'on': True,
        'false': False, 'no': False, 'off': False,
    }
    string_value = self.get_string(key, default)
    if string_value is not None:
        string_value = string_value.lower()
    try:
        return bool_conversions[string_value]
    except KeyError:
        raise ConfigException(
            u"{key} does not translate to a Boolean value".format(key=key))
Return boolean representation of value found at key

:param key: key to use (dot separated). E.g., a.b.c
:type key: basestring
:param default: default value if key not found
:type default: bool
:return: boolean value
:type return: bool
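A usage sketch against a pyhocon-style tree (hypothetical config, assuming the reconstructed token set above):

# config = ConfigFactory.parse_string('a.b = yes\nc = off')
# config.get_bool('a.b')   -> True
# config.get_bool('c')     -> False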
15,250
def get_n_cluster_per_event_hist(cluster_table):
    logging.info("Histogram number of cluster per event")
    cluster_in_events = analysis_utils.get_n_cluster_in_events(cluster_table)[:, 1]
    return np.histogram(cluster_in_events,
                        bins=range(0, np.max(cluster_in_events) + 2))
Calculates the number of cluster in every event.

Parameters
----------
cluster_table : pytables.table

Returns
-------
numpy.Histogram
15,251
def integer(self, x):
    if type(x) is str:
        hex = binascii.unhexlify(x)
        # byte order reconstructed; the original string literal was stripped
        return int.from_bytes(hex, 'big')
    return x.value if isinstance(x, FiniteField.Value) else x
returns a plain integer
15,252
def get_profile(self, ann_el_demand_per_sector):
    # dropna 'how' argument reconstructed; the original string literal was stripped
    return self.slp_frame.multiply(pd.Series(ann_el_demand_per_sector),
                                   axis=1).dropna(how='all', axis=1) * 4
Get the profiles for the given annual demand

Parameters
----------
ann_el_demand_per_sector : dictionary
    Key: sector, value: annual value

Returns
-------
pandas.DataFrame : Table with all profiles
15,253
def iob2json(input_data, n_sents=10, *args, **kwargs):
    # NOTE: in the source, `docs` was iterated while still empty; the line
    # reading the IOB input appears to have been lost. A plausible
    # reconstruction based on spaCy's converters:
    docs = read_iob(input_data.split('\n'))
    merged = []
    for group in minibatch(docs, n_sents):
        group = list(group)
        first = group.pop(0)
        to_extend = first["paragraphs"][0]["sentences"]
        # group[1:] skips one sentence group after the pop(0); this quirk is
        # preserved from the source
        for sent in group[1:]:
            to_extend.extend(sent["paragraphs"][0]["sentences"])
        merged.append(first)
    return merged
Convert IOB files into JSON format for use with train cli.
15,254
def getmember(self, name):
    tarinfo = self._getmember(name)
    if tarinfo is None:
        raise KeyError("filename %r not found" % name)
    return tarinfo
Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version.
15,255
def state(self, abbr: bool = False) -> str:
    # data keys reconstructed; the original string literals were stripped
    return self.random.choice(
        self._data['state']['abbr' if abbr else 'name'])
Get a random administrative district of country. :param abbr: Return ISO 3166-2 code. :return: Administrative district.
15,256
def write_min_max(self, file):
    report = CaseReport(self.case)

    col1_header = "Attribute"
    col1_width = 19
    col2_header = "Minimum"
    col3_header = "Maximum"
    col_width = 22

    sep = "=" * col1_width + " " + "=" * col_width + " " + "=" * col_width + "\n"

    file.write(sep)
    file.write("%s" % col1_header.center(col1_width))
    file.write(" ")
    file.write("%s" % col2_header.center(col_width))
    file.write(" ")
    file.write("%s" % col3_header.center(col_width))
    file.write("\n")
    file.write(sep)

    min_val, min_i = getattr(report, "min_v_magnitude")
    max_val, max_i = getattr(report, "max_v_magnitude")
    file.write("%s %7.3f p.u. @ bus %2d %7.3f p.u. @ bus %2d\n" %
               ("Voltage Amplitude".ljust(col1_width),
                min_val, min_i, max_val, max_i))

    min_val, min_i = getattr(report, "min_v_angle")
    max_val, max_i = getattr(report, "max_v_angle")
    file.write("%s %16.3f %16.3f\n" %
               ("Voltage Phase Angle".ljust(col1_width), min_val, max_val))

    file.write(sep)
    file.write("\n")

    del report
Writes minimum and maximum values to a table.
15,257
def langids(self):
    if self._langids is None:
        try:
            self._langids = util.get_langids(self)
        except USBError:
            self._langids = ()
    return self._langids
Return the USB device's supported language ID codes.

These are 16-bit codes familiar to Windows developers, where for example
instead of en-US you say 0x0409. See USB_LANGIDS.pdf on the usb.org developer
site for more info. String requests using a LANGID not in this array should
not be sent to the device.

This property will cause some USB traffic the first time it is accessed and
cache the resulting value for future use.
15,258
def _serialize_json(obj, fp):
    json.dump(obj, fp, indent=4, default=serialize)
Serialize ``obj`` as a JSON formatted stream to ``fp``
15,259
def legacy_signature(**kwargs_mapping):
    def signature_decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            redirected_kwargs = {
                kwargs_mapping[k] if k in kwargs_mapping else k: v
                for k, v in kwargs.items()
            }
            return f(*args, **redirected_kwargs)
        return wrapper
    return signature_decorator
This decorator makes it possible to call a function using old argument names
when they are passed as keyword arguments.

    @legacy_signature(old_arg1='arg1', old_arg2='arg2')
    def func(arg1, arg2=1):
        return arg1 + arg2

    func(old_arg1=1) == 2
    func(old_arg1=1, old_arg2=2) == 3
15,260
def get_versions():
    # ffi type strings reconstructed; the original string literals were stripped
    versions = ffi.new('cairo_pdf_version_t const **')
    num_versions = ffi.new('int *')
    cairo.cairo_pdf_get_versions(versions, num_versions)
    versions = versions[0]
    return [versions[i] for i in range(num_versions[0])]
Return the list of supported PDF versions. See :meth:`restrict_to_version`.

:return: A list of :ref:`PDF_VERSION` strings.

*New in cairo 1.10.*
15,261
def start_auth(self, context, internal_req):
    target_entity_id = context.get_decoration(Context.KEY_TARGET_ENTITYID)
    if target_entity_id:
        entity_id = target_entity_id
        return self.authn_request(context, entity_id)

    # if there is only one IdP in the metadata, bypass the discovery service
    idps = self.sp.metadata.identity_providers()
    if len(idps) == 1 and "mdq" not in self.config["sp_config"]["metadata"]:
        entity_id = idps[0]
        return self.authn_request(context, entity_id)

    return self.disco_query()
See super class method satosa.backends.base.BackendModule#start_auth

:type context: satosa.context.Context
:type internal_req: satosa.internal.InternalData
:rtype: satosa.response.Response
15,262
def pop_first_arg(argv):
    for arg in argv:
        if not arg.startswith('-'):  # dash reconstructed; literal was stripped
            argv.remove(arg)
            return (arg, argv)
    return (None, argv)
Find the first positional arg (one that does not start with -), take it out of
the array, and return it separately. Returns (arg, array).
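A quick sketch of the behavior (note the input list is mutated in place by argv.remove):

argv = ["--verbose", "build", "--out"]
arg, rest = pop_first_arg(argv)
# arg == "build"; rest == ["--verbose", "--out"]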
15,263
def rpush(self, name, *values):
    with self.pipe as pipe:
        v_encode = self.valueparse.encode
        values = [v_encode(v) for v in self._parse_values(values)]
        return pipe.rpush(self.redis_key(name), *values)
Push the value into the list from the *right* side

:param name: str the name of the redis key
:param values: a list of values or single value to push
:return: Future()
15,264
def responses_of(self, request):
    responses = [response for index, response in self._responses(request)]

    if responses:
        return responses

    # error tail reconstructed; the original was garbled in the source
    raise UnhandledHTTPRequestError(
        "The cassette (%r) doesn't contain the request (%r) asked for"
        % (self._path, request)
    )
Find the responses corresponding to a request. This function isn't actually used by VCR internally, but is provided as an external API.
15,265
def mean_field(self):
    # operator and parameter keys reconstructed; the original string
    # literals were stripped
    mean_field = []
    for sp_oper in [self.oper['up'], self.oper['down']]:
        avgO = np.array([self.expected(op) for op in sp_oper])
        avgO[abs(avgO) < 1e-10] = 0.
        mean_field.append(avgO * self.param['U'])
    return np.array(mean_field)
Calculates mean field
15,266
def raise_on_errors(errors, level=logging.CRITICAL):
    if errors:
        log.log(level, "\n".join(errors))
        raise CoTError("\n".join(errors))
Raise a CoTError if errors.

Helper function because I had this code block everywhere.

Args:
    errors (list): the error messages
    level (int, optional): the log level to use. Defaults to logging.CRITICAL

Raises:
    CoTError: if errors is non-empty
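A hedged usage sketch; the error string and the surrounding check are made up for illustration:

errors = []
if not artifact_hash_ok:   # hypothetical condition
    errors.append("manifest hash mismatch")
raise_on_errors(errors)    # no-op when empty; raises CoTError otherwise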
15,267
def _get_algorithm_info(self, algorithm_info):
    # dict key and message reconstructed; the original string literals were stripped
    if algorithm_info['algorithm'] not in self.ALGORITHMS:
        raise Exception('Algorithm %s not supported' % algorithm_info['algorithm'])
    algorithm = self.ALGORITHMS[algorithm_info['algorithm']]
    algorithm_info.update(algorithm)
    return algorithm_info
Get algorithm info
15,268
def clear_dns_cache(self,
                    host: Optional[str]=None,
                    port: Optional[int]=None) -> None:
    if host is not None and port is not None:
        self._cached_hosts.remove((host, port))
    elif host is not None or port is not None:
        raise ValueError("either both host and port "
                         "or none of them are allowed")
    else:
        self._cached_hosts.clear()
Remove specified host/port or clear all dns local cache.
15,269
def update_and_transform(self, y, exogenous, **kwargs):
    check_is_fitted(self, "p_")
    self._check_endog(y)
    _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs)
    self.n_ += len(y)
    return y, Xt
Update the params and return the transformed arrays

Since no parameters really get updated in the Fourier featurizer, all we do is
compose forecasts for ``n_periods=len(y)`` and then update ``n_``.

Parameters
----------
y : array-like or None, shape=(n_samples,)
    The endogenous (time-series) array.

exogenous : array-like or None, shape=(n_samples, n_features)
    The exogenous array of additional covariates.

**kwargs : keyword args
    Keyword arguments required by the transform function.
15,270
def as_dict(self):
    d = MSONable.as_dict(self)
    d["data"] = self.data.tolist()
    return d
Returns a dict representation of the Xmu object
15,271
def in_stroke(self, x, y):
    return bool(cairo.cairo_in_stroke(self._pointer, x, y))
Tests whether the given point is inside the area that would be affected by a
:meth:`stroke` operation given the current path and stroking parameters.
Surface dimensions and clipping are not taken into account.

See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.

:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
15,272
def add_source(self, evidence_line, source, label=None, src_type=None):
    # global translation key reconstructed; the original string literal was stripped
    self.graph.addTriple(evidence_line, self.globaltt['source'], source)
    self.model.addIndividualToGraph(source, label, src_type)
    return
Applies the triples:
    <evidence> <dc:source> <source>
    <source> <rdf:type> <type>
    <source> <rdfs:label> "label"

TODO this should belong in a higher level class

:param evidence_line: str curie
:param source: str source as curie
:param label: optional, str type as curie
:param type: optional, str type as curie
:return: None
15,273
def keyword(self, text):
    cls = self.KEYWORDS[text]
    self.push_token(cls(text, self.lineno, self.offset))
Push a keyword onto the token queue.
15,274
def connect(self):
    if self._conn:
        return self._conn
    self._conn = psycopg2.connect(
        self.config,
        cursor_factory=psycopg2.extras.RealDictCursor,
    )
    self._conn.set_session(autocommit=True)
    psycopg2.extras.register_hstore(self._conn)
    return self._conn
Construct the psycopg2 connection instance

:return: psycopg2.connect instance
15,275
def read_namespaced_ingress_status(self, name, namespace, **kwargs):
    # kwargs keys reconstructed; the original string literals were stripped
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_ingress_status_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.read_namespaced_ingress_status_with_http_info(name, namespace, **kwargs)
        return data
read_namespaced_ingress_status  # noqa: E501

read status of the specified Ingress  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.read_namespaced_ingress_status(name, namespace, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the Ingress (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1Ingress
         If the method is called asynchronously, returns the request thread.
15,276
def poisson_ll(data, means):
    if sparse.issparse(data):
        return sparse_poisson_ll(data, means)
    genes, cells = data.shape
    clusters = means.shape[1]
    ll = np.zeros((cells, clusters))
    for i in range(clusters):
        means_i = np.tile(means[:, i], (cells, 1))
        means_i = means_i.transpose() + eps  # eps: small module-level constant
        ll[:, i] = np.sum(xlogy(data, means_i) - means_i, 0)
    return ll
Calculates the Poisson log-likelihood.

Args:
    data (array): 2d numpy array of genes x cells
    means (array): 2d numpy array of genes x k

Returns:
    cells x k array of log-likelihood for each cell/cluster pair
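A shape-only sketch with toy random data; it assumes xlogy comes from scipy.special and eps is a small module-level constant, as suggested by the code above:

import numpy as np
data = np.random.poisson(5, size=(50, 10)).astype(float)   # genes x cells
means = np.random.gamma(2.0, 2.0, size=(50, 3))            # genes x k
ll = poisson_ll(data, means)    # -> shape (10, 3): one value per cell/cluster
labels = ll.argmax(axis=1)      # hard assignment per cell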
15,277
def is_volatile(self):
    # property names reconstructed; the original string literals were stripped
    hw = self.get_property('hw')
    return (
        (hw in (rdltypes.AccessType.rw, rdltypes.AccessType.rw1,
                rdltypes.AccessType.w, rdltypes.AccessType.w1))
        or self.get_property('counter')
        or (self.get_property('next') is not None)
        or self.get_property('hwset')
        or self.get_property('hwclr')
    )
True if combination of field access properties result in a field that should be interpreted as volatile. (Any hardware-writable field is inherently volatile)
15,278
def paths_wanted(self):
    # address target reconstructed; the original string literal was stripped
    return set(address.new(b, target='path') for b in self.missing_nodes)
The set of paths where we expect to find missing nodes.
15,279
def extract(self, item, list_article_candidate):
    languages_extracted = []
    language_newspaper = None

    for article_candidate in list_article_candidate:
        if article_candidate.language is not None:
            languages_extracted.append(article_candidate.language)
            if article_candidate.extractor == "newspaper":
                language_newspaper = article_candidate.language

    if not languages_extracted:
        return None

    languages_extracted_set = set(languages_extracted)
    languages_extracted_number = []
    for language in languages_extracted_set:
        languages_extracted_number.append(
            (languages_extracted.count(language), language))

    if not languages_extracted_number:
        return None

    # On a tie, trust the language detected by newspaper (if any).
    if (max(languages_extracted_number)[0] == min(languages_extracted_number)[0]
            and language_newspaper is not None):
        return language_newspaper

    if languages_extracted_number:
        return max(languages_extracted_number)[1]
    else:
        return None
Compares how often any language was detected.

:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects
    which have been extracted
:return: A string, the language which was most frequently detected
15,280
def add_to_class(self, cls, name):
    self._name = name
    self._container_model_class = cls
    setattr(cls, name, FieldDescriptor(self))
    self._bound = True
Hook that replaces the `Field` attribute on a class with a named ``FieldDescriptor``. Called by the metaclass during construction of the ``Model``.
15,281
def cmd_position(self, args):
    if len(args) != 3:
        print("Usage: position x y z (meters)")
        return
    x_m = float(args[0])
    y_m = float(args[1])
    z_m = float(args[2])
    print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
    self.master.mav.set_position_target_local_ned_send(
        0,        # time_boot_ms
        1,        # target_system
        0,        # target_component
        8,        # coordinate frame
        3576,     # type_mask: use position only
        x_m, y_m, z_m,
        0, 0, 0,  # velocity
        0, 0, 0,  # acceleration
        0, 0)     # yaw, yaw_rate
position x-m y-m z-m
15,282
def render(self, rect, data):
    extra_height = rect.h - self.get_minimum_size(data).y

    num_elements = len(self.elements)
    if num_elements == 0:
        return
    elif num_elements > 1:
        per_margin = 1.0 / float(num_elements - 1)
    else:
        per_margin = 0.0
    per_element = 1.0 / float(num_elements)

    # Starting y coordinate depends on the vertical alignment.
    y = rect.y
    if self.vertical_align == VerticalLM.ALIGN_MIDDLE:
        y = rect.y + extra_height * 0.5
    elif self.vertical_align == VerticalLM.ALIGN_TOP:
        y = rect.y + extra_height

    for element in reversed(self.elements):
        size = element.get_minimum_size(data)

        # Horizontal placement of this element.
        if self.horizontal_align == VerticalLM.ALIGN_LEFT:
            x = rect.x
            w = size.x
        elif self.horizontal_align == VerticalLM.ALIGN_CENTER:
            x = rect.center - size.x * 0.5
            w = size.x
        elif self.horizontal_align == VerticalLM.ALIGN_RIGHT:
            x = rect.right - size.x
            w = size.x
        else:
            assert self.horizontal_align == VerticalLM.ALIGN_GROW
            x = rect.x
            w = rect.w

        # Height and next y coordinate depend on the vertical alignment.
        if self.vertical_align in VerticalLM._ALIGN_SIMPLE_SET:
            h = size.y
            next_y = y + size.y + self.margin
        elif self.vertical_align == VerticalLM.ALIGN_EQUAL_SPACING:
            h = size.y
            next_y = y + size.y + self.margin + extra_height * per_margin
        else:
            assert self.vertical_align == VerticalLM.ALIGN_EQUAL_GROWTH
            h = size.y + extra_height * per_element
            next_y = y + h + self.margin

        element.render(datatypes.Rectangle(x, y, w, h), data)
        y = next_y
Displays the elements according to the align properties.
15,283
def _check_no_current_table(new_obj, current_table):
    if current_table is None:
        # message reconstructed; the original string literal was stripped
        msg = 'Cannot add a {} before adding a table.'
        if isinstance(new_obj, Relation):
            raise NoCurrentTableException(msg.format('relation'))
        if isinstance(new_obj, Column):
            raise NoCurrentTableException(msg.format('column'))
Raises exception if we try to add a relation or a column with no current table.
15,284
def set_policy(name, table='filter', family='ipv4', **kwargs):
    # ret keys, module names, and messages reconstructed; the original
    # string literals were stripped in the source
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    for ignore in _STATE_INTERNAL_KEYWORDS:
        if ignore in kwargs:
            del kwargs[ignore]

    if __salt__['iptables.get_policy'](
            table,
            kwargs['chain'],
            family) == kwargs['policy']:
        ret['result'] = True
        ret['comment'] = ('iptables default policy for chain {0} on table {1} '
                          'for {2} already set to {3}'.format(
                              kwargs['chain'], table, family, kwargs['policy']))
        return ret
    if __opts__['test']:
        ret['comment'] = ('iptables default policy for chain {0} on table {1} '
                          'for {2} needs to be set to {3}'.format(
                              kwargs['chain'], table, family, kwargs['policy']))
        return ret
    if not __salt__['iptables.set_policy'](
            table,
            kwargs['chain'],
            kwargs['policy'],
            family):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
            kwargs['chain'],
            kwargs['policy'],
            family
        )
        if 'save' in kwargs:
            if kwargs['save']:
                __salt__['iptables.save'](filename=None, family=family)
                ret['comment'] = ('Set and saved default policy for {0} to {1} '
                                  'family {2}'.format(
                                      kwargs['chain'], kwargs['policy'], family))
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set iptables default policy'
        return ret
.. versionadded:: 2014.1.0

Sets the default policy for iptables firewall tables

table
    The table that owns the chain that should be modified

family
    Networking family, either ipv4 or ipv6

policy
    The requested table policy
15,285
def _run_grid_multithread(self, func, iterables):
    f = lambda x: threading.Thread(target=func, args=(x,))
    threads = [f(x) for x in iterables]
    for thread in threads:
        thread.setDaemon(True)
        thread.start()
    # Join outside the start loop so all threads run concurrently.
    # The original joined each thread right after starting it, which
    # serialized execution.
    for thread in threads:
        thread.join()
Run cases with multiple threads to support selenium grid mode (multiple
browsers) and appium grid mode (multiple devices).

@param func: function object
@param iterables: iterable objects
15,286
def create(self, image_path, size=1024, sudo=False):
    from spython.utils import check_install
    check_install()

    cmd = self.init_command()
    # '--size' flag reconstructed; the original string literal was stripped
    cmd = cmd + ['--size', str(size), image_path]

    output = self.run_command(cmd, sudo=sudo)
    self.println(output)
    if not os.path.exists(image_path):
        bot.exit("Could not create image %s" % image_path)
    return image_path
create will create a new image

Parameters
==========
image_path: full path to image
size: image size in MiB, default is 1024 MiB
filesystem: supported file systems ext3/ext4 (default ext3)
15,287
def node_inclusion_predicate_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:
    nodes = set(nodes)

    @node_predicate
    def node_inclusion_predicate(node: BaseEntity) -> bool:
        return node in nodes

    return node_inclusion_predicate
Build a function that returns true for the given nodes.
15,288
def get_buckets(min_length, max_length, bucket_count):
    if bucket_count <= 0:
        return [max_length]
    unit_length = int((max_length - min_length) // (bucket_count))
    buckets = [min_length + unit_length * (i + 1)
               for i in range(0, bucket_count)]
    buckets[-1] = max_length
    return buckets
Get bucket by length.
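Two worked examples of the bucketing arithmetic (the last bucket is always snapped to max_length):

>>> get_buckets(0, 100, 4)    # unit_length = 25
[25, 50, 75, 100]
>>> get_buckets(10, 20, 3)    # unit_length = 3; 19 is snapped to 20
[13, 16, 20]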
15,289
def process_response(self, request_id=None):
    # request-data keys and error strings reconstructed; the original
    # string literals were stripped in the source
    self.__errors = []
    self.__error_reason = None
    if 'post_data' in self.__request_data and \
            'SAMLResponse' in self.__request_data['post_data']:
        response = OneLogin_Saml2_Response(
            self.__settings, self.__request_data['post_data']['SAMLResponse'])
        self.__last_response = response.get_xml_document()

        if response.is_valid(self.__request_data, request_id):
            self.__attributes = response.get_attributes()
            self.__nameid = response.get_nameid()
            self.__nameid_format = response.get_nameid_format()
            self.__session_index = response.get_session_index()
            self.__session_expiration = response.get_session_not_on_or_after()
            self.__last_message_id = response.get_id()
            self.__last_assertion_id = response.get_assertion_id()
            self.__last_authn_contexts = response.get_authn_contexts()
            self.__authenticated = True
            self.__last_assertion_not_on_or_after = response.get_assertion_not_on_or_after()
        else:
            self.__errors.append('invalid_response')
            self.__error_reason = response.get_error()
    else:
        self.__errors.append('invalid_binding')
        raise OneLogin_Saml2_Error(
            'SAML Response not found, Only supported HTTP_POST Binding',
            OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND
        )
Process the SAML Response sent by the IdP.

:param request_id: Is an optional argument. Is the ID of the AuthNRequest sent by this SP to the IdP.
:type request_id: string

:raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found
15,290
def add_torques(self, torques):
    j = 0
    for joint in self.joints:
        joint.add_torques(
            list(torques[j:j + joint.ADOF]) + [0] * (3 - joint.ADOF))
        j += joint.ADOF
Add torques for each degree of freedom in the skeleton.

Parameters
----------
torques : list of float
    A list of the torques to add to each degree of freedom in the skeleton.
15,291
def _value_with_fmt(self, val):
    fmt = None
    if is_integer(val):
        val = int(val)
    elif is_float(val):
        val = float(val)
    elif is_bool(val):
        val = bool(val)
    elif isinstance(val, datetime):
        fmt = self.datetime_format
    elif isinstance(val, date):
        fmt = self.date_format
    elif isinstance(val, timedelta):
        val = val.total_seconds() / float(86400)
        fmt = '0'  # format string reconstructed; the original literal was stripped
    else:
        val = compat.to_str(val)
    return val, fmt
Convert numpy types to Python types for the Excel writers.

Parameters
----------
val : object
    Value to be written into cells

Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
15,292
def delete_component(self, id):
    # URL path segment reconstructed; the original string literal was stripped
    url = self._get_url('component/' + str(id))
    return self._session.delete(url)
Delete component by id.

:param id: ID of the component to use
:type id: str
:rtype: Response
15,293
def _exclude_pattern(self, pattern, anchor=True, prefix=None, is_regex=False):
    found = False
    pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
    for f in list(self.files):
        if pattern_re.search(f):
            self.files.remove(f)
            found = True
    return found
Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions
15,294
def list_current_filter_set(self, raw=False):
    # serial command join reconstructed; the original string literal was stripped
    buf = []
    self.open_umanager()
    self.ser.write(''.join((self.cmd_current_filter_list, self.cr)))
    if self.read_loop(lambda x: x.endswith(self.umanager_prompt),
                      self.timeout,
                      lambda x, y, z: buf.append(y.rstrip()[:-1])):
        if raw:
            rv = buf = buf[0]
        else:
            rv, buf = self.filter_organizer(buf[0])
    else:
        raise Dam1021Error(16, "Failed to list currently selected filter set")
    self.close_umanager()
    log.info(buf)
    return rv
Used to list the currently selected filter set
15,295
def update_launch_metadata(self, scaling_group, metadata):
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    curr_meta = scaling_group.launchConfiguration.get("args", {}).get(
        "server", {}).get("metadata", {})
    curr_meta.update(metadata)
    return self.update_launch_config(scaling_group, metadata=curr_meta)
Adds the given metadata dict to the existing metadata for the scaling group's launch configuration.
15,296
def ban_show(self, ban_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/bans#get-ban"
    api_path = "/api/v2/bans/{ban_id}"
    api_path = api_path.format(ban_id=ban_id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/chat/bans#get-ban
15,297
def _compute_f1(self, C, mag, rrup):
    # coefficient names reconstructed; the original string literals were stripped
    r = np.sqrt(rrup ** 2 + C['c4'] ** 2)

    f1 = (
        C['a1'] +
        C['a12'] * (8.5 - mag) ** C['n'] +
        (C['a3'] + C['a13'] * (mag - C['c1'])) * np.log(r)
    )

    if mag <= C['c1']:
        f1 += C['a2'] * (mag - C['c1'])
    else:
        f1 += C['a4'] * (mag - C['c1'])

    return f1
Compute f1 term (eq.4, page 105)
15,298
def is_dict_equal(d1, d2, keys=None, ignore_none_values=True):
    if keys or ignore_none_values:
        d1 = {k: v for k, v in d1.items()
              if (keys is None or k in keys)
              and (v is not None or not ignore_none_values)}
        d2 = {k: v for k, v in d2.items()
              if (keys is None or k in keys)
              and (v is not None or not ignore_none_values)}
    return d1 == d2
Compares two dictionaries to see if they are equal

:param d1: the first dictionary
:param d2: the second dictionary
:param keys: the keys to limit the comparison to (optional)
:param ignore_none_values: whether to ignore none values
:return: true if the dictionaries are equal, else false
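Two worked examples of the comparison semantics:

d1 = {"a": 1, "b": None, "c": 3}
d2 = {"a": 1, "c": 3, "d": 4}
is_dict_equal(d1, d2, keys=["a", "b", "c"])   # True: the None value is ignored
is_dict_equal(d1, d2)                         # False: d2 has the extra key "d"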
15,299
def plot_and_save(self, **kwargs):
    self.fig = pyplot.figure()
    self.plot()
    self.axes = pyplot.gca()
    self.save_plot(self.fig, self.axes, **kwargs)
    pyplot.close(self.fig)
Used when the plot method defined does not create a figure nor call save_plot.
In that case the plot method has to use self.fig.