[dataset columns] Unnamed: 0 (int64, 0 to 389k); code (string, 26 to 79.6k chars); docstring (string, 1 to 46.9k chars)
387,900
def bounds(self): points = np.array([e.bounds(self.vertices) for e in self.entities], dtype=np.float64) points = points.reshape((-1, self.vertices.shape[1])) bounds = np.array([points.min(axis=0), points.max(axis=0)], dtype=np.float64) return bounds
Return the axis aligned bounding box of the current path. Returns ---------- bounds: (2, dimension) float, (min, max) coordinates
387,901
def weighted_minkowski(x, y, w=_mock_identity, p=2): result = 0.0 for i in range(x.shape[0]): result += (w[i] * np.abs(x[i] - y[i])) ** p return result ** (1.0 / p)
A weighted version of Minkowski distance. .. math:: D(x, y) = \left(\sum_i w_i |x_i - y_i|^p\right)^{\frac{1}{p}} If the weights w_i are inverse standard deviations of the data in each dimension then this represents a standardised Minkowski distance (and is equivalent to standardised Euclidean distance for p=1).
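A minimal, self-contained NumPy sketch of the same computation (mirroring the loop above rather than the compiled implementation); the sample vectors and unit weights are made up for illustration:

import numpy as np

def weighted_minkowski_demo(x, y, w, p=2):
    # Mirrors the loop above: sum of (w_i * |x_i - y_i|) ** p, then the p-th root.
    return np.sum((w * np.abs(x - y)) ** p) ** (1.0 / p)

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 0.0, 3.5])
w = np.ones_like(x)
# With unit weights and p=2 this reduces to the ordinary Euclidean distance.
print(weighted_minkowski_demo(x, y, w), np.linalg.norm(x - y))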
387,902
def dict_strict_update(base_dict, update_dict):
    additional_keys = set(update_dict.keys()) - set(base_dict.keys())
    if len(additional_keys) > 0:
        # message string reconstructed; it just names the offending keys
        raise RuntimeError(
            "Update dict contains keys that are not in the base dict: {}".format(str(additional_keys)),
            additional_keys)
    base_dict.update(update_dict)
This function updates base_dict with update_dict if and only if update_dict does not contain keys that are not already in base_dict. It is essentially a more strict interpretation of the term "updating" the dict. If update_dict contains keys that are not in base_dict, a RuntimeError is raised. :param base_dict: The dict that is to be updated. This dict is modified. :param update_dict: The dict containing the new values.
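A self-contained sketch of the same strict-update behaviour, with made-up config keys:

def strict_update(base, update):
    extra = set(update) - set(base)
    if extra:
        raise RuntimeError("Keys not present in the base dict: {}".format(extra))
    base.update(update)

config = {"host": "localhost", "port": 8080}
strict_update(config, {"port": 9090})        # allowed: "port" already exists
print(config)                                # {'host': 'localhost', 'port': 9090}
try:
    strict_update(config, {"timeout": 30})   # rejected: "timeout" is a new key
except RuntimeError as err:
    print(err)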
387,903
def _lookup_enum_in_ns(namespace, value): for attribute in dir(namespace): if getattr(namespace, attribute) == value: return attribute
Return the attribute of namespace corresponding to value.
387,904
def iter_predict(self, X, include_init=False): utils.validation.check_is_fitted(self, ) X = utils.check_array(X, accept_sparse=[, ], dtype=None, force_all_finite=False) y_pred = self.init_estimator_.predict(X) if include_init: yield y_pred for estimators, line_searchers, cols in itertools.zip_longest(self.estimators_, self.line_searchers_, self.columns_): for i, (estimator, line_searcher) in enumerate(itertools.zip_longest(estimators, line_searchers or [])): if cols is None: direction = estimator.predict(X) else: direction = estimator.predict(X[:, cols]) if line_searcher: direction = line_searcher.update(direction) y_pred[:, i] += self.learning_rate * direction yield y_pred
Returns the predictions for ``X`` at every stage of the boosting procedure. Args: X (array-like or sparse matrix of shape (n_samples, n_features): The input samples. Sparse matrices are accepted only if they are supported by the weak model. include_init (bool, default=False): If ``True`` then the prediction from ``init_estimator`` will also be returned. Returns: iterator of arrays of shape (n_samples,) containing the predicted values at each stage
387,905
async def deserialize(self, data: dict, silent=True):
    self.import_data(self._deserialize(data))
    self.validate()
Deserializes a Python ``dict`` into the model by assigning values to their respective fields. Ignores data attributes that do not match one of the Model's fields. Ignores data attributes whose matching fields are declared with the ``readonly`` attribute. Validates the data after import. Override in subclasses to modify or add to deserialization behavior. :param data: Python dictionary with data :type data: ``dict`` :param silent: Determines if an exception is thrown if illegal fields are passed. Such fields can be non-existent or readonly. Default is True :type silent: ``bool``
387,906
def _parse_json(self, json, exactly_one=True): features = json[] if features == []: return None def parse_feature(feature): location = feature[] place = feature[] longitude = feature[][][0] latitude = feature[][][1] return Location(location, (latitude, longitude), place) if exactly_one: return parse_feature(features[0]) else: return [parse_feature(feature) for feature in features]
Returns location, (latitude, longitude) from json feed.
387,907
def _ssl_agent(self): certfile = self.lookup(self.profile, ) certfile = os.path.expanduser(certfile) with open(certfile) as certfp: pemdata = certfp.read() client_cert = PrivateCertificate.loadPEM(pemdata) trustRoot = None if servercafile: servercafile = os.path.expanduser(servercafile) trustRoot = RootCATrustRoot(servercafile) policy = ClientCertPolicy(trustRoot=trustRoot, client_cert=client_cert) return Agent(reactor, policy)
Get a Twisted Agent that performs Client SSL authentication for Koji.
387,908
def get_fuzzed(self, indent=False, utf8=False): try: if "array" in self.json: return self.fuzz_elements(dict(self.json))["array"] else: return self.fuzz_elements(dict(self.json)) except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
Return the fuzzed object
387,909
def encode_list(cls, value): encoded = base64.b64encode(six.b(" ".join(str(v) for v in value) or "-")) return encoded.decode("utf-8") if six.PY3 else encoded
Encodes a list *value* into a string via base64 encoding.
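The encoding scheme is simple enough to sketch without the six compatibility layer (Python 3 assumed here):

import base64

def encode_list_demo(values):
    # Join the items with spaces, fall back to "-" for an empty list, then base64-encode.
    joined = " ".join(str(v) for v in values) or "-"
    return base64.b64encode(joined.encode("utf-8")).decode("utf-8")

print(encode_list_demo([1, 2, 3]))   # MSAyIDM=
print(encode_list_demo([]))          # LQ==  (the "-" placeholder)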
387,910
def _get_parameter_values(template_dict, parameter_overrides): default_values = SamBaseProvider._get_default_parameter_values(template_dict) parameter_values = {} parameter_values.update(SamBaseProvider._DEFAULT_PSEUDO_PARAM_VALUES) parameter_values.update(default_values) parameter_values.update(parameter_overrides or {}) return parameter_values
Construct a final list of values for CloudFormation template parameters based on user-supplied values, default values provided in template, and sane defaults for pseudo-parameters. Parameters ---------- template_dict : dict SAM template dictionary parameter_overrides : dict User-supplied values for CloudFormation template parameters Returns ------- dict Values for template parameters to substitute in template with
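The precedence order described above (pseudo-parameter defaults, then template defaults, then user overrides) can be shown with plain dicts; the keys and values below are hypothetical:

pseudo_defaults = {"AWS::Region": "us-east-1"}            # hypothetical pseudo-parameter default
template_defaults = {"Stage": "dev", "MemorySize": 128}   # hypothetical defaults from the template
overrides = {"Stage": "prod"}                             # hypothetical user-supplied overrides

parameter_values = {}
parameter_values.update(pseudo_defaults)
parameter_values.update(template_defaults)
parameter_values.update(overrides or {})
print(parameter_values)   # later updates win: Stage ends up as "prod"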
387,911
def word_break(el, max_width=40, avoid_elements=_avoid_word_break_elements, avoid_classes=_avoid_word_break_classes, break_character=unichr(0x200b)): if el.tag in _avoid_word_break_elements: return class_name = el.get() if class_name: dont_break = False class_name = class_name.split() for avoid in avoid_classes: if avoid in class_name: dont_break = True break if dont_break: return if el.text: el.text = _break_text(el.text, max_width, break_character) for child in el: word_break(child, max_width=max_width, avoid_elements=avoid_elements, avoid_classes=avoid_classes, break_character=break_character) if child.tail: child.tail = _break_text(child.tail, max_width, break_character)
Breaks any long words found in the body of the text (not attributes). Doesn't affect any of the tags in avoid_elements, by default ``<textarea>`` and ``<pre>``. Breaks words by inserting &#8203;, the Unicode Zero Width Space character. This generally takes up no space in rendering, but does copy as a space, and in monospace contexts usually takes up space. See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion.
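A simplified stand-in for the _break_text helper used above: it chops any over-long run of characters and rejoins the pieces with the zero-width space:

def break_text(text, max_width=40, break_character="\u200b"):
    words = []
    for word in text.split(" "):
        # Split any chunk longer than max_width and rejoin with the break character.
        chunks = [word[i:i + max_width] for i in range(0, len(word), max_width)] or [""]
        words.append(break_character.join(chunks))
    return " ".join(words)

long_word = "a" * 100
broken = break_text("normal text " + long_word, max_width=40)
print(broken.count("\u200b"))   # 2 break points were inserted inside the 100-character word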
387,912
def _build_command_chain(self, command): next = command for intercepter in reversed(self._intercepters): next = InterceptedCommand(intercepter, next) self._commands_by_name[next.get_name()] = next
Builds execution chain including all intercepters and the specified command. :param command: the command to build a chain.
387,913
def _create_binary_mathfunction(name, doc=""): def _(col1, col2): sc = SparkContext._active_spark_context if isinstance(col1, Column): arg1 = col1._jc elif isinstance(col1, basestring): arg1 = _create_column_from_name(col1) else: arg1 = float(col1) if isinstance(col2, Column): arg2 = col2._jc elif isinstance(col2, basestring): arg2 = _create_column_from_name(col2) else: arg2 = float(col2) jc = getattr(sc._jvm.functions, name)(arg1, arg2) return Column(jc) _.__name__ = name _.__doc__ = doc return _
Create a binary mathfunction by name
387,914
def disk_check_size(ctx, param, value): if value: return value
Validation callback for disk size parameter.
387,915
def vspec(data): vdata, Dirdata, step_meth = [], [], [] tr0 = data[0][0] data.append("Stop") k, R = 1, 0 for i in range(k, len(data)): Dirdata = [] if data[i][0] != tr0: if i == k: vdata.append(data[i - 1]) step_meth.append(" ") else: for l in range(k - 1, i): Dirdata.append([data[l][1], data[l][2], data[l][3]]) dir, R = vector_mean(Dirdata) vdata.append([data[i - 1][0], dir[0], dir[1], old_div(R, (i - k + 1)), , ]) step_meth.append("DE-VM") tr0 = data[i][0] k = i + 1 if tr0 == "stop": break del data[-1] return step_meth, vdata
Takes the vector mean of replicate measurements at a given step
387,916
def _start_connect(self, connect_type): if self._connect_state.value != self.CS_NOT_CONNECTED: return self._connected.value = 0 self._connect_state.value = self.CS_ATTEMPTING_CONNECT self._attempting_connect.value = connect_type if self.update_data_func is not None: self._create_update_data_system( None, setFunc=False, createThread=True) self.update_data_thread.start() if self.state_transition_func is not None: self._create_state_transition_system( None, setFunc=False, createThread=True) self.state_transition_thread.start()
Starts the connection process, as called (internally) from the user context, either from auto_connect() or connect(). Never call this from the _comm() process context.
387,917
def validate_split_runs_file(split_runs_file): try: content = [l.strip() for l in split_runs_file.readlines()] if content[0].upper().split() == [, ]: return {c.split()[1]: c.split()[0] for c in content[1:] if c} else: sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: , ") logging.error("Mandatory header of --split_runs tsv file not found: , ") except IndexError: sys.exit("ERROR: Format of --split_runs tab separated file not as expected") logging.error("ERROR: Format of --split_runs tab separated file not as expected")
Check if structure of file is as expected and return dictionary linking names to run_IDs.
387,918
def umi_transform(data): fqfiles = data["files"] fqfiles.extend(list(repeat("", 4-len(fqfiles)))) fq1, fq2, fq3, fq4 = fqfiles umi_dir = os.path.join(dd.get_work_dir(data), "umis") safe_makedir(umi_dir) transform = dd.get_umi_type(data) if not transform: logger.info("No UMI transform specified, assuming pre-transformed data.") if is_transformed(fq1): logger.info("%s detected as pre-transformed, passing it on unchanged." % fq1) data["files"] = [fq1] return [[data]] else: logger.error("No UMI transform was specified, but %s does not look " "pre-transformed." % fq1) sys.exit(1) if file_exists(transform): transform_file = transform else: transform_file = get_transform_file(transform) if not file_exists(transform_file): logger.error( "The UMI transform can be specified as either a file or a " "bcbio-supported transform. Either the file %s does not exist " "or the transform is not supported by bcbio. Supported " "transforms are %s." %(dd.get_umi_type(data), ", ".join(SUPPORTED_TRANSFORMS))) sys.exit(1) out_base = dd.get_sample_name(data) + ".umitransformed.fq.gz" out_file = os.path.join(umi_dir, out_base) if file_exists(out_file): data["files"] = [out_file] return [[data]] cellular_barcodes = get_cellular_barcodes(data) if len(cellular_barcodes) > 1: split_option = "--separate_cb" else: split_option = "" if dd.get_demultiplexed(data): demuxed_option = "--demuxed_cb %s" % dd.get_sample_name(data) split_option = "" else: demuxed_option = "" cores = dd.get_num_cores(data) with open_fastq(fq1) as in_handle: read = next(in_handle) if "UMI_" in read: data["files"] = [out_file] return [[data]] locale_export = utils.locale_export() umis = _umis_cmd(data) cmd = ("{umis} fastqtransform {split_option} {transform_file} " "--cores {cores} {demuxed_option} " "{fq1} {fq2} {fq3} {fq4}" "| seqtk seq -L 20 - | gzip > {tx_out_file}") message = ("Inserting UMI and barcode information into the read name of %s" % fq1) with file_transaction(out_file) as tx_out_file: do.run(cmd.format(**locals()), message) data["files"] = [out_file] return [[data]]
transform each read by identifying the barcode and UMI for each read and putting the information in the read name
387,919
def execute(self, *args, **kwargs): num_clients = len(self._seed_clients) start_client = (self._client_idx + 1) % num_clients def _client_error(failure, client_i): failure.trap(ConnectError) client_i = (client_i + 1) % num_clients if client_i == start_client: return failure else: return _try_execute(client_i) def _try_execute(client_i): self._client_idx = client_i d = self._seed_clients[client_i].execute(*args, **kwargs) return d.addErrback(_client_error, client_i) return _try_execute(start_client)
See :py:func:`silverberg.client.CQLClient.execute`
387,920
def __start_experiment(self, parameters): repository = Repo(self.__repository_directory, search_parent_directories=True) if len(repository.untracked_files) > 0: logging.warning("Untracked files will not be recorded: %s", repository.untracked_files) current_commit = repository.head.commit started_state_is_dirty = repository.is_dirty() if started_state_is_dirty: repository.index.add([p for p in self.__get_files_to_be_added(repository)]) commit_obj = repository.index.commit("Temporary commit for experiment " + self.__experiment_name) sha = commit_obj.hexsha else: sha = repository.head.object.hexsha data = {"parameters": parameters, "started": time.time(), "description": self.__description, "commit_sha": sha} tag_object = self.__tag_repo(data, repository) if started_state_is_dirty: repository.head.reset(current_commit, working_tree=False, index=True) return tag_object
Start an experiment by capturing the state of the code :param parameters: a dictionary containing the parameters of the experiment :type parameters: dict :return: the tag representing this experiment :rtype: TagReference
387,921
def save(state, filename=None, desc='', extra=None):
    if isinstance(state.image, util.RawImage):
        # Per the docstring: default name is RawImage.filename + '-peri-' + desc + '.pkl'
        desc = desc or 'save'
        filename = filename or state.image.filename + '-peri-' + desc + '.pkl'
    else:
        if not filename:
            raise AttributeError("Must provide filename since RawImage is not used")
    if extra is None:
        save = state
    else:
        save = [state] + extra
    if os.path.exists(filename):
        ff = "{}-tmp-for-copy".format(filename)
        if os.path.exists(ff):
            os.remove(ff)
        os.rename(filename, ff)
    pickle.dump(save, open(filename, 'wb'), protocol=2)
Save the current state with extra information (for example samples and LL from the optimization procedure). Parameters ---------- state : peri.states.ImageState the state object which to save filename : string if provided, will override the default that is constructed based on the state's raw image file. If there is no filename and the state has a RawImage, the it is saved to RawImage.filename + "-peri-save.pkl" desc : string if provided, will augment the default filename to be RawImage.filename + '-peri-' + desc + '.pkl' extra : list of pickleable objects if provided, will be saved with the state
387,922
def clone(self, population): new = empty_clone(self) new_dict = new.__dict__ for key, value in self.__dict__.items(): if key not in (, , ): new_dict[key] = value new_dict[] = population new_dict[] = population.simulation return new
Copy the holder just enough to be able to run a new simulation without modifying the original simulation.
387,923
def align_rna(job, fastqs, univ_options, star_options): star = job.wrapJobFn(run_star, fastqs, univ_options, star_options, cores=star_options[], memory=PromisedRequirement(lambda x: int(1.85 * x.size), star_options[]), disk=PromisedRequirement(star_disk, fastqs, star_options[])) s_and_i = job.wrapJobFn(sort_and_index_star, star.rv(), univ_options, star_options).encapsulate() job.addChild(star) star.addChild(s_and_i) return s_and_i.rv()
A wrapper for the entire rna alignment subgraph. :param list fastqs: The input fastqs for alignment :param dict univ_options: Dict of universal options used by almost all tools :param dict star_options: Options specific to star :return: Dict containing input bam and the generated index (.bam.bai) :rtype: dict
387,924
def get_requirements():
    # The docstring says this parses a requirements.txt file.
    with open(os.path.join(topdir, 'requirements.txt')) as fin:
        lines = fin.readlines()
    lines = [line.strip() for line in lines]
    return lines
Parse a requirements.txt file and return as a list.
387,925
def createTable(dbconn, pd):
    # Column definitions like "name TYPE"; the exact SQL template is approximated from the docstring.
    cols = ('%s %s' % (defn.name, getTypename(defn)) for defn in pd.fields)
    sql = 'CREATE TABLE IF NOT EXISTS %s (%s)' % (pd.name, ', '.join(cols))
    dbconn.execute(sql)
    dbconn.commit()
Creates a database table for the given PacketDefinition.
387,926
def _set_base_path_env(): local_config_dir = os.path.join(os.path.expanduser(), , , str(time.time()), , ) logger.info( % local_config_dir) os.environ[BASE_PATH_ENV] = local_config_dir
Sets the environment variable SAGEMAKER_BASE_DIR as ~/sagemaker_local/{timestamp}/opt/ml Returns: (bool): indicating whe
387,927
def logging_set_filter(name, filter_def, ttl, **kwargs): ctx = Context(**kwargs) ctx.execute_action(, **{ : ctx.repo.create_secure_service(), : name, : filter_def, : ttl, })
Set local filter.
387,928
def to_struct(model): model.validate() resp = {} for _, name, field in model.iterate_with_name(): value = field.__get__(model) if value is None: continue value = field.to_struct(value) resp[name] = value return resp
Cast instance of model to python structure. :param model: Model to be casted. :rtype: ``dict``
387,929
def get_review_average(obj): total = 0 reviews = get_reviews(obj) if not reviews: return False for review in reviews: average = review.get_average_rating() if average: total += review.get_average_rating() if total > 0: return total / reviews.count() return False
Returns the review average for an object.
387,930
def results(self, Snwp): r net = self.project.network P12 = net[] Vp = net[self.settings[]] Vt = net[self.settings[]] Np = self[] Nt = self[] Pinv = (Np[P12].T == Nt).T Vinv = sp.vstack(((Pinv*Vp[P12]).T, Vt)).T Vinv = sp.sum(Vinv, axis=1) x = sp.argsort(Nt) Vinv_cum = np.cumsum(Vinv[x]) S = Vinv_cum/(Vp.sum() + Vt.sum()) try: N = sp.where(S < Snwp)[0][-1] except: N = -np.inf data = {: Np <= N, : Nt <= N} return data
r""" Returns the phase configuration at the specified non-wetting phase (invading phase) saturation. Parameters ---------- Snwp : scalar, between 0 and 1 The network saturation for which the phase configuration is desired. Returns ------- Two dictionary containing arrays that describe the pore and throat distribution at the given saturation. Specifically, these are: **'pore.occupancy'** : 1 indicates the pores is invaded and 0 otherwise. **'throat.occupancy'** : Same as described above but for throats.
387,931
def post(action, params=None, version=6):
    if params is None:
        params = dict()
    # AnkiConnect expects a JSON body with "action", "version" and "params" keys.
    to_send = {
        'action': action,
        'version': version,
        'params': params
    }
    r = requests.post(AnkiConnect.URL, json=to_send)
    return r.json()
For the documentation, see https://foosoft.net/projects/anki-connect/ :param str action: :param dict params: :param int version: :return:
387,932
def RecreateInstanceDisks(r, instance, disks=None, nodes=None): body = {} if disks is not None: body["disks"] = disks if nodes is not None: body["nodes"] = nodes return r.request("post", "/2/instances/%s/recreate-disks" % instance, content=body)
Recreate an instance's disks. @type instance: string @param instance: Instance name @type disks: list of int @param disks: List of disk indexes @type nodes: list of string @param nodes: New instance nodes, if relocation is desired @rtype: string @return: job id
387,933
def heap_item(clock, record, shard): second_ordering = int(record["meta"]["sequence_number"]) total_ordering = (ordering, second_ordering, clock()) return total_ordering, record, shard
Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer.
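A self-contained sketch of how such ordering tuples behave in a heap; here the primary key is just the sequence number, with an arrival counter standing in for clock() as a tie-breaker:

import heapq
import itertools

clock = itertools.count()   # monotonically increasing tie-breaker, standing in for clock()
buffer = []

def push(record, shard):
    ordering = int(record["meta"]["sequence_number"])
    # Ties on the sequence number are broken by arrival order, never by comparing dicts.
    heapq.heappush(buffer, ((ordering, next(clock)), record, shard))

push({"meta": {"sequence_number": "20"}}, "shard-b")
push({"meta": {"sequence_number": "5"}}, "shard-a")
print(heapq.heappop(buffer)[2])   # shard-a: its record has the lower sequence number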
387,934
def json(self): return { "elevation": self.elevation, "latitude": self.latitude, "longitude": self.longitude, "icao_code": self.icao_code, "name": self.name, "quality": self.quality, "wban_ids": self.wban_ids, "recent_wban_id": self.recent_wban_id, "climate_zones": { "iecc_climate_zone": self.iecc_climate_zone, "iecc_moisture_regime": self.iecc_moisture_regime, "ba_climate_zone": self.ba_climate_zone, "ca_climate_zone": self.ca_climate_zone, }, }
Return a JSON-serializeable object containing station metadata.
387,935
def _wrap_key(function, args, kws): return hashlib.md5(pickle.dumps((_from_file(function) + function.__name__, args, kws))).hexdigest()
get the key from the function input.
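The idea, a cache key derived from hashing the pickled function identity plus its arguments, can be sketched in isolation (the module-plus-name identity below is a simplification of the original _from_file lookup):

import hashlib
import pickle

def cache_key(func, args, kwargs):
    # Hash the function identity together with its call arguments.
    payload = pickle.dumps((func.__module__ + "." + func.__qualname__, args, kwargs))
    return hashlib.md5(payload).hexdigest()

def add(a, b):
    return a + b

print(cache_key(add, (1, 2), {}))
print(cache_key(add, (1, 2), {}) == cache_key(add, (1, 3), {}))   # False: different arguments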
387,936
def notify( self, method_name: str, *args: Any, trim_log_values: Optional[bool] = None, validate_against_schema: Optional[bool] = None, **kwargs: Any ) -> Response: return self.send( Notification(method_name, *args, **kwargs), trim_log_values=trim_log_values, validate_against_schema=validate_against_schema, )
Send a JSON-RPC request, without expecting a response. Args: method_name: The remote procedure's method name. args: Positional arguments passed to the remote procedure. kwargs: Keyword arguments passed to the remote procedure. trim_log_values: Abbreviate the log entries of requests and responses. validate_against_schema: Validate response against the JSON-RPC schema.
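Per the JSON-RPC 2.0 spec, a notification is simply a request object without an "id" member, which is why no response is expected; a minimal payload builder:

import json

def notification(method, *args, **kwargs):
    # No "id" member means the server must not reply.
    message = {"jsonrpc": "2.0", "method": method}
    if args:
        message["params"] = list(args)
    elif kwargs:
        message["params"] = kwargs
    return json.dumps(message)

print(notification("log", "user signed in"))
# {"jsonrpc": "2.0", "method": "log", "params": ["user signed in"]}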
387,937
def enable_one_shot_process_breakpoints(self, dwProcessId): for bp in self.get_process_code_breakpoints(dwProcessId): if bp.is_disabled(): self.enable_one_shot_code_breakpoint(dwProcessId, bp.get_address()) for bp in self.get_process_page_breakpoints(dwProcessId): if bp.is_disabled(): self.enable_one_shot_page_breakpoint(dwProcessId, bp.get_address()) if self.system.has_process(dwProcessId): aProcess = self.system.get_process(dwProcessId) else: aProcess = Process(dwProcessId) aProcess.scan_threads() for aThread in aProcess.iter_threads(): dwThreadId = aThread.get_tid() for bp in self.get_thread_hardware_breakpoints(dwThreadId): if bp.is_disabled(): self.enable_one_shot_hardware_breakpoint(dwThreadId, bp.get_address())
Enables for one shot all disabled breakpoints for the given process. @type dwProcessId: int @param dwProcessId: Process global ID.
387,938
def trim_wavs(org_wav_dir=ORG_WAV_DIR, tgt_wav_dir=TGT_WAV_DIR, org_xml_dir=ORG_XML_DIR): logging.info("Trimming wavs...") if not os.path.exists(os.path.join(tgt_wav_dir, "TEXT")): os.makedirs(os.path.join(tgt_wav_dir, "TEXT")) if not os.path.exists(os.path.join(tgt_wav_dir, "WORDLIST")): os.makedirs(os.path.join(tgt_wav_dir, "WORDLIST")) for fn in os.listdir(org_xml_dir): path = os.path.join(org_xml_dir, fn) prefix, _ = os.path.splitext(fn) if os.path.isdir(path): continue if not path.endswith(".xml"): continue logging.info("Trimming wavs from {}".format(fn)) rec_type, _, times, _ = pangloss.get_sents_times_and_translations(path) for i, (start_time, end_time) in enumerate(times): if prefix.endswith("PLUSEGG"): in_wav_path = os.path.join(org_wav_dir, prefix.upper()[:-len("PLUSEGG")]) + ".wav" else: in_wav_path = os.path.join(org_wav_dir, prefix.upper()) + ".wav" headmic_path = os.path.join(org_wav_dir, prefix.upper()) + "_HEADMIC.wav" if os.path.isfile(headmic_path): in_wav_path = headmic_path out_wav_path = os.path.join(tgt_wav_dir, rec_type, "%s.%d.wav" % (prefix, i)) if not os.path.isfile(in_wav_path): raise PersephoneException("{} not a file.".format(in_wav_path)) start_time = start_time * ureg.seconds end_time = end_time * ureg.seconds wav.trim_wav_ms(Path(in_wav_path), Path(out_wav_path), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude)
Extracts sentence-level transcriptions, translations and wavs from the Na Pangloss XML and WAV files. But otherwise doesn't preprocess them.
387,939
def concatenate(self, other): if not isinstance(other, LineString): other = LineString(other) return self.deepcopy( coords=np.concatenate([self.coords, other.coords], axis=0))
Concatenate this line string with another one. This will add a line segment between the end point of this line string and the start point of `other`. Parameters ---------- other : imgaug.augmentables.lines.LineString or ndarray \ or iterable of tuple of number The points to add to this line string. Returns ------- imgaug.augmentables.lines.LineString New line string with concatenated points. The `label` of this line string will be kept.
387,940
def duplicate(self): collection = self.__class__(self.header.duplicate(), self.values, self.datetimes) collection._validated_a_period = self._validated_a_period return collection
Return a copy of the current Data Collection.
387,941
def sg_get_context():
    global _context
    res = tf.sg_opt()
    for c in _context:
        res += c
    return res
r"""Get current context information Returns: tf.sg_opt class object which contains all context information
387,942
def parse_interface(iface): sections = [ ] docs = iface[] code = % iface[] for v in iface["functions"]: func_code = % v[] i = 0 for p in v["params"]: if i == 0: i = 1 else: func_code += ", " func_code += % (p[], format_type(p)) func_code += % format_type(v[]) if v.has_key() and v[]: if code: sections.append(to_section(docs, code)) docs = v[] code = func_code else: code += func_code code += "}" sections.append(to_section(docs, code)) return sections
Returns a docco section for the given interface. :Parameters: iface Parsed IDL interface dict. Keys: 'comment', 'name', 'returns', 'params'
387,943
def contains(self, time: datetime.datetime, inclusive: bool = True) -> bool: if inclusive: return self.start <= time <= self.end else: return self.start < time < self.end
Does the interval contain a momentary time? Args: time: the ``datetime.datetime`` to check inclusive: use inclusive rather than exclusive range checks?
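The inclusive/exclusive distinction only matters at the interval boundaries, as a small standalone check shows:

import datetime

start = datetime.datetime(2024, 1, 1, 9, 0)
end = datetime.datetime(2024, 1, 1, 17, 0)

def contains(time, inclusive=True):
    # Chained comparisons give the interval check directly.
    return start <= time <= end if inclusive else start < time < end

boundary = datetime.datetime(2024, 1, 1, 9, 0)
print(contains(boundary))                   # True  (inclusive)
print(contains(boundary, inclusive=False))  # False (boundary excluded)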
387,944
def write_msr(address, value): if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64): raise NotImplementedError( "MSR writing is only supported on i386 or amd64 processors.") msr = win32.SYSDBG_MSR() msr.Address = address msr.Data = value win32.NtSystemDebugControl(win32.SysDbgWriteMsr, InputBuffer = msr)
Set the contents of the specified MSR (Machine Specific Register). @type address: int @param address: MSR to write. @type value: int @param value: Contents to write on the MSR. @raise WindowsError: Raises an exception on error. @raise NotImplementedError: Current architecture is not C{i386} or C{amd64}. @warning: It could potentially brick your machine. It works on my machine, but your mileage may vary.
387,945
def users_with_birthday(self, month, day): users = User.objects.filter(properties___birthday__month=month, properties___birthday__day=day) results = [] for user in users: results.append(user) return results
Return a list of user objects who have a birthday on a given date.
387,946
def _parse_posts(self, raw_posts): parsed_posts = self.parse_json(raw_posts) for post_id in parsed_posts[]: yield parsed_posts[][post_id]
Parse posts and returns in order.
387,947
def dashed(requestContext, seriesList, dashLength=5): for series in seriesList: series.name = % (series.name, dashLength) series.options[] = dashLength return seriesList
Takes one metric or a wildcard seriesList, followed by a float F. Draw the selected metrics with a dotted line with segments of length F If omitted, the default length of the segments is 5.0 Example:: &target=dashed(server01.instance01.memory.free,2.5)
387,948
def add_perfdata(self, *args, **kwargs): self._perfdata.append(Perfdata(*args, **kwargs))
add a perfdata to the internal perfdata list arguments: the same arguments as for Perfdata()
387,949
def _url_to_epub(self):
    self.log.debug('starting the _url_to_epub method')
    from polyglot import htmlCleaner
    cleaner = htmlCleaner(
        log=self.log,
        settings=self.settings,
        url=self.urlOrPath,
        outputDirectory=self.outputDirectory,
        title=self.title,
        style=False)
    # the remainder of the original body was truncated; the call that produces
    # the epub path is assumed here
    epub = cleaner.clean()
    return epub
*generate the epub book from a URL*
387,950
def predict(self, predict_set ): predict_data = np.array( [instance.features for instance in predict_set ] ) return self.update( predict_data )
This method accepts a list of Instances Eg: list_of_inputs = [ Instance([0.12, 0.54, 0.84]), Instance([0.15, 0.29, 0.49]) ]
387,951
def is_ancestor_of_family(self, id_, family_id): if self._catalog_session is not None: return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=family_id) return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=family_id)
Tests if an ``Id`` is an ancestor of a family. arg: id (osid.id.Id): an ``Id`` arg: family_id (osid.id.Id): the ``Id`` of a family return: (boolean) - ``true`` if this ``id`` is an ancestor of ``family_id,`` ``false`` otherwise raise: NotFound - ``family_id`` is not found raise: NullArgument - ``id`` or ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
387,952
def convert_iris(directory, output_directory, output_filename=): classes = {b: 0, b: 1, b: 2} data = numpy.loadtxt( os.path.join(directory, ), converters={4: lambda x: classes[x]}, delimiter=) features = data[:, :-1].astype() targets = data[:, -1].astype().reshape((-1, 1)) data = ((, , features), (, , targets)) output_path = os.path.join(output_directory, output_filename) h5file = h5py.File(output_path, mode=) fill_hdf5_file(h5file, data) h5file[].dims[0].label = h5file[].dims[1].label = h5file[].dims[0].label = h5file[].dims[1].label = h5file.flush() h5file.close() return (output_path,)
Convert the Iris dataset to HDF5. Converts the Iris dataset to an HDF5 dataset compatible with :class:`fuel.datasets.Iris`. The converted dataset is saved as 'iris.hdf5'. This method assumes the existence of the file `iris.data`. Parameters ---------- directory : str Directory in which input files reside. output_directory : str Directory in which to save the converted dataset. output_filename : str, optional Name of the saved dataset. Defaults to `None`, in which case a name based on `dtype` will be used. Returns ------- output_paths : tuple of str Single-element tuple containing the path to the converted dataset.
387,953
def partial_ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores))
Return the ratio of the most similar substring as a number between 0 and 100.
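A compact standalone version of the same idea using only difflib: align each matching block of the shorter string against a window of the longer one and keep the best score (the 0.995 early exit from the original is omitted):

from difflib import SequenceMatcher

def partial_ratio_demo(s1, s2):
    shorter, longer = sorted((s1, s2), key=len)
    scores = []
    for block in SequenceMatcher(None, shorter, longer).get_matching_blocks():
        # Slide a window of the longer string to line up with the shorter one.
        start = max(block[1] - block[0], 0)
        window = longer[start:start + len(shorter)]
        scores.append(SequenceMatcher(None, shorter, window).ratio())
    return int(round(100 * max(scores)))

print(partial_ratio_demo("fuzzy wuzzy", "fuzzy wuzzy was a bear"))   # 100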
387,954
def set_power_supplies(self, power_supplies): power_supply_id = 0 for power_supply in power_supplies: yield from self._hypervisor.send(.format(name=self._name, power_supply_id=power_supply_id, powered_on=power_supply)) log.info(.format(name=self._name, id=self._id, power_supply_id=power_supply_id, powered_on=power_supply)) power_supply_id += 1 self._power_supplies = power_supplies
Sets the 2 power supplies with 0 = off, 1 = on. :param power_supplies: list of 2 power supplies. Example: [1, 0] = first power supply is on, second is off.
387,955
def list_namespaces(self): response = self._perform_get( self._get_path(, None), None) return _MinidomXmlToObject.convert_response_to_feeds( response, _ServiceBusManagementXmlSerializer.xml_to_namespace)
List the service bus namespaces defined on the account.
387,956
def on_backward_end(self, **kwargs): "Clip the gradient before the optimizer step." if self.clip: nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip)
Clip the gradient before the optimizer step.
387,957
def getport(self, default=None): port = self.port if port: return int(port) else: return default
Return the port subcomponent of the URI authority as an :class:`int`, or `default` if the original URI reference did not contain a port or if the port was empty.
387,958
def entity_data(self, entity_type, entity_id, history_index): return self.entity_history(entity_type, entity_id)[history_index]
Return the data dict for an entity at a specific index of its history.
387,959
def add_petabencana_layer(self): from safe.gui.tools.peta_bencana_dialog import PetaBencanaDialog dialog = PetaBencanaDialog(self.iface.mainWindow(), self.iface) dialog.show()
Add petabencana layer to the map. This uses the PetaBencana API to fetch the latest floods in JK. See https://data.petabencana.id/floods
387,960
def get_for_site(cls, site): instance, created = cls.objects.get_or_create(site=site) return instance
Return the 'main menu' instance for the provided site
387,961
def auto_forward(auto=True): global __auto_forward_state prev = __auto_forward_state __auto_forward_state = auto yield __auto_forward_state = prev
Context for dynamic graph execution mode. Args: auto (bool): Whether forward computation is executed during a computation graph construction. Returns: bool
387,962
def next(self): try: next = self.outbox.next() except StopIteration, excp: self.log.debug( % self) self.finished = True raise excp except (AttributeError, RuntimeError), excp: self.log.error( % self) raise PiperError( % self, excp) except IndexError, excp: self.log.error( % self) raise PiperError( % self, excp) except TimeoutError, excp: self.log.error( % \ (self, self.timeout)) next = PiperError(excp) if isinstance(next, WorkerError): self.log.error( % \ (self, type(next[0]), next[0], next[1], next[2])) if self.debug: raise PiperError( % \ (self, type(next[0]), next[0], next[1], next[2])) next = PiperError(next) elif isinstance(next, PiperError): if self.debug: raise next self.log.debug( % (self, next[0])) return next
Returns the next result. If no result is available within the specified (during construction) "timeout" then a ``PiperError`` which wraps a ``TimeoutError`` is **returned**. If the result is a ``WorkerError`` it is also wrapped in a ``PiperError`` and is returned or raised if "debug" mode was specified at initialization. If the result is a ``PiperError`` it is propagated.
387,963
def create(dataset, target, feature=None, model = , l2_penalty=0.01, l1_penalty=0.0, solver=, feature_rescaling=True, convergence_threshold = _DEFAULT_SOLVER_OPTIONS[], step_size = _DEFAULT_SOLVER_OPTIONS[], lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS[], max_iterations = _DEFAULT_SOLVER_OPTIONS[], class_weights = None, validation_set = , verbose=True, seed=None, batch_size=64): start_time = _time.time() allowed_models = list(_pre_trained_models.MODELS.keys()) if _mac_ver() >= (10,14): allowed_models.append() } return ImageClassifier(state)
Create a :class:`ImageClassifier` model. Parameters ---------- dataset : SFrame Input data. The column named by the 'feature' parameter will be extracted for modeling. target : string, or int Name of the column containing the target variable. The values in this column must be of string or integer type. String target variables are automatically mapped to integers in the order in which they are provided. For example, a target variable with 'cat' and 'dog' as possible values is mapped to 0 and 1 respectively with 0 being the base class and 1 being the reference class. Use `model.classes` to retrieve the order in which the classes are mapped. feature : string, optional indicates that the SFrame has only column of Image type and that will Name of the column containing the input images. 'None' (the default) indicates the only image column in `dataset` should be used as the feature. l2_penalty : float, optional Weight on l2 regularization of the model. The larger this weight, the more the model coefficients shrink toward 0. This introduces bias into the model but decreases variance, potentially leading to better predictions. The default value is 0.01; setting this parameter to 0 corresponds to unregularized logistic regression. See the ridge regression reference for more detail. l1_penalty : float, optional Weight on l1 regularization of the model. Like the l2 penalty, the higher the l1 penalty, the more the estimated coefficients shrink toward 0. The l1 penalty, however, completely zeros out sufficiently small coefficients, automatically indicating features that are not useful for the model. The default weight of 0 prevents any features from being discarded. See the LASSO regression reference for more detail. solver : string, optional Name of the solver to be used to solve the regression. See the references for more detail on each solver. Available solvers are: - *auto (default)*: automatically chooses the best solver for the data and model parameters. - *newton*: Newton-Raphson - *lbfgs*: limited memory BFGS - *fista*: accelerated gradient descent For this model, the Newton-Raphson method is equivalent to the iteratively re-weighted least squares algorithm. If the l1_penalty is greater than 0, use the 'fista' solver. The model is trained using a carefully engineered collection of methods that are automatically picked based on the input data. The ``newton`` method works best for datasets with plenty of examples and few features (long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for wide datasets (i.e datasets with many coefficients). ``fista`` is the default solver for l1-regularized linear regression. The solvers are all automatically tuned and the default options should function well. See the solver options guide for setting additional parameters for each of the solvers. See the user guide for additional details on how the solver is chosen. (see `here <https://apple.github.io/turicreate/docs/userguide/supervised-learning/linear-regression.html>`_) feature_rescaling : boolean, optional Feature rescaling is an important pre-processing step that ensures that all features are on the same scale. An l2-norm rescaling is performed to make sure that all features are of the same norm. Categorical features are also rescaled by rescaling the dummy variables that are used to represent them. The coefficients are returned in original scale of the problem. This process is particularly useful when features vary widely in their ranges. 
convergence_threshold : float, optional Convergence is tested using variation in the training objective. The variation in the training objective is calculated using the difference between the objective values between two steps. Consider reducing this below the default value (0.01) for a more accurately trained model. Beware of overfitting (i.e a model that works well only on the training data) if this parameter is set to a very low value. lbfgs_memory_level : float, optional The L-BFGS algorithm keeps track of gradient information from the previous ``lbfgs_memory_level`` iterations. The storage requirement for each of these gradients is the ``num_coefficients`` in the problem. Increasing the ``lbfgs_memory_level ``can help improve the quality of the model trained. Setting this to more than ``max_iterations`` has the same effect as setting it to ``max_iterations``. model : string optional Uses a pretrained model to bootstrap an image classifier: - "resnet-50" : Uses a pretrained resnet model. Exported Core ML model will be ~90M. - "squeezenet_v1.1" : Uses a pretrained squeezenet model. Exported Core ML model will be ~4.7M. - "VisionFeaturePrint_Scene": Uses an OS internal feature extractor. Only on available on iOS 12.0+, macOS 10.14+ and tvOS 12.0+. Exported Core ML model will be ~41K. Models are downloaded from the internet if not available locally. Once downloaded, the models are cached for future use. step_size : float, optional The starting step size to use for the ``fista`` solver. The default is set to 1.0, this is an aggressive setting. If the first iteration takes a considerable amount of time, reducing this parameter may speed up model training. class_weights : {dict, `auto`}, optional Weights the examples in the training data according to the given class weights. If set to `None`, all classes are supposed to have weight one. The `auto` mode set the class weight to be inversely proportional to number of examples in the training data with the given class. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. max_iterations : int, optional The maximum number of allowed passes through the data. More passes over the data can result in a more accurately trained model. Consider increasing this (the default value is 10) if the training accuracy is low and the *Grad-Norm* in the display is large. verbose : bool, optional If True, prints progress updates and model details. seed : int, optional Seed for random number generation. Set this value to ensure that the same model is created every time. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve performance. Returns ------- out : ImageClassifier A trained :class:`ImageClassifier` model. Examples -------- .. 
sourcecode:: python >>> model = turicreate.image_classifier.create(data, target='is_expensive') # Make predictions (in various forms) >>> predictions = model.predict(data) # predictions >>> predictions = model.classify(data) # predictions with confidence >>> predictions = model.predict_topk(data) # Top-5 predictions (multiclass) # Evaluate the model with ground truth data >>> results = model.evaluate(data) See Also -------- ImageClassifier
387,964
def force_leave(self, node): params = {"node": node} return self.request("force-leave", params=params, method="post").status_code
Force a failed gossip member into the left state. https://www.nomadproject.io/docs/http/agent-force-leave.html returns: 200 status code raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException
387,965
def feature_path(self, gff_path): if not gff_path: self.feature_dir = None self.feature_file = None else: if not op.exists(gff_path): raise OSError(.format(gff_path)) if not op.dirname(gff_path): self.feature_dir = else: self.feature_dir = op.dirname(gff_path) self.feature_file = op.basename(gff_path)
Load a GFF file with information on a single sequence and store features in the ``features`` attribute Args: gff_path: Path to GFF file.
387,966
def post(self, route: str, callback: object):
    self.__set_route('post', {route: callback})
    return RouteMapping
Binds a POST route with the given callback :rtype: object
387,967
def getWorksheet(self): worksheet = self.getBackReferences() if not worksheet: return None if len(worksheet) > 1: logger.error( "Analysis %s is assigned to more than one worksheet." % self.getId()) return worksheet[0]
Returns the Worksheet to which this analysis belongs to, or None
387,968
def get_frequency_dict(lang, wordlist=, match_cutoff=30): freqs = {} pack = get_frequency_list(lang, wordlist, match_cutoff) for index, bucket in enumerate(pack): freq = cB_to_freq(-index) for word in bucket: freqs[word] = freq return freqs
Get a word frequency list as a dictionary, mapping tokens to frequencies as floating-point probabilities.
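Assuming the centibel convention (cB = 100 * log10(frequency)), the cB_to_freq(-index) call above maps bucket indices to probabilities roughly like this:

def cB_to_freq(cB):
    # One centibel is a hundredth of a decade, so -100 cB is a factor-of-10 drop.
    return 10 ** (cB / 100)

for index in [0, 100, 200]:
    print(index, cB_to_freq(-index))   # 1.0, 0.1, 0.01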
387,969
def create(self, instance, parameters, existing=True): return self.service_instance.create(instance, parameters, existing)
Create an instance Args: instance (AtlasServiceInstance.Instance): Existing or New instance parameters (dict): Parameters for the instance Keyword Arguments: existing (bool): True (use an existing cluster), False (create a new cluster) Returns: ProvisionedServiceSpec: Status
387,970
def select_by_visible_text(self, text):
    # Exact-match lookup first; the XPath literal follows the usual normalize-space pattern.
    xpath = ".//option[normalize-space(.) = {0}]".format(self._escape_string(text))
    opts = self.find_elements_by_xpath(xpath)
    matched = False
    for opt in opts:
        self._set_selected(opt)
        if not self.is_multiple:
            return
        matched = True
    sub_string_without_space = self._get_longest_token(text)
    if sub_string_without_space == "":
        candidates = self.get_options()
    else:
        xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
        candidates = self.find_elements_by_xpath(xpath)
    for candidate in candidates:
        if text == candidate.text:
            self._set_selected(candidate)
            if not self.is_multiple:
                return
            matched = True
    if not matched:
        raise NoSuchElementException("Could not locate element with visible text: " + str(text))
Performs search of selected item from Web List @params text - string visible text
387,971
def get_under_hollow(self):
    C0 = self.B[-1:] * (3, 3, 1)
    ads_pos = C0.positions[4]
    C = self.get_subsurface_layer() * (3, 3, 1)
    ret = 'FCC'
    if np.any([np.linalg.norm(ads_pos[:2] - ele.position[:2]) <
               0.5 * cradii[ele.number] for ele in C]):
        # an atom sits directly below the adsorbate in the subsurface layer
        ret = 'HCP'
    return ret
Return HCP if an atom is present below the adsorbate in the subsurface layer and FCC if not
387,972
def cross_signal(s1, s2, continuous=0): def _convert(src, other): if isinstance(src, pd.DataFrame): return src.min(axis=1, skipna=0), src.max(axis=1, skipna=0) elif isinstance(src, pd.Series): return src, src elif isinstance(src, (int, float)): s = pd.Series(src, index=other.index) return s, s elif isinstance(src, (tuple, list)): l, u = min(src), max(src) assert l <= u, lower, upper = pd.Series(l, index=other.index), pd.Series(u, index=other.index) return lower, upper else: raise Exception( % type(src)) lower1, upper1 = _convert(s1, s2) lower2, upper2 = _convert(s2, s1) df = pd.DataFrame({: upper1, : lower1, : upper2, : lower2}) df.ffill(inplace=True) signal = pd.Series(np.nan, index=df.index) signal[df.upper1 > df.upper2] = 1 signal[df.lower1 < df.lower2] = -1 if continuous: signal = signal.fillna(method=) m1, m2 = df.upper1.first_valid_index(), df.upper2.first_valid_index() if m1 is not None or m2 is not None: m1 = m2 if m1 is None else m1 m2 = m1 if m2 is None else m2 fv = max(m1, m2) if np.isnan(signal[fv]): signal[fv] = 0 signal.ffill(inplace=1) else: signal[(df.upper1 < df.upper2) & (df.lower1 > df.lower2)] = 0 eq = (df.upper1 == df.upper2) if eq.any(): tmp = signal[eq] for i in tmp.index: loc = signal.index.get_loc(i) if loc != 0: u, l = df.upper2.iloc[loc], df.lower2.iloc[loc] ps = signal.iloc[loc - 1] if u == l or ps == 1.: signal[i] = ps else: signal[i] = 0 eq = (df.lower1 == df.lower2) if eq.any(): tmp = signal[eq] for i in tmp.index: loc = signal.index.get_loc(i) if loc != 0: u, l = df.upper2.iloc[loc], df.lower2.iloc[loc] ps = signal.iloc[loc - 1] if u == l or ps == -1.: signal[i] = ps else: signal[i] = 0 return signal
return a signal with the following values: 1 : when all values of s1 cross above all values of s2; -1 : when all values of s1 cross below all values of s2; 0 : if s1 < max(s2) and s1 > min(s2); np.nan : if s1 or s2 contains np.nan at that position. s1: Series, DataFrame, float, int, or tuple(float|int) s2: Series, DataFrame, float, int, or tuple(float|int) continuous: bool, if true then once the signal starts it is always 1 or -1
387,973
def _plot_simple_fault(self, source, border=, border_width=1.0): trace_lons = np.array([pnt.longitude for pnt in source.fault_trace.points]) trace_lats = np.array([pnt.latitude for pnt in source.fault_trace.points]) surface_projection = _fault_polygon_from_mesh(source) x, y = self.m(surface_projection[:, 0], surface_projection[:, 1]) self.m.plot(x, y, border, linewidth=border_width) x, y = self.m(trace_lons, trace_lats) self.m.plot(x, y, border, linewidth=1.3 * border_width)
Plots the simple fault source as a composite of the fault trace and the surface projection of the fault. :param source: Fault source as instance of :class: mtkSimpleFaultSource :param str border: Line properties of border (see matplotlib documentation for detail) :param float border_width: Line width of border (see matplotlib documentation for detail)
387,974
def textFromHTML(html): cleaner = lxml.html.clean.Cleaner(scripts=True) cleaned = cleaner.clean_html(html) return lxml.html.fromstring(cleaned).text_content()
Cleans and parses text from the given HTML.
387,975
def get(self, key, value=None): "x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None." _key = self._prepare_key(key) prefix, node = self._get_node_by_key(_key) if prefix==_key and node.value is not None: return self._unpickle_value(node.value) else: return value
x.get(k[,d]) -> x[k] if k in x, else d. d defaults to None.
387,976
def _gser(a,x): "Series representation of Gamma. NumRec sect 6.1." ITMAX=100 EPS=3.e-7 gln=lgamma(a) assert(x>=0), if x == 0 : return 0,gln ap = a delt = sum = 1./a for i in range(ITMAX): ap=ap+1. delt=delt*x/ap sum=sum+delt if abs(delt) < abs(sum)*EPS: break else: print() gamser=sum*np.exp(-x+a*np.log(x)-gln) return gamser,gln
Series representation of Gamma. NumRec sect 6.1.
387,977
def initialize(self, argv=None): super(BaseParallelApplication, self).initialize(argv) self.to_work_dir() self.reinit_logging()
initialize the app
387,978
def attach(self, lun_or_snap, skip_hlu_0=False): try: return self._attach_with_retry(lun_or_snap, skip_hlu_0) except ex.SystemAPINotSupported: raise except ex.UnityAttachExceedLimitError: raise except: self.detach(lun_or_snap) raise
Attaches lun, snap or member snap of cg snap to host. Don't pass cg snapshot in as `lun_or_snap`. :param lun_or_snap: the lun, snap, or a member snap of cg snap :param skip_hlu_0: whether to skip hlu 0 :return: the hlu number
387,979
def image_props(event): x, y = event.mouseevent.xdata, event.mouseevent.ydata i, j = _coords2index(event.artist, x, y) z = event.artist.get_array()[i,j] if z.size > 1: z = .join(.format(item) for item in z) return dict(z=z, i=i, j=j)
Get information for a pick event on an ``AxesImage`` artist. Returns a dict of "i" & "j" index values of the image for the point clicked, and "z": the (uninterpolated) value of the image at i,j. Parameters ----------- event : PickEvent The pick event to process Returns -------- props : dict A dict with keys: z, i, j
387,980
async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError() self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks)
Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required.
387,981
def whoami(ctx, opts): click.echo("Retrieving your authentication status from the API ... ", nl=False) context_msg = "Failed to retrieve your authentication status!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): is_auth, username, email, name = get_user_brief() click.secho("OK", fg="green") click.echo("You are authenticated as:") if not is_auth: click.secho("Nobody (i.e. anonymous user)", fg="yellow") else: click.secho( "%(name)s (slug: %(username)s, email: %(email)s)" % { "name": click.style(name, fg="cyan"), "username": click.style(username, fg="magenta"), "email": click.style(email, fg="green"), } )
Retrieve your current authentication status.
387,982
def apply_parallel(func: Callable, data: List[Any], cpu_cores: int = None) -> List[Any]: if not cpu_cores: cpu_cores = cpu_count() try: chunk_size = ceil(len(data) / cpu_cores) pool = Pool(cpu_cores) transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1) finally: pool.close() pool.join() return transformed_data
Apply function to list of elements. Automatically determines the chunk size.
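A runnable sketch of the same chunk-per-core pattern, with a hand-rolled chunked helper and a toy total function in place of the real workload:

from math import ceil
from multiprocessing import Pool, cpu_count

def total(chunk):
    return sum(chunk)

def chunked(seq, size):
    return [seq[i:i + size] for i in range(0, len(seq), size)]

if __name__ == "__main__":
    data = list(range(100))
    cores = cpu_count()
    chunk_size = ceil(len(data) / cores)   # one chunk per core
    with Pool(cores) as pool:
        partials = pool.map(total, chunked(data, chunk_size), chunksize=1)
    print(sum(partials))                   # 4950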
387,983
def check_classes(self, scope=-1):
    for entry in self[scope].values():
        if entry.class_ is None:
            syntax_error(entry.lineno, "Unknown identifier '%s'" % entry.name)
Check if pending identifiers are defined or not. If not, returns a syntax error. If no scope is given, the current one is checked.
387,984
def make_default_options_response(self):
    adapter = _request_ctx_stack.top.url_adapter
    if hasattr(adapter, 'allowed_methods'):
        methods = adapter.allowed_methods()
    else:
        # fallback for older Werkzeug versions without allowed_methods()
        methods = []
        try:
            adapter.match(method='--')
        except MethodNotAllowed as e:
            methods = e.valid_methods
        except HTTPException as e:
            pass
    rv = self.response_class()
    rv.allow.update(methods)
    return rv
This method is called to create the default `OPTIONS` response. This can be changed through subclassing to change the default behavior of `OPTIONS` responses. .. versionadded:: 0.7
387,985
def reindex_repo_dev_panel(self, project, repository): url = .format(projectKey=project, repositorySlug=repository) return self.post(url)
Reindex all of the Jira issues related to this repository, including branches and pull requests. This automatically happens as part of an upgrade, and calling this manually should only be required if something unforeseen happens and the index becomes out of sync. The authenticated user must have REPO_ADMIN permission for the specified repository to call this resource. :param project: :param repository: :return:
387,986
def get(self, name, **kwargs): name = self.prefix + name param = self._get_impl(name) if param is None: param = Parameter(name, **kwargs) self._params[name] = param else: for k, v in kwargs.items(): if hasattr(param, k) and getattr(param, k) is not None: existing = getattr(param, k) if k == and len(v) == len(existing): inferred_shape = [] matched = True for dim1, dim2 in zip(v, existing): if dim1 != dim2 and dim1 * dim2 != 0: matched = False break elif dim1 == dim2: inferred_shape.append(dim1) elif dim1 == 0: inferred_shape.append(dim2) else: inferred_shape.append(dim1) if matched: param._shape = tuple(inferred_shape) continue elif k == and np.dtype(v) == np.dtype(existing): continue assert v is None or v == existing, \ "Cannot retrieve Parameter because desired attribute " \ "does not match with stored for attribute : " \ "desired vs stored ."%( name, k, str(v), str(getattr(param, k))) else: setattr(param, k, v) return param
Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found, :py:func:`get` will first try to retrieve it from "shared" dict. If still not found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and insert it to self. Parameters ---------- name : str Name of the desired Parameter. It will be prepended with this dictionary's prefix. **kwargs : dict The rest of key-word arguments for the created :py:class:`Parameter`. Returns ------- Parameter The created or retrieved :py:class:`Parameter`.
387,987
def preprocess(S, coloring_method=None): if not isspmatrix_csr(S): raise TypeError() if S.shape[0] != S.shape[1]: raise ValueError( % (S.shape,)) N = S.shape[0] S = csr_matrix((np.ones(S.nnz, dtype=), S.indices, S.indptr), shape=(N, N)) T = S.T.tocsr() G = S + T G.data[:] = 1 weights = np.ravel(T.sum(axis=1)) if coloring_method is None: weights = weights + sp.rand(len(weights)) else: coloring = vertex_coloring(G, coloring_method) num_colors = coloring.max() + 1 weights = weights + (sp.rand(len(weights)) + coloring)/num_colors return (weights, G, S, T)
Preprocess splitting functions. Parameters ---------- S : csr_matrix Strength of connection matrix method : string Algorithm used to compute the vertex coloring: * 'MIS' - Maximal Independent Set * 'JP' - Jones-Plassmann (parallel) * 'LDF' - Largest-Degree-First (parallel) Returns ------- weights: ndarray Weights from a graph coloring of G S : csr_matrix Strength matrix with ones T : csr_matrix transpose of S G : csr_matrix union of S and T Notes ----- Performs the following operations: - Checks input strength of connection matrix S - Replaces S.data with ones - Creates T = S.T in CSR format - Creates G = S union T in CSR format - Creates random weights - Augments weights with graph coloring (if use_color == True)
387,988
def get_thread(self, thread_id, update_if_cached=True, raise_404=False): cached_thread = self._thread_cache.get(thread_id) if cached_thread: if update_if_cached: cached_thread.update() return cached_thread res = self._requests_session.get( self._url.thread_api_url( thread_id = thread_id ) ) if raise_404: res.raise_for_status() elif not res.ok: return None thread = Thread._from_request(self, res, thread_id) self._thread_cache[thread_id] = thread return thread
Get a thread from 4chan via 4chan API. Args: thread_id (int): Thread ID update_if_cached (bool): Whether the thread should be updated if it's already in our cache raise_404 (bool): Raise an Exception if thread has 404'd Returns: :class:`basc_py4chan.Thread`: Thread object
387,989
def smooth(polylines): for c in polylines: if len(c) < 9: continue x = c[:, 0] y = c[:, 1] t = np.arange(x.shape[0], dtype=float) t /= t[-1] x = UnivariateSpline(t, x)(t) y = UnivariateSpline(t, y)(t) c[:, 0] = x c[:, 1] = y
smooth every polyline using spline interpolation
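The per-coordinate spline smoothing can be tried on a synthetic polyline (SciPy assumed available); each coordinate is fit against the point index parameter, mirroring the loop above:

import numpy as np
from scipy.interpolate import UnivariateSpline

# A noisy polyline: one (x, y) point per row.
t = np.linspace(0.0, 1.0, 50)
poly = np.column_stack([t, np.sin(2 * np.pi * t) + 0.05 * np.random.randn(50)])

# Fit each coordinate separately against the normalised parameter t.
poly[:, 0] = UnivariateSpline(t, poly[:, 0])(t)
poly[:, 1] = UnivariateSpline(t, poly[:, 1])(t)
print(poly[:3])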
387,990
def map_concepts_to_indicators( self, n: int = 1, min_temporal_res: Optional[str] = None ): for node in self.nodes(data=True): query_parts = [ "select Indicator from concept_to_indicator_mapping", f"where `Concept` like ", ] query = " ".join(query_parts) results = engine.execute(query) if min_temporal_res is not None: if min_temporal_res not in ["month"]: raise ValueError("min_temporal_res must be ") vars_with_required_temporal_resolution = [ r[0] for r in engine.execute( "select distinct `Variable` from indicator where " f"`{min_temporal_res.capitalize()}` is not null" ) ] results = [ r for r in results if r[0] in vars_with_required_temporal_resolution ] node[1]["indicators"] = { x: Indicator(x, "MITRE12") for x in [r[0] for r in take(n, results)] }
Map each concept node in the AnalysisGraph instance to one or more tangible quantities, known as 'indicators'. Args: n: Number of matches to keep min_temporal_res: Minimum temporal resolution that the indicators must have data for.
387,991
def send_stream_tail(self): with self.lock: if not self._socket or self._hup: logger.debug(u"Cannot send stream closing tag: already closed") return data = self._serializer.emit_tail() try: self._write(data.encode("utf-8")) except (IOError, SystemError, socket.error), err: logger.debug(u"Sending stream closing tag failed: {0}" .format(err)) self._serializer = None self._hup = True if self._tls_state is None: try: self._socket.shutdown(socket.SHUT_WR) except socket.error: pass self._set_state("closing") self._write_queue.clear() self._write_queue_cond.notify()
Send stream tail via the transport.
387,992
def pack(self, value=None): if value is None: self.update_header_length() return super().pack() elif isinstance(value, type(self)): return value.pack() else: msg = "{} is not an instance of {}".format(value, type(self).__name__) raise PackException(msg)
Pack the message into binary data. One of the basic operations on a Message is the pack operation. During the packing process, we convert all message attributes to binary format. Since this is usually used before sending the message to a switch, here we also call :meth:`update_header_length`. .. seealso:: This method calls its parent's :meth:`GenericStruct.pack` after :meth:`update_header_length`. Returns: bytes: Binary data that represents the Message. Raises: Exception: If there are validation errors.
387,993
def register_metrics(self, metrics_collector, interval): for field, metrics in self.metrics.items(): metrics_collector.register_metric(field, metrics, interval)
Registers its metrics to a given metrics collector with a given interval
387,994
def transform(self, Y): r check_is_fitted(self, ) n_samples_x, n_features = self.X_fit_.shape Y = numpy.asarray(Y) if Y.shape[1] != n_features: raise ValueError( % (n_features, Y.shape[1])) n_samples_y = Y.shape[0] mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float) continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64), self.X_fit_[:, self._numeric_columns].astype(numpy.float64), self._numeric_ranges, mat) if len(self._nominal_columns) > 0: _nominal_kernel(Y[:, self._nominal_columns], self.X_fit_[:, self._nominal_columns], mat) mat /= n_features return mat
r"""Compute all pairwise distances between `self.X_fit_` and `Y`. Parameters ---------- y : array-like, shape = (n_samples_y, n_features) Returns ------- kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_) Kernel matrix. Values are normalized to lie within [0, 1].
387,995
def run_recipe_timed(task, recipe, rinput): _logger.info() now1 = datetime.datetime.now() task.state = 1 task.time_start = now1 result = recipe(rinput) _logger.info(, result) task.result = result now2 = datetime.datetime.now() task.state = 2 task.time_end = now2 return task
Run the recipe and count the time it takes.
387,996
def __fade_in(self): self.__timer.stop() self.__vector = self.__fade_speed self.__timer.start()
Starts the Widget fade in.
387,997
def __replace_capall(sentence):
    if sentence is not None:
        while sentence.find('#CAPALL') != -1:
            sentence = sentence.upper()
            sentence = sentence.replace('#CAPALL', '', 1)
        if sentence.find('#CAPALL') == -1:
            return sentence
    else:
        return sentence
here we replace all instances of #CAPALL and cap the entire sentence. Don't believe that CAPALL is buggy anymore as it forces all uppercase OK? :param _sentence:
387,998
def decorate_set_on_listener(prototype): def add_annotation(method): method._event_info = {} method._event_info[] = method.__name__ method._event_info[] = prototype return method return add_annotation
Private decorator for use in the editor. Allows the Editor to create listener methods. Args: params (str): The list of parameters for the listener method (es. "(self, new_value)")
387,999
def list_members(self, list_id=None, slug=None, owner_screen_name=None, owner_id=None):
    assert list_id or (slug and (owner_screen_name or owner_id))
    # standard Twitter REST API list-members endpoint with cursor-based paging
    url = 'https://api.twitter.com/1.1/lists/members.json'
    params = {'cursor': -1}
    if list_id:
        params['list_id'] = list_id
    else:
        params['slug'] = slug
        if owner_screen_name:
            params['owner_screen_name'] = owner_screen_name
        else:
            params['owner_id'] = owner_id
    while params['cursor'] != 0:
        try:
            resp = self.get(url, params=params, allow_404=True)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                log.error("no matching list")
            raise e
        users = resp.json()
        for user in users['users']:
            yield user
        params['cursor'] = users['next_cursor']
Returns the members of a list. List id or (slug and (owner_screen_name or owner_id)) are required