Unnamed: 0 (int64): values 0 – 389k
code (string): lengths 26 – 79.6k
docstring (string): lengths 1 – 46.9k
19,400
def get_primary_contributors(self, permitted=True):
    primary_credits = []
    credits = self.credits.exclude(role=None).order_by()
    if credits:
        primary_role = credits[0].role
        for credit in credits:
            if credit.role == primary_role:
                primary_credits.append(credit)
    contributors = []
    for credit in primary_credits:
        contributor = credit.contributor
        if permitted and contributor.is_permitted:
            contributors.append(contributor)
        else:
            contributors.append(contributor)
    return contributors
Returns a list of primary contributors, with primary being defined as those contributors that have the highest role assigned (in terms of priority). When permitted is set to True, only permitted contributors are returned.
19,401
def merge(move, output_dir, sources): jsons = [] for s in sources: filename = "%s.json" % os.path.split(s)[1] jsons += [os.path.join(s, filename)] reference_config = TinyDB(jsons[0]).table() for j in jsons[1:]: for i, j in zip(reference_config.all(), TinyDB(j).table().all()): assert i == j filename = "%s.json" % os.path.split(output_dir)[1] output_json = os.path.join(output_dir, filename) output_data = os.path.join(output_dir, ) os.makedirs(output_data) db = TinyDB(output_json) db.table().insert_multiple(reference_config.all()) for s in sources: filename = "%s.json" % os.path.split(s)[1] current_db = TinyDB(os.path.join(s, filename)) db.table().insert_multiple(current_db.table().all()) for s in sources: for r in glob.glob(os.path.join(s, )): basename = os.path.basename(r) if move: shutil.move(r, os.path.join(output_data, basename)) else: shutil.copytree(r, os.path.join(output_data, basename)) if move: for s in sources: shutil.rmtree(os.path.join(s, )) shutil.rmtree(os.path.join(s, "%s.json" % os.path.split(s)[1])) shutil.rmtree(s)
Merge multiple results folders into one by copying the results over to a new folder. For a faster operation (which, on the other hand, destroys the campaign data if interrupted), the move option can be used to move results directly to the new folder.
19,402
def process_request(self, request): global _urlconf_pages page_list = list( Page.objects.exclude(glitter_app_name=).values_list(, ).order_by() ) with _urlconf_lock: if page_list != _urlconf_pages: glitter_urls = if glitter_urls in sys.modules: importlib.reload(sys.modules[glitter_urls]) _urlconf_pages = page_list
Reloads glitter URL patterns if page URLs change. Avoids having to restart the server to recreate the glitter URLs being used by Django.
19,403
def popitem(self, dict_name, priority_min=, priority_max=): if self._session_lock_identifier is None: raise ProgrammerError() conn = redis.Redis(connection_pool=self.pool) script = conn.register_script() dict_name = self._namespace(dict_name) key_value = script(keys=[self._lock_name, dict_name, dict_name + "keys"], args=[self._session_lock_identifier, priority_min, priority_max]) if key_value == -1: raise KeyError( % dict_name) if key_value == []: return None return self._decode(key_value[0]), self._decode(key_value[1])
Select an item and remove it. The item comes from `dict_name`, and has the lowest score at least `priority_min` and at most `priority_max`. If some item is found, remove it from `dict_name` and return it. This runs as a single atomic operation but still requires a session lock. :param str dict_name: source dictionary :param float priority_min: lowest score :param float priority_max: highest score :return: pair of (key, value) if an item was popped, or :const:`None`
19,404
def get(self, feature):
    if type(feature) == type([]):
        feature = feature[0]
    if not isinstance(feature, b2.build.feature.Feature):
        feature = b2.build.feature.get(feature)
    assert isinstance(feature, b2.build.feature.Feature)
    if self.feature_map_ is None:
        self.feature_map_ = {}
        for v in self.all_:
            if v.feature not in self.feature_map_:
                self.feature_map_[v.feature] = []
            self.feature_map_[v.feature].append(v.value)
    return self.feature_map_.get(feature, [])
Returns all values of 'feature'.
19,405
def get_stp_mst_detail_output_msti_port_interface_id(self, **kwargs):
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    interface_id = ET.SubElement(port, "interface-id")
    interface_id.text = kwargs.pop('interface_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
19,406
def format_signed(feature, formatter=None, **kwargs):
    txt = '' if feature['sign'] > 0 else '(-)'
    name = feature['name']
    if formatter is not None:
        name = formatter(name, **kwargs)
    return '{}{}'.format(txt, name)
Format unhashed feature with sign. >>> format_signed({'name': 'foo', 'sign': 1}) 'foo' >>> format_signed({'name': 'foo', 'sign': -1}) '(-)foo' >>> format_signed({'name': ' foo', 'sign': -1}, lambda x: '"{}"'.format(x)) '(-)" foo"'
19,407
def _releaseModifiers(self, modifiers, globally=False):
    modifiers.reverse()
    modFlags = self._pressModifiers(modifiers, pressed=False,
                                    globally=globally)
    return modFlags
Release given modifiers (provided in list form). Parameters: modifiers list Returns: None
19,408
def default_config():
    if salt.utils.systemd.booted(__context__) \
            and salt.utils.systemd.version(__context__) >= 207:
        return '/etc/sysctl.d/99-salt.conf'
    return '/etc/sysctl.conf'
Linux hosts using systemd 207 or later ignore ``/etc/sysctl.conf`` and only load from ``/etc/sysctl.d/*.conf``. This function will do the proper checks and return a default config file which will be valid for the Minion. Hosts running systemd >= 207 will use ``/etc/sysctl.d/99-salt.conf``. CLI Example: .. code-block:: bash salt -G 'kernel:Linux' sysctl.default_config
19,409
def post(self):
    data = request.get_json()
    options, sql_raw = data.get('options'), data.get('sql_raw')
    if options == 'format':
        sql_formmated = sqlparse.format(sql_raw, keyword_case=, reindent=True)
        return build_response(dict(data=sql_formmated, code=200))
    elif options in (, ):
        conn = SQL(config.sql_host, config.sql_port, config.sql_user,
                   config.sql_pwd, config.sql_db)
        result = conn.run(sql_raw)
        return build_response(dict(data=result, code=200))
    else:
        pass
return executed sql result to client. post data format: {"options": ['all', 'last', 'first', 'format'], "sql_raw": "raw sql ..."} Returns: sql result.
19,410
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None):
    include, exclude = _get_exclude_samples(in_file, to_exclude)
    if len(exclude) == 0:
        out_file = in_file
    elif not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", config)
            output_type = "z" if out_file.endswith(".gz") else "v"
            include_str = ",".join(include)
            filter_str = "-f %s" % filters if filters is not None else ""
            cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude))
    return out_file
Exclude specific samples from an input VCF file.
19,411
def ocr(img, mrz_mode=True, extra_cmdline_params=): input_file_name = % _tempnam() output_file_name_base = % _tempnam() output_file_name = "%s.txt" % output_file_name_base try: if str(img.dtype).startswith() and np.nanmin(img) >= 0 and np.nanmax(img) <= 1: img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999 img = img.astype(np.uint8) imwrite(input_file_name, img) if mrz_mode: config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><" " -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params) else: config = "{}".format(extra_cmdline_params) pytesseract.run_tesseract(input_file_name, output_file_name_base, , lang=None, config=config) if sys.version_info.major == 3: f = open(output_file_name, encoding=) else: f = open(output_file_name) try: return f.read().strip() finally: f.close() finally: pytesseract.cleanup(input_file_name) pytesseract.cleanup(output_file_name)
Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image. This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL. In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :) :param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts. When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`) :param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the "best known" configuration at the moment. "--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems to work better than the new LSTM-based one.
19,412
def timer(logger=None, level=logging.INFO, fmt="function %(function_name)s execution time: %(execution_time).3f", *func_or_func_args, **timer_kwargs): } if logger: logger.log( level, fmt % context, extra=context) else: print(fmt % context) return out return wrapped if (len(func_or_func_args) == 1 and isinstance(func_or_func_args[0], collections.Callable)): return wrapped_f(func_or_func_args[0]) else: return wrapped_f
Function decorator displaying the function execution time All kwargs are the arguments taken by the Timer class constructor.
19,413
def write_id(self):
    self.writeln(u"<tr>")
    self.writeln(u % self.part("id"))
    self.write(u"<td>%d</td></tr>" % self.stats.number)
Write ID for current URL.
19,414
def load(json_src, save=False):
    if isinstance(json_src, six.string_types):
        json_src = json_lib.loads(json_src)
    if isinstance(json_src, list):
        return [getattr(objects, obj[]).load(obj, save) for obj in json_src]
    return getattr(objects, json_src[]).load(json_src, save)
Load any json serialized cinderlib object.
19,415
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
    try:
        return scipy_special.logsumexp(
            input_tensor, axis=_astuple(axis), keepdims=keepdims)
    except NotImplementedError:
        m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
        y = input_tensor - m
        y = np.exp(y, out=y)
        return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
Computes `log(sum(exp(input_tensor)))` along the specified axis.
19,416
def comic_archive_compress(args):
    try:
        filename, old_format, settings, nag_about_gifs = args
        Settings.update(settings)
        tmp_dir = _get_archive_tmp_dir(filename)
        new_filename = files.replace_ext(filename, _NEW_ARCHIVE_SUFFIX)
        _comic_archive_write_zipfile(new_filename, tmp_dir)
        if os.path.isdir(tmp_dir):
            if Settings.verbose:
                print(, end=)
            shutil.rmtree(tmp_dir)
            if Settings.verbose:
                print()
        report_stats = files.cleanup_after_optimize(
            filename, new_filename, old_format, _CBZ_FORMAT)
        report_stats.nag_about_gifs = nag_about_gifs
        stats.report_saved(report_stats)
        return report_stats
    except Exception as exc:
        print(exc)
        traceback.print_exc(exc)
        raise exc
Called back by every optimization inside a comic archive. When they're all done it creates the new archive and cleans up.
19,417
def remove_defaults(self, *args):
    for arg in args:
        if arg not in self.defaults:
            raise KeyError(.format(arg))
    return self.discard_defaults(*args)
cplan.remove_defaults(a, b...) yields a new calculation plan identical to cplan except without default values for any of the given parameter names. An exception is raised if any default value given is not found in cplan.
19,418
def _parse_http_header(h):
    values = []
    if '"' not in h:
        for value in h.split(','):
            parts = value.split(';')
            values.append((parts[0].strip(), {}))
            for attr in parts[1:]:
                name, value = attr.split('=', 1)
                values[-1][1][name.strip()] = value.strip()
    else:
        lop, key, attrs = ',', None, {}
        for quoted, plain, tok in _hsplit(h):
            value = plain.strip() if plain else quoted.replace('\\"', '"')
            if lop == ',':
                attrs = {}
                values.append((value, attrs))
            elif lop == ';':
                if tok == '=':
                    key = value
                else:
                    attrs[value] = ''
            elif lop == '=' and key:
                attrs[key] = value
                key = None
            lop = tok
    return values
Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values and parameters. For non-standard or broken input, this implementation may return partial results. :param h: A header string (e.g. ``text/html,text/plain;q=0.9,*/*;q=0.8``) :return: List of (value, params) tuples. The second element is a (possibly empty) dict.
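A doctest-style illustration (not part of the dataset row; the exact output assumes the split and quote literals reconstructed in the code above):
>>> _parse_http_header('text/html,text/plain;q=0.9,*/*;q=0.8')
[('text/html', {}), ('text/plain', {'q': '0.9'}), ('*/*', {'q': '0.8'})]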
19,419
def convert_float_to_two_registers(floatValue):
    myList = list()
    s = bytearray(struct.pack(, floatValue))
    myList.append(s[0] | (s[1] << 8))
    myList.append(s[2] | (s[3] << 8))
    return myList
Convert a 32-bit real value to two 16-bit values to send as Modbus registers. floatValue: value to be converted. return: 16-bit register values, int[]
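A self-contained sketch of the same low-word/high-word packing. The '<f' (little-endian IEEE-754 single) format string is an assumption, since the corresponding literal is missing in the code above; match it to your device's word order.
import struct

def float_to_regs(value, fmt='<f'):
    # fmt is an assumption; the original pack-format literal was lost
    b = bytearray(struct.pack(fmt, value))
    return [b[0] | (b[1] << 8), b[2] | (b[3] << 8)]

print(float_to_regs(3.14))  # two 16-bit register values (low word, high word)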
19,420
def rollup(self, freq, **kwargs):
    return self.ret_rels().resample(freq, **kwargs).prod() - 1.0
Downsample `self` through geometric linking. Parameters ---------- freq : {'D', 'W', 'M', 'Q', 'A'} The frequency of the result. **kwargs Passed to `self.resample()`. Returns ------- TSeries Example ------- # Derive quarterly returns from monthly returns. >>> import numpy as np >>> from pyfinance import TSeries >>> np.random.seed(444) >>> ts = TSeries(np.random.randn(12) / 100 + 0.002, ... index=pd.date_range('2016', periods=12, freq='M')) >>> ts.rollup('Q') 2016-03-31 0.0274 2016-06-30 -0.0032 2016-09-30 -0.0028 2016-12-31 0.0127 Freq: Q-DEC, dtype: float64
19,421
def get_task_ops(task_type=TaskType.ALG_CTRL):
    try:
        return LearnToExecuteState.TASK_TYPE_OPS[task_type]
    except KeyError:
        raise KeyError("Bad task_type %s, check config." % task_type)
Returns an operations list based on the specified task index. Args: task_type: indicates the task type used. Returns: List of the eligible ops.
19,422
def player_stats(game_id): box_score = mlbgame.data.get_box_score(game_id) box_score_tree = etree.parse(box_score).getroot() pitching = box_score_tree.findall() batting = box_score_tree.findall() pitching_info = __player_stats_info(pitching, ) batting_info = __player_stats_info(batting, ) try: raw_box_score = mlbgame.data.get_raw_box_score(game_id) raw_box_score_tree = etree.parse(raw_box_score).getroot() additional_stats = __raw_player_stats_info(raw_box_score_tree) addl_home_pitching = additional_stats[0][] addl_home_batting = additional_stats[0][] addl_away_pitching = additional_stats[1][] addl_away_batting = additional_stats[1][] output = { : pitching_info[0], : pitching_info[1], : batting_info[0], : batting_info[1], : addl_home_pitching, : addl_away_pitching, : addl_home_batting, : addl_away_batting } except etree.XMLSyntaxError: output = { : pitching_info[0], : pitching_info[1], : batting_info[0], : batting_info[1], } return output
Return dictionary of individual stats of a game with matching id. The additional pitching/batting is mostly the same stats, except it contains some useful stats such as groundouts/flyouts per pitcher (go/ao). MLB decided to have two box score files, thus we return the data from both.
19,423
def load_from_stream(self, group):
    self._unpack_attrs(group.atts)
    self.name = group.name
    for dim in group.dims:
        new_dim = Dimension(self, dim.name)
        self.dimensions[dim.name] = new_dim
        new_dim.load_from_stream(dim)
    for var in group.vars:
        new_var = Variable(self, var.name)
        self.variables[var.name] = new_var
        new_var.load_from_stream(var)
    for grp in group.groups:
        new_group = Group(self)
        self.groups[grp.name] = new_group
        new_group.load_from_stream(grp)
    for struct in group.structs:
        new_var = Variable(self, struct.name)
        self.variables[struct.name] = new_var
        new_var.load_from_stream(struct)
    if group.enumTypes:
        for en in group.enumTypes:
            self.types[en.name] = enum.Enum(en.name,
                                            [(typ.value, typ.code) for typ in en.map])
Load a Group from an NCStream object.
19,424
def require(self, entity_type, attribute_name=None):
    if not attribute_name:
        attribute_name = entity_type
    self.requires += [(entity_type, attribute_name)]
    return self
The intent parser should require an entity of the provided type. Args: entity_type(str): an entity type attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type. Returns: self: to continue modifications.
19,425
def rpc_get_all_names( self, offset, count, **con_info ): if not check_offset(offset): return {: , : 400} if not check_count(count, 100): return {: , : 400} db = get_db_state(self.working_dir) num_domains = db.get_num_names() if num_domains > offset: all_domains = db.get_all_names( offset=offset, count=count ) else: all_domains = [] db.close() return self.success_response( {: all_domains} )
Get all unexpired names, paginated Return {'status': true, 'names': [...]} on success Return {'error': ...} on error
19,426
def _add_unitary_two(self, gate, qubit0, qubit1):
    gate_tensor = np.reshape(np.array(gate, dtype=complex), 4 * [2])
    indexes = einsum_matmul_index([qubit0, qubit1], self._number_of_qubits)
    self._unitary = np.einsum(indexes, gate_tensor, self._unitary,
                              dtype=complex, casting='no')
Apply a two-qubit unitary matrix. Args: gate (matrix_like): the two-qubit gate matrix qubit0 (int): gate qubit-0 qubit1 (int): gate qubit-1
19,427
def output_influx(data): for contract in data: yesterday_data = data[contract][] del data[contract][] out = "pyhydroquebec,contract=" + contract + " " for index, key in enumerate(data[contract]): if index != 0: out = out + "," if key in ("annual_date_start", "annual_date_end"): out += key + "=\"" + str(data[contract][key]) + "\"" else: out += key + "=" + str(data[contract][key]) out += " " + str(int(datetime.datetime.now(HQ_TIMEZONE).timestamp() * 1000000000)) print(out) yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1) yesterday = yesterday.replace(minute=0, hour=0, second=0, microsecond=0) for hour in yesterday_data: msg = "pyhydroquebec,contract={} {} {}" data = ",".join(["{}={}".format(key, value) for key, value in hour.items() if key != ]) datatime = datetime.datetime.strptime(hour[], ) yesterday = yesterday.replace(hour=datatime.hour) yesterday_str = str(int(yesterday.timestamp() * 1000000000)) print(msg.format(contract, data, yesterday_str))
Print data using influxDB format.
19,428
def getSubjectObjectsByPredicate(self, predicate):
    return sorted(
        set(
            [
                (str(s), str(o))
                for s, o in self.subject_objects(rdflib.term.URIRef(predicate))
            ]
        )
    )
Args: predicate : str Predicate for which to return subject, object tuples. Returns: list of subject, object tuples: All subject/objects with ``predicate``. Notes: Equivalent SPARQL: .. highlight: sql :: SELECT DISTINCT ?s ?o WHERE {{ ?s {0} ?o . }}
19,429
def to_nnf(self):
    node = self.node.to_nnf()
    if node is self.node:
        return self
    else:
        return _expr(node)
Return an equivalent expression in negation normal form.
19,430
def fit(
    self,
    df,
    duration_col,
    event_col=None,
    ancillary_df=None,
    show_progress=False,
    timeline=None,
    weights_col=None,
    robust=False,
    initial_point=None,
    entry_col=None,
):
    self.duration_col = duration_col
    self._time_cols = [duration_col]
    self._censoring_type = CensoringType.RIGHT
    df = df.copy()
    T = pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)
    self.durations = T.copy()
    self._fit(
        self._log_likelihood_right_censoring,
        df,
        (T.values, None),
        event_col=event_col,
        ancillary_df=ancillary_df,
        show_progress=show_progress,
        timeline=timeline,
        weights_col=weights_col,
        robust=robust,
        initial_point=initial_point,
        entry_col=entry_col,
    )
    return self
Fit the accelerated failure time model to a right-censored dataset. Parameters ---------- df: DataFrame a Pandas DataFrame with necessary columns `duration_col` and `event_col` (see below), covariates columns, and special columns (weights). `duration_col` refers to the lifetimes of the subjects. `event_col` refers to whether the 'death' events was observed: 1 if observed, 0 else (censored). duration_col: string the name of the column in DataFrame that contains the subjects' lifetimes. event_col: string, optional the name of the column in DataFrame that contains the subjects' death observation. If left as None, assume all individuals are uncensored. show_progress: boolean, optional (default=False) since the fitter is iterative, show convergence diagnostics. Useful if convergence is failing. ancillary_df: None, boolean, or DataFrame, optional (default=None) Choose to model the ancillary parameters. If None or False, explicitly do not fit the ancillary parameters using any covariates. If True, model the ancillary parameters with the same covariates as ``df``. If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``. timeline: array, optional Specify a timeline that will be used for plotting and prediction weights_col: string the column in DataFrame that specifies weights per observation. robust: boolean, optional (default=False) Compute the robust errors using the Huber sandwich estimator. initial_point: (d,) numpy array, optional initialize the starting point of the iterative algorithm. Default is the zero vector. entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__ Returns ------- self: self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more Examples -------- >>> from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter >>> >>> df = pd.DataFrame({ >>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7], >>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0], >>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2], >>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7], >>> }) >>> >>> aft = WeibullAFTFitter() >>> aft.fit(df, 'T', 'E') >>> aft.print_summary() >>> aft.predict_median(df) >>> >>> aft = WeibullAFTFitter() >>> aft.fit(df, 'T', 'E', ancillary_df=df) >>> aft.print_summary() >>> aft.predict_median(df)
19,431
def _validate_inputs(self, inputdict): try: parameters = inputdict.pop(self.get_linkname()) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, ParameterData): raise InputValidationError("parameters not of type " "ParameterData") try: code = inputdict.pop(self.get_linkname()) except KeyError: raise InputValidationError("No code specified for this " "calculation") try: structure = inputdict.pop(self.get_linkname()) if not isinstance(structure, SinglefileData): raise InputValidationError( "structure not of type SinglefileData") except KeyError: raise InputValidationError( "No input structure specified for calculation") try: surface_sample = inputdict.pop(self.get_linkname()) if not isinstance(surface_sample, SinglefileData): raise InputValidationError( "surface_sample not of type SinglefileData") except KeyError: raise InputValidationError( "No surface sample specified for calculation") if inputdict: raise ValidationError("Unrecognized inputs: {}".format(inputdict)) return parameters, code, structure, surface_sample
Validate input links.
19,432
def matchToString(aaMatch, read1, read2, indent=, offsets=None): match = aaMatch[] matchCount = match[] gapMismatchCount = match[] gapGapMismatchCount = match[] nonGapMismatchCount = match[] if offsets: len1 = len2 = len(offsets) else: len1, len2 = map(len, (read1, read2)) result = [] append = result.append append(countPrint( % indent, matchCount, len1, len2)) mismatchCount = (gapMismatchCount + gapGapMismatchCount + nonGapMismatchCount) append(countPrint( % indent, mismatchCount, len1, len2)) append(countPrint( % (indent), nonGapMismatchCount, len1, len2)) append(countPrint( % indent, gapMismatchCount, len1, len2)) append(countPrint( % indent, gapGapMismatchCount, len1, len2)) for read, key in zip((read1, read2), (, )): append( % (indent, read.id)) length = len(read) append( % (indent, length)) gapCount = len(aaMatch[key][]) append(countPrint( % indent, gapCount, length)) if gapCount: append( % (indent, .join(map(lambda offset: str(offset + 1), sorted(aaMatch[key][]))))) extraCount = aaMatch[key][] if extraCount: append(countPrint( % indent, extraCount, length)) return .join(result)
Format amino acid sequence match as a string. @param aaMatch: A C{dict} returned by C{compareAaReads}. @param read1: A C{Read} instance or an instance of one of its subclasses. @param read2: A C{Read} instance or an instance of one of its subclasses. @param indent: A C{str} to indent all returned lines with. @param offsets: If not C{None}, a C{set} of offsets of interest that were only considered when making C{match}. @return: A C{str} describing the match.
19,433
def get_parameter(name, withdecryption=False, resp_json=False, region=None,
                  key=None, keyid=None, profile=None):
    conn = __utils__['boto3.get_connection']('ssm', region=region, key=key,
                                             keyid=keyid, profile=profile)
    try:
        resp = conn.get_parameter(Name=name, WithDecryption=withdecryption)
    except conn.exceptions.ParameterNotFound:
        log.warning("get_parameter: Unable to locate name: %s", name)
        return False
    if resp_json:
        return json.loads(resp['Parameter']['Value'])
    else:
        return resp['Parameter']['Value']
Retrieves a parameter from SSM Parameter Store .. versionadded:: Neon .. code-block:: text salt-call boto_ssm.get_parameter test-param withdecryption=True
19,434
def eigh_robust(a, b=None, eigvals=None, eigvals_only=False,
                overwrite_a=False, overwrite_b=False,
                turbo=True, check_finite=True):
    kwargs = dict(eigvals=eigvals, eigvals_only=eigvals_only,
                  turbo=turbo, check_finite=check_finite,
                  overwrite_a=overwrite_a, overwrite_b=overwrite_b)
    if b is None:
        return linalg.eigh(a, **kwargs)
    kwargs_b = dict(turbo=turbo, check_finite=check_finite,
                    overwrite_a=overwrite_b)
    S, U = linalg.eigh(b, **kwargs_b)
    S[S <= 0] = np.inf
    Sinv = 1. / np.sqrt(S)
    W = Sinv[:, None] * np.dot(U.T, np.dot(a, U)) * Sinv
    output = linalg.eigh(W, **kwargs)
    if eigvals_only:
        return output
    else:
        evals, evecs = output
        return evals, np.dot(U, Sinv[:, None] * evecs)
Robustly solve the Hermitian generalized eigenvalue problem This function robustly solves the Hermetian generalized eigenvalue problem ``A v = lambda B v`` in the case that B is not strictly positive definite. When B is strictly positive-definite, the result is equivalent to scipy.linalg.eigh() within floating-point accuracy. Parameters ---------- a : (M, M) array_like A complex Hermitian or real symmetric matrix whose eigenvalues and eigenvectors will be computed. b : (M, M) array_like, optional A complex Hermitian or real symmetric matrix. If omitted, identity matrix is assumed. eigvals : tuple (lo, hi), optional Indexes of the smallest and largest (in ascending order) eigenvalues and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1. If omitted, all eigenvalues and eigenvectors are returned. eigvals_only : bool, optional Whether to calculate only eigenvalues and no eigenvectors. (Default: both are calculated) turbo : bool, optional Use divide and conquer algorithm (faster but expensive in memory, only for generalized eigenvalue problem and if eigvals=None) overwrite_a : bool, optional Whether to overwrite data in `a` (may improve performance) overwrite_b : bool, optional Whether to overwrite data in `b` (may improve performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- w : (N,) float ndarray The N (1<=N<=M) selected eigenvalues, in ascending order, each repeated according to its multiplicity. v : (M, N) complex ndarray (if eigvals_only == False)
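A short usage sketch, assuming a SciPy version that still accepts the `eigvals`/`turbo` keywords used above: build a symmetric `A` and a rank-deficient positive semi-definite `B`, for which a plain `scipy.linalg.eigh(A, B)` call can fail but `eigh_robust` still returns finite eigenpairs.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(5, 5)
A = X + X.T            # symmetric
L = rng.randn(5, 3)
B = L @ L.T            # positive semi-definite but singular (rank 3)

evals, evecs = eigh_robust(A, B)
print(evals)           # ascending eigenvalues of the regularized problem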
19,435
def get_tile_info(tile, time, aws_index=None, all_tiles=False): start_date, end_date = parse_time_interval(time) candidates = [] for tile_info in search_iter(start_date=start_date, end_date=end_date): path_props = tile_info[][].split() this_tile = .join(path_props[1: 4]) this_aws_index = int(path_props[-1]) if this_tile == tile.lstrip() and (aws_index is None or aws_index == this_aws_index): candidates.append(tile_info) if not candidates: raise TileMissingException if len(candidates) > 1: LOGGER.info(, len(candidates), tile, time) if all_tiles: return candidates return candidates[0]
Get basic information about image tile :param tile: tile name (e.g. ``'T10UEV'``) :type tile: str :param time: A single date or a time interval, times have to be in ISO 8601 string :type time: str or (str, str) :param aws_index: index of tile on AWS :type aws_index: int or None :param all_tiles: If ``True`` it will return list of all tiles otherwise only the first one :type all_tiles: bool :return: dictionary with info provided by Opensearch REST service or None if such tile does not exist on AWS. :rtype: dict or None
19,436
def authenticated_user(self, auth):
    response = self.get("/user", auth=auth)
    return GogsUser.from_json(response.json())
Returns the user authenticated by ``auth`` :param auth.Authentication auth: authentication for user to retrieve :return: user authenticated by the provided authentication :rtype: GogsUser :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
19,437
def _add_raster_layer(self, raster_layer, layer_name, save_style=False): if not self.is_writable(): return False, output = QFileInfo(self.uri.filePath(layer_name + )) source = QFileInfo(raster_layer.source()) if source.exists() and source.suffix() in [, ]: renderer = raster_layer.renderer() provider = raster_layer.dataProvider() crs = raster_layer.crs() pipe = QgsRasterPipe() pipe.set(provider.clone()) pipe.set(renderer.clone()) file_writer = QgsRasterFileWriter(output.absoluteFilePath()) file_writer.Mode(1) file_writer.writeRaster( pipe, provider.xSize(), provider.ySize(), provider.extent(), crs) del file_writer if save_style: style_path = QFileInfo(self.uri.filePath(layer_name + )) raster_layer.saveNamedStyle(style_path.absoluteFilePath()) assert output.exists() return True, output.baseName()
Add a raster layer to the folder. :param raster_layer: The layer to add. :type raster_layer: QgsRasterLayer :param layer_name: The name of the layer in the datastore. :type layer_name: str :param save_style: If we have to save a QML too. Default to False. :type save_style: bool :returns: A two-tuple. The first element will be True if we could add the layer to the datastore. The second element will be the layer name which has been used or the error message. :rtype: (bool, str) .. versionadded:: 4.0
19,438
def show_args():
    mapping = {'flags': {}, 'options': {}}
    for flag, arg in option_toggles.items():
        mapping['flags'][flag] = arg
    for option, arg in option_flags.items():
        mapping['options'][option] = arg
    return mapping
Show which arguments map to which flags and options. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' logadm.show_args
19,439
def update(self, instance, oldValue, newValue):
    self.__set__(instance, self.__get__(instance, None) + newValue - (oldValue or 0))
Updates the aggregate based on a change in the child value.
19,440
async def create( cls, node: Union[Node, str], cache_device: Union[BlockDevice, Partition]): params = {} if isinstance(node, str): params[] = node elif isinstance(node, Node): params[] = node.system_id else: raise TypeError( % ( type(node).__name__)) if isinstance(cache_device, BlockDevice): params[] = cache_device.id elif isinstance(cache_device, Partition): params[] = cache_device.id else: raise TypeError( % ( type(cache_device).__name__)) return cls._object(await cls._handler.create(**params))
Create a BcacheCacheSet on a Node. :param node: Node to create the interface on. :type node: `Node` or `str` :param cache_device: Block device or partition to create the cache set on. :type cache_device: `BlockDevice` or `Partition`
19,441
def _set_history_control_entry(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("history_control_index",history_control_entry.history_control_entry, yang_name="history-control-entry", rest_name="history", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: None, u: None, u: u, u: None, u: None, u: u}}), is_container=, yang_name="history-control-entry", rest_name="history", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: None, u: None, u: u, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__history_control_entry = t if hasattr(self, ): self._set()
Setter method for history_control_entry, mapped from YANG variable /interface/fortygigabitethernet/rmon/collection/history_control_entry (list) If this variable is read-only (config: false) in the source YANG file, then _set_history_control_entry is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_history_control_entry() directly.
19,442
def setData(self, index, value, role=Qt.EditRole):
    if role != Qt.CheckStateRole and role != Qt.EditRole:
        return False
    treeItem = self.getItem(index, altItem=self.invisibleRootItem)
    try:
        result = self.setItemData(treeItem, index.column(), value, role=role)
        if result:
            self.emitDataChanged(treeItem)
            self.sigItemChanged.emit(treeItem)
        return result
    except Exception as ex:
        logger.warn("Unable to set data: {}".format(ex))
        if DEBUGGING:
            raise
        return False
Sets the role data for the item at index to value. Returns true if successful; otherwise returns false. The dataChanged and sigItemChanged signals will be emitted if the data was successfully set. Descendants should typically override setItemData function instead of setData()
19,443
def remove_prefix(self, prefix):
    if prefix not in self.__prefix_map:
        return
    ni = self.__lookup_prefix(prefix)
    ni.prefixes.discard(prefix)
    del self.__prefix_map[prefix]
    if ni.preferred_prefix == prefix:
        ni.preferred_prefix = next(iter(ni.prefixes), None)
Removes prefix from this set. This is a no-op if the prefix doesn't exist in it.
19,444
def otp(scope, password, seed, seqs):
    return [crypt.otp(password[0], seed[0], int(seq)) for seq in seqs]
Calculates a one-time password hash using the given password, seed, and sequence number and returns it. Uses the md4/sixword algorithm as supported by TACACS+ servers. :type password: string :param password: A password. :type seed: string :param seed: A username. :type seqs: int :param seqs: A sequence number, or a list of sequence numbers. :rtype: string :return: A hash, or a list of hashes.
19,445
def from_xml(xml):
    parsed = xml
    if not isinstance(xml, MARCXMLRecord):
        parsed = MARCXMLRecord(str(xml))
    if "DEL" in parsed.datafields:
        raise DocumentNotFoundException("Document was deleted.")
    return EPeriodical(
        url=parsed.get_urls(),
        ISSN=parsed.get_ISSNs(),
        nazev=parsed.get_name(),
        anotace=None,
        podnazev=parsed.get_subname(),
        id_number=parsed.controlfields.get("001", None),
        datumVydani=parsed.get_pub_date(),
        mistoVydani=parsed.get_pub_place(),
        internal_url=parsed.get_internal_urls(),
        invalid_ISSNs=parsed.get_invalid_ISSNs(),
        nakladatelVydavatel=parsed.get_publisher(),
        ISSNSouboruPublikaci=parsed.get_linking_ISSNs(),
    )
Convert :class:`.MARCXMLRecord` object to :class:`.EPublication` namedtuple. Args: xml (str/MARCXMLRecord): MARC XML which will be converted to EPublication. In case of str, ``<record>`` tag is required. Returns: structure: :class:`.EPublication` namedtuple with data about \ publication.
19,446
def get_method_analysis(self, method):
    class_analysis = self.get_class_analysis(method.get_class_name())
    if class_analysis:
        return class_analysis.get_method_analysis(method)
    return None
Returns the crossreferencing object for a given Method. Beware: the similar named function :meth:`~get_method()` will return a :class:`MethodAnalysis` object, while this function returns a :class:`MethodClassAnalysis` object! This Method will only work after a run of :meth:`~create_xref()` :param method: :class:`EncodedMethod` :return: :class:`MethodClassAnalysis` for the given method or None, if method was not found
19,447
def getInterval(self):
    items = (
        ("", _(u"Not set")),
        ("1", _(u"daily")),
        ("7", _(u"weekly")),
        ("30", _(u"monthly")),
        ("90", _(u"quarterly")),
        ("180", _(u"biannually")),
        ("365", _(u"yearly")),
    )
    return DisplayList(items)
Vocabulary of date intervals to calculate the "To" field date based on the "From" field date.
19,448
def startLogin():
    flask.session["state"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    flask.session["nonce"] = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    args = {
        "client_id": app.oidcClient.client_id,
        "response_type": "code",
        "scope": ["openid", "profile"],
        "nonce": flask.session["nonce"],
        "redirect_uri": app.oidcClient.redirect_uris[0],
        "state": flask.session["state"]
    }
    result = app.oidcClient.do_authorization_request(
        request_args=args, state=flask.session["state"])
    return flask.redirect(result.url)
If we are not logged in, this generates the redirect URL to the OIDC provider and returns the redirect response :return: A redirect response to the OIDC provider
19,449
def shakespeare(chunk_size):
    file_name = maybe_download(, )
    with open(file_name) as f:
        shakespeare_full = f.read()
    length = (len(shakespeare_full) // chunk_size) * chunk_size
    if length < len(shakespeare_full):
        shakespeare_full = shakespeare_full[:length]
    arr = np.array([convert_to_int(c) for c in shakespeare_full])[
        0:len(shakespeare_full) / chunk_size * chunk_size]
    return arr.reshape((len(arr) / chunk_size, chunk_size))
Downloads Shakespeare, converts it into ASCII codes and chunks it. Args: chunk_size: The dataset is broken down so that it is shaped into batches x chunk_size. Returns: A numpy array of ASCII codes shaped into batches x chunk_size.
19,450
def get_needed_fieldnames(arr, names): fieldnames = set([]) names = list(parsed_names & (set(dir(arr)) | set(arr.fieldnames))) for name in names: if name in arr.fieldnames: fieldnames.update([name]) else: try: func = getattr(cls, name).fget except AttributeError: func = getattr(arr, name) try: sourcecode = inspect.getsource(func) except TypeError: continue possible_fields = get_instance_fields_from_arg(sourcecode) fieldnames.update(get_needed_fieldnames(arr, possible_fields)) return fieldnames
Given a FieldArray-like array and a list of names, determines what fields are needed from the array so that using the names does not result in an error. Parameters ---------- arr : instance of a FieldArray or similar The array from which to determine what fields to get. names : (list of) strings A list of the names that are desired. The names may be either a field, a virtualfield, a property, a method of ``arr``, or any function of these. If a virtualfield/property or a method, the source code of that property/method will be analyzed to pull out what fields are used in it. Returns ------- set The set of the fields needed to evaluate the names.
19,451
def input_file(filename):
    if excluded(filename) or not filename_match(filename):
        return {}
    if options.verbose:
        message('checking ' + filename)
    options.counters['files'] = options.counters.get('files', 0) + 1
    errors = Checker(filename).check_all()
    if options.testsuite and not errors:
        message("%s: %s" % (filename, "no errors found"))
    return errors
Run all checks on a Python source file.
19,452
def extract(dump_files, extractors=ALL_EXTRACTORS):
    def process_dump(dump, path):
        for page in dump:
            if page.namespace != 0:
                continue
            else:
                for cite in extract_cite_history(page, extractors):
                    yield cite
    return mwxml.map(process_dump, dump_files)
Extracts cites from a set of `dump_files`. :Parameters: dump_files : str | `file` A set of files MediaWiki XML dump files (expects: pages-meta-history) extractors : `list`(`extractor`) A list of extractors to apply to the text :Returns: `iterable` -- a generator of extracted cites
19,453
def handle_aliases_in_calls(name, import_alias_mapping):
    for key, val in import_alias_mapping.items():
        if name == key or \
           name.startswith(key + '.'):
            return name.replace(key, val)
    return None
Returns either None or the handled alias. Used in add_module.
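Illustrative calls (doctest-style; they assume the dotted-prefix check reconstructed in the code above):
>>> handle_aliases_in_calls('np.array', {'np': 'numpy'})
'numpy.array'
>>> handle_aliases_in_calls('os.path.join', {'np': 'numpy'}) is None
True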
19,454
def on_timer(self):
    try:
        self.flush()
    except Exception as e:
        log.exception(, e)
    self._set_timer()
Executes flush(). Ignores any errors to make sure one exception doesn't halt the whole flushing process.
19,455
def map_val(dest, src, key, default=None, src_key=None):
    if not src_key:
        src_key = key
    if src_key in src:
        dest[key] = src[src_key]
    else:
        if default is not None:
            dest[key] = default
Will ensure a dict has values sourced from either another dict or based on the provided default
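A quick doctest-style illustration of the copy-or-default behaviour:
>>> dest = {}
>>> map_val(dest, {'a': 1}, 'a')        # key present in src: copied
>>> map_val(dest, {'a': 1}, 'b', 0)     # key missing: default used
>>> dest
{'a': 1, 'b': 0}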
19,456
def logout_service_description(self): label = + self.name if (self.auth_type): label = label + + self.auth_type + return({"@id": self.logout_uri, "profile": self.profile_base + , "label": label})
Logout service description.
19,457
def rar3_s2k(psw, salt):
    if not isinstance(psw, unicode):
        psw = psw.decode('utf8')
    seed = bytearray(psw.encode('utf-16le') + salt)
    h = Rar3Sha1(rarbug=True)
    iv = EMPTY
    for i in range(16):
        for j in range(0x4000):
            cnt = S_LONG.pack(i * 0x4000 + j)
            h.update(seed)
            h.update(cnt[:3])
            if j == 0:
                iv += h.digest()[19:20]
    key_be = h.digest()[:16]
    key_le = pack("<LLLL", *unpack(">LLLL", key_be))
    return key_le, iv
String-to-key hash for RAR3.
19,458
def to_string(cls, error_code):
    if error_code == cls.ILLEGAL_COMMAND:
        return
    return super(JLinkEraseErrors, cls).to_string(error_code)
Returns the string message for the given ``error_code``. Args: cls (JLinkEraseErrors): the ``JLinkEraseErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
19,459
def verify_directory(verbose=True, max_size_mb=50):
    ok = True
    mb_to_bytes = 1000 * 1000
    expected_files = ["config.txt", "experiment.py"]
    for f in expected_files:
        if os.path.exists(f):
            log("✓ {} is PRESENT".format(f), chevrons=False, verbose=verbose)
        else:
            log("✗ {} is MISSING".format(f), chevrons=False, verbose=verbose)
            ok = False
    max_size = max_size_mb * mb_to_bytes
    size = size_on_copy()
    if size > max_size:
        size_in_mb = round(size / mb_to_bytes)
        log(
            "✗ {}MB is TOO BIG (greater than {}MB)".format(size_in_mb, max_size_mb),
            chevrons=False,
            verbose=verbose,
        )
        ok = False
    return ok
Ensure that the current directory looks like a Dallinger experiment, and does not appear to have unintended contents that will be copied on deployment.
19,460
def add_key_val(keyname, keyval, keytype, filename, extnum):
    funtype = {'int': int, 'float': float, 'str': str, 'bool': bool}
    if keytype not in funtype:
        raise ValueError(, keytype)
    with fits.open(filename, "update") as hdulist:
        hdulist[extnum].header[keyname] = funtype[keytype](keyval)
    print( + keyname + + keyval + + filename)
Add/replace FITS key Add/replace the key keyname with value keyval of type keytype in filename. Parameters: ---------- keyname : str FITS keyword name. keyval : str FITS keyword value. keytype : str FITS keyword type: int, float, str or bool. filename : str FITS filename. extnum : int Extension number where the keyword will be inserted. Note that the first extension is number 1 (and not zero).
19,461
def present(name, password, permission): ret = {: name, : True, : {}, : } users = __salt__[]() if __opts__[]: if name in users: ret[] = .format(name) else: ret[] = .format(name) ret[] = {name: } return ret if name in users: ret[] = .format(name) else: if __salt__[](name, password, permission, users): ret[] = .format(name) ret[] = {name: } else: ret[] = ret[] = False return ret
Ensure the user exists on the Dell DRAC name: The user's username password: The password used to authenticate permission: The permissions that should be assigned to a user
19,462
def confirm_authorization_request(self):
    server = self.server
    uri, http_method, body, headers = extract_params()
    try:
        realms, credentials = server.get_realms_and_credentials(
            uri, http_method=http_method, body=body, headers=headers
        )
        ret = server.create_authorization_response(
            uri, http_method, body, headers, realms, credentials
        )
        log.debug()
        return create_response(*ret)
    except errors.OAuth1Error as e:
        return redirect(e.in_uri(self.error_uri))
    except errors.InvalidClientError as e:
        return redirect(e.in_uri(self.error_uri))
When the consumer confirms the authorization.
19,463
def merge(self, other): if not other: return Recipe(self.recipe) if isinstance(other, basestring): return self.merge(Recipe(other)) if not other.recipe: return Recipe(self.recipe) if not self.recipe: return Recipe(other.recipe) self.validate() other.validate() new_recipe = self.recipe max_index = max(1, *filter(lambda x: isinstance(x, int), self.recipe.keys())) next_index = max_index + 1 translation = {} for key, value in other.recipe.items(): if isinstance(key, int): if key not in translation: translation[key] = next_index next_index = next_index + 1 new_recipe[translation[key]] = value def translate(x): if isinstance(x, list): return list(map(translate, x)) elif isinstance(x, tuple): return tuple(map(translate, x)) elif isinstance(x, dict): return {k: translate(v) for k, v in x.items()} else: return translation[x] for idx in translation.values(): if "output" in new_recipe[idx]: new_recipe[idx]["output"] = translate(new_recipe[idx]["output"]) if "error" in new_recipe[idx]: new_recipe[idx]["error"] = translate(new_recipe[idx]["error"]) for (idx, param) in other.recipe["start"]: new_recipe["start"].append((translate(idx), param)) if "error" in other.recipe: if "error" not in new_recipe: new_recipe["error"] = translate(other.recipe["error"]) else: if isinstance(new_recipe["error"], (list, tuple)): new_recipe["error"] = list(new_recipe["error"]) else: new_recipe["error"] = list([new_recipe["error"]]) if isinstance(other.recipe["error"], (list, tuple)): new_recipe["error"].extend(translate(other.recipe["error"])) else: new_recipe["error"].append(translate(other.recipe["error"])) return Recipe(new_recipe)
Merge two recipes together, returning a single recipe containing all nodes. Note: This does NOT yet return a minimal recipe. :param other: A Recipe object that should be merged with the current Recipe object. :return: A new Recipe object containing information from both recipes.
19,464
def _ppf(self, q, left, right, cache):
    if isinstance(left, Dist) and left in cache:
        left = cache[left]
    if isinstance(right, Dist) and right in cache:
        right = cache[right]
    if isinstance(left, Dist):
        if isinstance(right, Dist):
            raise StochasticallyDependentError(
                "under-defined distribution {} or {}".format(left, right))
    elif not isinstance(right, Dist):
        raise StochasticallyDependentError(
            "truncated variable indirectly depends on underlying variable")
    else:
        left = (numpy.array(left).T*numpy.ones(q.shape).T).T
        uloc = evaluation.evaluate_forward(right, left)
        return evaluation.evaluate_inverse(right, q*(1-uloc)+uloc, cache=cache)
    right = (numpy.array(right).T*numpy.ones(q.shape).T).T
    uloc = evaluation.evaluate_forward(left, right, cache=cache.copy())
    return evaluation.evaluate_inverse(left, q*uloc, cache=cache)
Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.4).inv([0.1, 0.2, 0.9])) [0.04 0.08 0.36] >>> print(chaospy.Trunc(0.6, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [0.64 0.68 0.96]
19,465
def resample_waveforms(Y_dict): for key in Y_dict.keys(): Y_dict[key] = signal.decimate(Y_dict[key], 4, ftype=) metadata = {} metadata["fs"] = 4096. metadata["T"] = 1. metadata["source distance"] = 10. return Y_dict, metadata
INPUT: - Dictionary of waveforms loaded from text file - ALSO, Dictionary of timeseries indexed by name OUTPUT: - Y_dict: resampled, normalized waveforms, ready for FFT - new parameters: + fs = 4096 + time duration: 1 second - metadata: dictionary of sampling information
19,466
def substitute_infinitives_as_subjects(sent_str): sent_doc = textacy.Doc(sent_str, lang=) inf_pattern = r infinitives = textacy.extract.pos_regex_matches(sent_doc, inf_pattern) inf_subjs = [] for inf in infinitives: if inf[0].text.lower() != : continue if ( not in [w.dep_ for w in inf] and sent_doc[inf[-1].i + 1].dep_ != ): continue if inf[-1].tag_ != : continue inf_subj = [] for v in inf: inf_subj.append(v.i) inf_subjs.append(inf_subj) new_sent_str = sent_str unusual_char = for inf_subj in inf_subjs: start_inf = sent_doc[inf_subj[0]].idx end_inf = sent_doc[inf_subj[-1]].idx + len(sent_doc[inf_subj[-1]]) inf_len = end_inf - start_inf sub = (unusual_char * inf_len) new_sent_str = new_sent_str[:start_inf] + sub + new_sent_str[end_inf:] new_sent_str = re.sub(, , new_sent_str) repl = [conjugate(sent_doc[i_s[-1]].text, tense=) for i_s in inf_subjs] return new_sent_str.format(*repl)
If an infinitive is used as a subject, substitute the gerund.
19,467
def print_help(self, script_name: str): textWidth = max(60, shutil.get_terminal_size((80, 20)).columns) if len(script_name) > 20: print(f) print( ) else: print( f ) print( ) print() print( ) print( ) description = [x.lstrip().strip() for x in self.description] description = textwrap.dedent(.join(description)).strip() if description: print( + description) print() print( + .join(self.workflows)) global_parameters = {} for section in self.sections: global_parameters.update(section.global_parameters) if global_parameters: print() for name, (value, comment) in global_parameters.items(): par_str = f print(par_str) if comment: print(.join( textwrap.wrap( comment, width=textWidth, initial_indent= * 24, subsequent_indent= * 24))) print() for section in self.sections: section.show()
print a help message from the script
19,468
def run_nose(self, params): thread.set_index(params.thread_index) log.debug("[%s] Starting nose iterations: %s", params.worker_index, params) assert isinstance(params.tests, list) end_time = self.params.ramp_up + self.params.hold_for end_time += time.time() if end_time else 0 time.sleep(params.delay) plugin = ApiritifPlugin(self._writer) self._writer.concurrency += 1 config = Config(env=os.environ, files=all_config_files(), plugins=DefaultPluginManager()) config.plugins.addPlugins(extraplugins=[plugin]) config.testNames = params.tests config.verbosity = 3 if params.verbose else 0 if params.verbose: config.stream = open(os.devnull, "w") iteration = 0 try: while True: log.debug("Starting iteration:: index=%d,start_time=%.3f", iteration, time.time()) thread.set_iteration(iteration) ApiritifTestProgram(config=config) log.debug("Finishing iteration:: index=%d,end_time=%.3f", iteration, time.time()) iteration += 1 if plugin.stop_reason: log.debug("[%s] finished prematurely: %s", params.worker_index, plugin.stop_reason) elif iteration >= params.iterations: log.debug("[%s] iteration limit reached: %s", params.worker_index, params.iterations) elif 0 < end_time <= time.time(): log.debug("[%s] duration limit reached: %s", params.worker_index, params.hold_for) else: continue break finally: self._writer.concurrency -= 1 if params.verbose: config.stream.close()
:type params: Params
19,469
def modify_no_rollback(self, dn: str, mod_list: dict):
    _debug("modify_no_rollback", self, dn, mod_list)
    result = self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))
    _debug("--")
    return result
Modify a DN in the LDAP database; See ldap module. Doesn't return a result if transactions enabled.
19,470
def windowed_df(pos, ac1, ac2, size=None, start=None, stop=None, step=None,
                windows=None, is_accessible=None, fill=np.nan):
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
    loc_df = locate_fixed_differences(ac1, ac2)
    n_df, windows, counts = windowed_statistic(
        pos, values=loc_df, statistic=np.count_nonzero,
        size=size, start=start, stop=stop, step=step, windows=windows, fill=0
    )
    df, n_bases = per_base(n_df, windows, is_accessible=is_accessible, fill=fill)
    return df, windows, n_bases, counts
Calculate the density of fixed differences between two populations in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional The value to use where a window is completely inaccessible. Returns ------- df : ndarray, float, shape (n_windows,) Per-base density of fixed differences in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. n_bases : ndarray, int, shape (n_windows,) Number of (accessible) bases in each window. counts : ndarray, int, shape (n_windows,) Number of variants in each window. See Also -------- allel.model.locate_fixed_differences
19,471
def get_data_matches(text, delim_pos, dxproj, folderpath, classname=None, typespec=None, visibility=None): unescaped_text = text[delim_pos + 1:] if visibility is None: if text != and delim_pos != len(text) - 1: visibility = "either" else: visibility = "visible" try: results = list(dxpy.find_data_objects(project=dxproj.get_id(), folder=folderpath, name=unescaped_text + "*", name_mode="glob", recurse=False, visibility=visibility, classname=classname, limit=100, describe=dict(fields=dict(name=True)), typename=typespec)) prefix = if text == else text[:delim_pos + 1] return [prefix + escape_name(result[][]) for result in results] except: return []
:param text: String to be tab-completed; still in escaped form :type text: string :param delim_pos: index of last unescaped "/" or ":" in text :type delim_pos: int :param dxproj: DXProject handler to use :type dxproj: DXProject :param folderpath: Unescaped path in which to search for data object matches :type folderpath: string :param classname: Data object class by which to restrict the search (None for no restriction on class) :type classname: string :param visibility: Visibility to constrain the results to; default is "visible" for empty strings, "either" for nonempty :type visibility: string :returns: List of matches :rtype: list of strings Members of the returned list are guaranteed to start with *text* and be in escaped form for consumption by the command-line.
19,472
def sample_id(self, lon): if self.grid == : sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + 1.0 + (lon * np.pi / 180.0 - float(self.CENTER_LONGITUDE)) * self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0) / (self.MAP_SCALE * 1e-3)) else: sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + float(self.MAP_RESOLUTION) * (lon - float(self.CENTER_LONGITUDE))) + 1 return self._control_sample(sample)
Return the corresponding sample Args: lon (int): longitude in degrees Returns: Corresponding sample
19,473
def csi(self, capname, *args):
    value = curses.tigetstr(capname)
    if value is None:
        return b''
    else:
        return curses.tparm(value, *args)
Return the escape sequence for the selected Control Sequence.
19,474
def find_best_question(X, y, criterion):
    measure_impurity = gini_impurity if criterion == "gini" else entropy
    current_impurity = measure_impurity(y)
    best_info_gain = 0
    best_question = None
    for feature_n in range(X.shape[1]):
        for value in set(X[:, feature_n]):
            q = Question(feature_n, value)
            _, _, true_y, false_y = split(X, y, q)
            current_info_gain = info_gain(current_impurity, true_y, false_y, criterion)
            if current_info_gain >= best_info_gain:
                best_info_gain = current_info_gain
                best_question = q
    return best_info_gain, best_question
Find the best question to ask by iterating over every feature / value and calculating the information gain.
19,475
def get_cpu_props(cls, family, arch=):
    cpus = cls.get_cpus_by_arch(arch)
    try:
        return cpus.xpath(.format(family))[0]
    except IndexError:
        raise LagoException(.format(family))
Get CPU info XML Args: family(str): CPU family arch(str): CPU arch Returns: lxml.etree.Element: CPU xml Raises: :exc:`~LagoException`: If no such CPU family exists
19,476
def wetdays(pr, thresh='1 mm/day', freq='YS'):
    # The defaults ('1 mm/day', 'YS'), the unit-conversion context and the
    # resample dimension are reconstructions; the original literals were lost.
    thresh = utils.convert_units_to(thresh, pr, 'hydro')

    wd = (pr >= thresh) * 1
    return wd.resample(time=freq).sum(dim='time')
r"""Wet days Return the total number of days during period with precipitation over threshold. Parameters ---------- pr : xarray.DataArray Daily precipitation [mm] thresh : str Precipitation value over which a day is considered wet. Default: '1 mm/day'. freq : str, optional Resampling frequency defining the periods defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling. Returns ------- xarray.DataArray The number of wet days for each period [day] Examples -------- The following would compute for each grid cell of file `pr.day.nc` the number days with precipitation over 5 mm at the seasonal frequency, ie DJF, MAM, JJA, SON, DJF, etc.: >>> pr = xr.open_dataset('pr.day.nc') >>> wd = wetdays(pr, pr_min = 5., freq="QS-DEC")
19,477
def from_connection_string(cls, conn_str, *, loop=None, **kwargs):
    address, policy, key, _ = parse_conn_str(conn_str)
    parsed_namespace = urlparse(address)
    namespace, _, base = parsed_namespace.hostname.partition('.')
    return cls(
        service_namespace=namespace,
        shared_access_key_name=policy,
        shared_access_key_value=key,
        host_base='.' + base,
        loop=loop,
        **kwargs)
Create a Service Bus client from a connection string. :param conn_str: The connection string. :type conn_str: str Example: .. literalinclude:: ../examples/async_examples/test_examples_async.py :start-after: [START create_async_servicebus_client_connstr] :end-before: [END create_async_servicebus_client_connstr] :language: python :dedent: 4 :caption: Create a ServiceBusClient via a connection string.
19,478
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day, environ=None):
    filename = get_benchmark_filename(symbol)
    # Note: the cache-resource label and log message wording below are
    # reconstructions; the original string literals were lost in extraction.
    data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
                             environ)
    if data is not None:
        return data

    # No usable cached data: download a fresh copy.
    logger.info(
        ('Downloading benchmark data for {symbol!r} '
         'from {first_date} to {last_date}'),
        symbol=symbol,
        first_date=first_date - trading_day,
        last_date=last_date
    )

    try:
        data = get_benchmark_returns(symbol)
        data.to_csv(get_data_filepath(filename, environ))
    except (OSError, IOError, HTTPError):
        logger.exception('Failed to cache the new benchmark returns')
        raise
    if not has_data_for_dates(data, first_date, last_date):
        logger.warn(
            ("Still don't have expected benchmark data for {symbol!r} "
             "from {first_date} to {last_date} after redownload!"),
            symbol=symbol,
            first_date=first_date - trading_day,
            last_date=last_date
        )
    return data
Ensure we have benchmark data for `symbol` from `first_date` to `last_date` Parameters ---------- symbol : str The symbol for the benchmark to load. first_date : pd.Timestamp First required date for the cache. last_date : pd.Timestamp Last required date for the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. trading_day : pd.CustomBusinessDay A trading day delta. Used to find the day before first_date so we can get the close of the day prior to first_date. We attempt to download data unless we already have data stored at the data cache for `symbol` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path.
19,479
def remove_observing_method(self, prop_names, method): for prop_name in prop_names: if prop_name in self.__PROP_TO_METHS: self.__PROP_TO_METHS[prop_name].remove(method) del self.__PAT_METH_TO_KWARGS[(prop_name, method)] elif method in self.__METH_TO_PAT: pat = self.__METH_TO_PAT[method] if fnmatch.fnmatch(prop_name, pat): del self.__METH_TO_PAT[method] self.__PAT_TO_METHS[pat].remove(method) del self.__PAT_METH_TO_KWARGS[(pat, method)]
Remove dynamic notifications. *method* a callable that was registered with :meth:`observe`. *prop_names* a sequence of strings. This need not correspond to any one `observe` call. .. note:: This can revert even the effects of decorator `observe` at runtime. Don't.
19,480
def send_message(msg: 'EFBMsg') -> Optional['EFBMsg']:
    global middlewares, master, slaves
    if msg is None:
        return

    # Pass the message through every middleware; any of them may drop it.
    for i in middlewares:
        m = i.process_message(msg)
        if m is None:
            return None
        assert m is not None
        msg = m

    msg.verify()

    if msg.deliver_to.channel_id == master.channel_id:
        return master.send_message(msg)
    elif msg.deliver_to.channel_id in slaves:
        return slaves[msg.deliver_to.channel_id].send_message(msg)
    else:
        raise EFBChannelNotFound(msg)
Deliver a message to the destination channel. Args: msg (EFBMsg): The message Returns: The message sent by the destination channel, includes the updated message ID from there. Returns ``None`` if the message is not sent.
19,481
def load_key(self, key_path, password): try: return paramiko.RSAKey.from_private_key_file(key_path) except PasswordRequiredException as ex: return paramiko.RSAKey.from_private_key_file(key_path, password=password)
Creates paramiko rsa key :type key_path: str :param key_path: path to rsa key :type password: str :param password: password to try if rsa key is encrypted
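A usage sketch, assuming `client` is an instance of the surrounding class and the key path is illustrative:

import paramiko

key = client.load_key('/home/user/.ssh/id_rsa', password='passphrase-if-encrypted')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('example.com', username='user', pkey=key)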
19,482
def makeRequests(callable_, args_list, callback=None, exc_callback=_handle_thread_exception): requests = [] for item in args_list: if isinstance(item, tuple): requests.append( WorkRequest(callable_, item[0], item[1], callback=callback, exc_callback=exc_callback) ) else: requests.append( WorkRequest(callable_, [item], None, callback=callback, exc_callback=exc_callback) ) return requests
Create several work requests for same callable with different arguments. Convenience function for creating several work requests for the same callable where each invocation of the callable receives different values for its arguments. ``args_list`` contains the parameters for each invocation of callable. Each item in ``args_list`` should be either a 2-item tuple of the list of positional arguments and a dictionary of keyword arguments or a single, non-tuple argument. See docstring for ``WorkRequest`` for info on ``callback`` and ``exc_callback``.
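A usage sketch, assuming the classic threadpool package's ThreadPool/putRequest API is the surrounding library:

import threadpool

def work(x, scale=1):
    return x * scale

# mix of 2-item (args, kwargs) tuples and single non-tuple arguments
args_list = [([2], {'scale': 10}), ([3], {'scale': 10}), 4]
requests = threadpool.makeRequests(work, args_list,
                                   callback=lambda req, result: print(result))

pool = threadpool.ThreadPool(2)
for req in requests:
    pool.putRequest(req)
pool.wait()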
19,483
def set_cookie(response, key, value, max_age): expires = datetime.strftime( datetime.utcnow() + timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT" ) response.set_cookie( key, value, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None )
Set the cookie ``key`` on ``response`` with value ``value`` valid for ``max_age`` seconds

:param django.http.HttpResponse response: a django response where to set the cookie
:param unicode key: the cookie key
:param unicode value: the cookie value
:param int max_age: the maximum validity age of the cookie
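A hedged usage sketch inside a view of a configured Django project (view name and cookie values are illustrative):

from django.http import HttpResponse

def login_view(request):
    response = HttpResponse('ok')
    # keep the ticket cookie for one hour
    set_cookie(response, 'ticket', 'ST-1234', max_age=3600)
    return response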
19,484
def animate(func: types.AnyFunction = None, *, animation: types.AnimationGenerator = _default_animation(), step: float = 0.1) -> types.AnyFunction: if callable(func): return _animate_no_kwargs(func, animation, step) elif func is None: return _animate_with_kwargs(animation_gen=animation, step=step) else: raise TypeError("argument must either be None or callable")
Wrapper function for the _Animate wrapper class. Args: func: A function to run while animation is showing. animation: An AnimationGenerator that yields animation frames. step: Approximate timestep (in seconds) between frames. Returns: An animated version of func if func is not None. Otherwise, a function that takes a function and returns an animated version of that.
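A usage sketch of both decorator forms described above, assuming `animate` is importable from the surrounding animation module:

import time

@animate
def long_task():
    time.sleep(3)

@animate(step=0.05)
def other_task():
    time.sleep(3)

long_task()    # the animation frames show while the function runs
other_task()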
19,485
def parse(cls, filepath, filecontent, parser):
    # Error-message wording below is reconstructed; the original string
    # literals were lost in extraction.
    try:
        objects = parser.parse(filepath, filecontent)
    except Exception as e:
        raise MappingError('Failed to parse {}:\n{}'.format(filepath, e))

    objects_by_name = {}
    for obj in objects:
        if not Serializable.is_serializable(obj):
            raise UnaddressableObjectError('Parsed a non-serializable object: {!r}'.format(obj))
        attributes = obj._asdict()
        name = attributes.get('name')
        if not name:
            raise UnaddressableObjectError('Parsed a non-addressable object: {!r}'.format(obj))
        if name in objects_by_name:
            raise DuplicateNameError(
                'An object already exists at {!r} with name {!r}: {!r}. Cannot map {!r}.'
                .format(filepath, name, objects_by_name[name], obj))
        objects_by_name[name] = obj
    return cls(filepath, OrderedDict(sorted(objects_by_name.items())))
Parses a source for addressable Serializable objects.

No matter the parser used, the parsed and mapped addressable objects are all 'thin'; ie: any objects they point to in other namespaces or even in the same namespace but from a separate source are left as unresolved pointers.

:param string filepath: The path to the byte source containing serialized objects.
:param string filecontent: The content of byte source containing serialized objects to be parsed.
:param symbol_table: The symbol table cls to expose a symbol table dict.
:type symbol_table: Instance of :class:`pants.engine.parser.SymbolTable`.
:param parser: The parser cls to use.
:type parser: A :class:`pants.engine.parser.Parser`.
19,486
def set_creation_date(self, p_date=date.today()):
    # The field key and the regex below are reconstructions (the original
    # literals were lost): group 1 is an optional '(A) ' style priority,
    # group 2 is an existing creation date that gets replaced, group 3 is
    # the remainder of the todo text.
    self.fields['creationDate'] = p_date
    self.src = re.sub(
        r'^(\([A-Z]\) )?(\d{4}-\d{2}-\d{2} )?(.*)$',
        lambda m: u"{}{} {}".format(m.group(1) or '', p_date.isoformat(), m.group(3)),
        self.src)
Sets the creation date of a todo. Should be passed a date object.
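A usage sketch, assuming the surrounding Todo class parses todo.txt-style lines (the constructor shown is hypothetical):

from datetime import date

todo = Todo('(A) Water the plants')        # hypothetical constructor
todo.set_creation_date(date(2020, 1, 15))
# the source line is expected to become '(A) 2020-01-15 Water the plants'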
19,487
def stepper_request_library_version(self): data = [self.STEPPER_LIBRARY_VERSION] self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)
Request the stepper library version from the Arduino. To retrieve the version after this command is called, call get_stepper_version
19,488
def help_text(cls):
    docs = [cmd_func.__doc__ for cmd_func in cls.commands.values()]
    # Assumed completion: the original body was truncated after collecting the
    # docstrings; joining them is one plausible way to build the help text.
    return '\n'.join(doc.strip() for doc in docs if doc)
Return a slack-formatted list of commands with their usage.
19,489
def mols_from_file(path, no_halt=True, assign_descriptors=True):
    # Binary mode ('rb' is assumed) because each line is decoded explicitly
    # via tx.decode below.
    with open(path, 'rb') as f:
        fd = (tx.decode(line) for line in f)
        for c in mol_supplier(fd, no_halt, assign_descriptors):
            yield c
Compound supplier from CTAB text file (.mol, .sdf)
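A usage sketch (the file name is illustrative):

for compound in mols_from_file('library.sdf'):
    print(compound)   # each item is a compound object yielded by mol_supplier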
19,490
def decode_consumermetadata_response(cls, data):
    # Struct formats follow the Kafka ConsumerMetadata response layout:
    # int32 correlation id, int16 error code, int32 coordinator id, then a
    # short string host and an int32 port. The format literals are
    # reconstructions of the lost originals.
    (correlation_id, error_code, node_id), cur = \
        relative_unpack('>ihi', data, 0)
    host, cur = read_short_ascii(data, cur)
    (port,), cur = relative_unpack('>i', data, cur)
    return ConsumerMetadataResponse(
        error_code, node_id, nativeString(host), port)
Decode bytes to a ConsumerMetadataResponse :param bytes data: bytes to decode
19,491
def bbox(self): from itertools import chain knots = [ (knot.anchor[1], knot.anchor[0]) for knot in chain.from_iterable(self.paths) ] if len(knots) == 0: return (0., 0., 1., 1.) x, y = zip(*knots) return (min(x), min(y), max(x), max(y))
Bounding box tuple (left, top, right, bottom) in relative coordinates, where top-left corner is (0., 0.) and bottom-right corner is (1., 1.). :return: `tuple`
19,492
def cmap_center_adjust(cmap, center_ratio): if not (0. < center_ratio) & (center_ratio < 1.): return cmap a = math.log(center_ratio) / math.log(0.5) return cmap_powerlaw_adjust(cmap, a)
Returns a new colormap based on the one given but adjusted so that the old center point is shifted higher (>0.5) or lower (<0.5).

:param cmap: colormap instance (e.g., cm.jet)
:param center_ratio: new position of the original midpoint, as a value strictly between 0 and 1
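A usage sketch with matplotlib, assuming cmap_powerlaw_adjust from the same module is importable alongside cmap_center_adjust:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

data = np.random.randn(50, 50) + 1.0        # data whose interesting midpoint is not at 0.5
shifted = cmap_center_adjust(cm.jet, 0.3)   # pull the colormap centre towards the low end
plt.pcolormesh(data, cmap=shifted)
plt.colorbar()
plt.show()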
19,493
def _break_reads(self, contig, position, fout, min_read_length=250):
    sam_reader = pysam.Samfile(self.bam, "rb")
    for read in sam_reader.fetch(contig):
        seqs = []
        if read.pos < position < read.reference_end - 1:
            split_point = position - read.pos
            if split_point - 1 >= min_read_length:
                sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(0, split_point)
                sequence.id += '.left'   # suffix reconstructed; original literal lost
                seqs.append(sequence)
            if read.query_length - split_point >= min_read_length:
                sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(split_point, read.query_length)
                sequence.id += '.right'  # suffix reconstructed; original literal lost
                seqs.append(sequence)
        else:
            seqs.append(mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out))

        for seq in seqs:
            if read.is_reverse:
                seq.revcomp()
            print(seq, file=fout)
Get all reads from contig, but break them all at the given position (0-based) in the reference. Writes to fout. Currently approximate about where it breaks (ignores indels in the alignment).
19,494
def parse_conference_address(address_string):
    # The split/replace literals, the 'US' codes, the indentation of the final
    # checks and the returned dictionary keys are reconstructions; the
    # original string literals were lost in extraction.
    geo_elements = address_string.split(',')
    city = geo_elements[0]
    country_name = geo_elements[-1].upper().replace('.', '').strip()
    us_state = None
    state = None
    country_code = None

    country_code = match_country_name_to_its_code(country_name, city)

    if country_code == 'US' and len(geo_elements) > 1:
        us_state = match_us_state(geo_elements[-2].upper().strip()
                                  .replace('.', ''))

    if not country_code:
        us_state = match_us_state(country_name)

    if us_state:
        state = us_state
        country_code = 'US'

    return {
        'cities': [
            city,
        ],
        'country_code': country_code,
        'postal_code': None,
        'state': state,
    }
Parse a conference address. This is a pretty dummy address parser. It only extracts country and state (for US) and should be replaced with something better, like Google Geocoding.
19,495
def get_appliance_stats_by_location(self, location_id, start, end, granularity=None, per_page=None, page=None, min_power=None): url = "https://api.neur.io/v1/appliances/stats" headers = self.__gen_headers() headers["Content-Type"] = "application/json" params = { "locationId": location_id, "start": start, "end": end } if granularity: params["granularity"] = granularity if min_power: params["minPower"] = min_power if per_page: params["perPage"] = per_page if page: params["page"] = page url = self.__append_url_params(url, params) r = requests.get(url, headers=headers) return r.json()
Get appliance usage data for a given location within a given time range. Stats are generated by fetching appliance events that match the supplied criteria and then aggregating them together based on the granularity specified with the request.

Note: This endpoint uses the location's time zone when generating time intervals for the stats, which is relevant if that time zone uses daylight saving time (some days will be 23 or 25 hours long).

Args:
    location_id (string): hexadecimal id of the sensor to query, e.g. ``0x0013A20040B65FAD``
    start (string): ISO 8601 start time for getting the events of appliances.
    end (string): ISO 8601 stop time for getting the events of appliances. Cannot be larger than 1 month from start time
    granularity (string): granularity of stats. If the granularity is 'unknown', the stats for the appliances between the start and end time are returned; must be one of "minutes", "hours", "days", "weeks", "months", or "unknown" (default: days)
    min_power (string): The minimum average power (in watts) for filtering. Only events with an average power above this value will be returned. (default: 400)
    per_page (string, optional): the number of returned results per page (min 1, max 500) (default: 10)
    page (string, optional): the page number to return (min 1, max 100000) (default: 1)

Returns:
    list: dictionary objects containing appliance events meeting specified criteria
19,496
def load_data(self, df): stmt = ddl.LoadData(self._qualified_name, df) return self._execute(stmt)
Wraps the LOAD DATA DDL statement. Loads data into a MapD table from a pandas.DataFrame or pyarrow.Table.

Parameters
----------
df: pandas.DataFrame or pyarrow.Table

Returns
-------
query : MapDQuery
19,497
def dest_fpath(self, source_fpath: str) -> str:
    relative_fpath = os.path.join(*source_fpath.split(os.sep)[1:])
    relative_dirpath = os.path.dirname(relative_fpath)
    source_fname = relative_fpath.split(os.sep)[-1]
    base_fname = source_fname.split('.')[0]
    # The destination name pattern is an assumption based on the docstring
    # ("end json-api file"); the original f-string literal was lost.
    dest_fname = f'{base_fname}.json'
    return os.path.join(self.dest_dir, relative_dirpath, dest_fname)
Calculates the full path of the destination json-api file from the source file's full path.
19,498
def append(self, obj): if isinstance(obj, dict) and self._col_names: obj = [obj.get(col, None) for col in self._col_names] assert isinstance(obj, list), \ "obj appended to ReprListList needs to be a list or dict" self._original.append(obj)
If obj is a list it is appended as-is; if it is a dictionary it is converted to a list (ordered by the column names) and then appended.

:param obj: dict or list of the object to append
:return: None
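A usage sketch, assuming a ReprListList instance constructed with column names (the constructor signature shown is hypothetical):

table = ReprListList([], col_names=['name', 'age'])   # hypothetical constructor signature
table.append(['Ada', 36])                             # list rows pass straight through
table.append({'age': 41, 'name': 'Grace'})            # dict rows are reordered to match the column names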
19,499
def scalar(name, data, step=None, description=None):
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    # Prefer the experimental summary_scope if present, otherwise fall back to
    # the public tf.summary.summary_scope.
    summary_scope = (
        getattr(tf.summary.experimental, 'summary_scope', None) or
        tf.summary.summary_scope)
    with summary_scope(
            name, 'scalar_summary', values=[data, step]) as (tag, _):
        tf.debugging.assert_scalar(data)
        return tf.summary.write(tag=tag,
                                tensor=tf.cast(data, tf.float32),
                                step=step,
                                metadata=summary_metadata)
Write a scalar summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A real numeric scalar value, convertible to a `float32` Tensor. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
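A TF2 usage sketch with an explicit default writer, assuming the scalar function above is importable; the log directory is illustrative:

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/logs/run1')
with writer.as_default():
    for step in range(3):
        scalar('loss', 0.1 * step, step=step, description='training loss')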