Columns: "Unnamed: 0" (int64, 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
11,600
def start_segment_address(cs, ip):
    b = [4, 0, 0, 0x03,
         (cs >> 8) & 0x0FF, cs & 0x0FF,
         (ip >> 8) & 0x0FF, ip & 0x0FF]
    return Record._from_bytes(b)
Return Start Segment Address Record. @param cs 16-bit value for CS register. @param ip 16-bit value for IP register. @return String representation of Intel Hex SSA record.
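For illustration: with cs=0x1234 and ip=0x5678 the byte list handed to Record._from_bytes is [4, 0, 0, 3, 0x12, 0x34, 0x56, 0x78] (record length 4, load offset 0, record type 03, then CS and IP big-endian); any checksum handling presumably happens inside Record._from_bytes.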
11,601
async def fetch_token(self):
    # '...' marks string literals (URL/payload templates, log messages,
    # response keys) that were lost from this sample
    url = (...).format(API_URL)
    payload = (...).format(self._email, self._password)
    reg = await self.api_post(url, None, payload)
    if reg is None:
        _LOGGER.error(...)
    else:
        self._userid = reg[...][...]
        self._token = reg[...][...]
        self._expdate = reg[...][...]
        _LOGGER.debug(..., self._userid, self.token)
Fetch new session token from api.
11,602
def update_config(config_new, config_default):
    if any(isinstance(v, dict) for v in config_new.values()):
        for k, v in list(config_new.items()):
            if isinstance(v, dict) and k in config_default:
                update_config(config_new[k], config_default[k])
            else:
                config_default[k] = v
    else:
        config_default.update(config_new)
    return config_default
Updates the loaded method configuration with default values.
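A quick sanity check of the merge behaviour (plain Python, no external dependencies): nested dicts are merged recursively, everything else is overwritten.
>>> update_config({'a': {'x': 1}, 'b': 2}, {'a': {'x': 0, 'y': 9}, 'b': 0, 'c': 3})
{'a': {'x': 1, 'y': 9}, 'b': 2, 'c': 3}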
11,603
def _set_shared_instances(self):
    self.inqueue = self.em.get_inqueue()
    self.outqueue = self.em.get_outqueue()
    self.namespace = self.em.get_namespace()
Sets attributes from the shared instances.
11,604
def _vmf_normalize(kappa, dim):
    num = np.power(kappa, dim / 2. - 1.)
    if dim / 2. - 1. < 1e-15:
        denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
    else:
        denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
    if np.isinf(num):
        raise ValueError("VMF scaling numerator was inf.")
    if np.isinf(denom):
        raise ValueError("VMF scaling denominator was inf.")
    if np.abs(denom) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")
    return num / denom
Compute normalization constant using built-in numpy/scipy Bessel approximations. Works well on small kappa and mu.
11,605
def parse(station: str, txt: str) -> TafData:
    # dict keys and string literals below are inferred from context,
    # not verbatim from the original
    core.valid_station(station)
    while len(txt) > 3 and txt[:4] in ('TAF ', 'AMD ', 'COR '):
        txt = txt[4:]
    _, station, time = core.get_station_and_time(txt[:20].split())
    retwx = {
        'end_time': None,
        'raw': txt,
        'remarks': None,
        'start_time': None,
        'station': station,
        'time': core.make_timestamp(time)
    }
    txt = txt.replace(station, '')
    txt = txt.replace(time, '').strip()
    if core.uses_na_format(station):
        use_na = True
        units = Units(**NA_UNITS)
    else:
        use_na = False
        units = Units(**IN_UNITS)
    txt, retwx['remarks'] = core.get_taf_remarks(txt)
    lines = core.split_taf(txt)
    parsed_lines = parse_lines(lines, units, use_na)
    if parsed_lines:
        parsed_lines[-1]['other'], retwx['max_temp'], retwx['min_temp'] \
            = core.get_temp_min_and_max(parsed_lines[-1]['other'])
        if not (retwx['max_temp'] or retwx['min_temp']):
            parsed_lines[0]['other'], retwx['max_temp'], retwx['min_temp'] \
                = core.get_temp_min_and_max(parsed_lines[0]['other'])
        start, end = parsed_lines[0]['start_time'], parsed_lines[0]['end_time']
        parsed_lines[0]['end_time'] = None
        retwx['start_time'], retwx['end_time'] = start, end
        parsed_lines = core.find_missing_taf_times(parsed_lines, start, end)
        parsed_lines = core.get_taf_flight_rules(parsed_lines)
    if retwx['station'][0] == 'A':
        parsed_lines[-1]['other'], retwx['alt'], retwx['temp'] \
            = core.get_oceania_temp_and_alt(parsed_lines[-1]['other'])
    retwx['forecast'] = [TafLineData(**line) for line in parsed_lines]
    return TafData(**retwx), units
Returns TafData and Units dataclasses with parsed data and their associated units
11,606
def _get_layout(self):
    connected = list()
    active_layout = list()
    disconnected = list()
    layout = OrderedDict(
        {"connected": OrderedDict(), "disconnected": OrderedDict()}
    )
    current = self.py3.command_output("xrandr")
    for line in current.splitlines():
        try:
            s = line.split(" ")
            infos = line[line.find("(") :]
            if s[1] == "connected":
                output, state, mode = s[0], s[1], None
                for index, x in enumerate(s[2:], 2):
                    if "x" in x and "+" in x:
                        mode = x
                        active_layout.append(output)
                        infos = line[line.find(s[index + 1]) :]
                        break
                    elif "(" in x:
                        break
                connected.append(output)
            elif s[1] == "disconnected":
                output, state, mode = s[0], s[1], None
                disconnected.append(output)
            else:
                continue
        except Exception as err:
            self.py3.log((...).format(err))  # log message literal lost
        else:
            layout[state][output] = {"infos": infos, "mode": mode, "state": state}
    if self.active_layout is None:
        self.active_comb = tuple(active_layout)
        self.active_layout = self._get_string_and_set_width(
            tuple(active_layout), self.active_mode
        )
    return layout
Get the outputs layout from xrandr and try to detect the currently active layout as best as we can on start.
11,607
def _show(self, message, indent=0, enable_verbose=True):
    if enable_verbose:
        print(" " * indent + message)
Message printer.
11,608
def mime(self):
    mime = self.mime_object()
    self.headers.prepare(mime)
    return mime
Returns the finalised mime object, after applying the internal headers. Usually this is not to be overridden.
11,609
def render(self, target, data):
    rows = self.get_rows(target, data)
    rows = self._filter_rows(rows)
    renderer = getattr(self, "_render_%s" % target.name, None)
    if renderer is None:
        raise ValueError(
            "Cannot render %r for %s." % (self.value, target))
    else:
        return renderer(rows)
Render the table.
11,610
def points_on_circle(radius, points):
    angle = np.linspace(0, 2 * np.pi, points)
    x_coord = np.cos(angle) * radius
    y_coord = np.sin(angle) * radius
    return x_coord, y_coord
Returns a set of uniformly spaced points around a circle. :param radius: radius of the circle :param points: number of points on the circle :return: arrays of x and y coordinates
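A minimal check (assumes numpy imported as np); note that np.linspace includes both 0 and 2π, so the first and last points coincide:
>>> x, y = points_on_circle(2.0, 5)
>>> bool(np.allclose(x**2 + y**2, 4.0))
True
>>> bool(np.isclose(x[0], x[-1]) and np.isclose(y[0], y[-1]))
True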
11,611
async def count(self, query, clear_limit=False):
    query = self._swap_database(query)
    return (await count(query, clear_limit=clear_limit))
Perform *COUNT* aggregated query asynchronously. :return: number of objects in ``select()`` query
11,612
def ostree_compose(self, release):
    # '...' marks release-dict keys and string literals lost from this sample
    start = datetime.utcnow()
    treefile = os.path.join(release[...], ...)
    cmd = release[...] % treefile
    with file(treefile, 'w') as tree:
        json.dump(release[...], tree)
    out, err, rcode = self.mock_chroot(release, cmd, new_chroot=True)
    ref = None
    commitid = None
    for line in out.split('\n'):
        if ... in line:
            line = line.replace(..., ...)
            ref, _, commitid = line.partition(...)
    self.log.info(..., datetime.utcnow() - start, ref, commitid)
    return ref, commitid
Compose the OSTree in the mock container
11,613
def _prepend_name(self, prefix, dict_):
    return dict(['.'.join([prefix, name]), msg]
                for name, msg in dict_.iteritems())
Returns a new dictionary whose keys are prefixed with the given name, i.e. each key becomes "prefix.key".
11,614
def _words_by_score(words, score, least_to_most, n=None):
    if words.shape != score.shape:
        raise ValueError()
    if n is not None and (n <= 0 or n > len(words)):
        raise ValueError()
    indices = np.argsort(score)
    if not least_to_most:
        indices = indices[::-1]
    ordered_words = words[indices]
    if n is not None:
        return ordered_words[:n]
    else:
        return ordered_words
Order a vector of `words` by a `score`, either `least_to_most` or reverse. Optionally return only the top `n` results.
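Worked example (assumes numpy as np); with least_to_most set to False, the highest score comes first:
>>> words = np.array(['a', 'b', 'c'])
>>> score = np.array([0.5, 0.1, 0.9])
>>> _words_by_score(words, score, least_to_most=False, n=2).tolist()
['c', 'a']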
11,615
async def create_student_container(self, job_id, parent_container_id, sockets_path,
                                   student_path, systemfiles_path,
                                   course_common_student_path, socket_id,
                                   environment_name, memory_limit, time_limit,
                                   hard_time_limit, share_network, write_stream):
    try:
        self._logger.debug("Starting new student container... %s %s %s %s",
                           environment_name, memory_limit, time_limit, hard_time_limit)
        if environment_name not in self._containers:
            self._logger.warning("Student container asked for an unknown environment %s (not in aliases)",
                                 environment_name)
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval",
                                                                "retval": 254,
                                                                "socket_id": socket_id})
            return
        environment = self._containers[environment_name]["id"]
        try:
            socket_path = path_join(sockets_path, str(socket_id) + ".sock")
            container_id = await self._docker.create_container_student(
                parent_container_id, environment, share_network, memory_limit,
                student_path, socket_path, systemfiles_path, course_common_student_path)
        except Exception as e:
            self._logger.exception("Cannot create student container!")
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval",
                                                                "retval": 254,
                                                                "socket_id": socket_id})
            if isinstance(e, asyncio.CancelledError):
                raise
            return
        self._student_containers_for_job[job_id].add(container_id)
        self._student_containers_running[container_id] = job_id, parent_container_id, socket_id, write_stream
        await self._write_to_container_stdin(write_stream, {"type": "run_student_started",
                                                            "socket_id": socket_id})
        try:
            await self._docker.start_container(container_id)
        except Exception as e:
            self._logger.exception("Cannot start student container!")
            await self._write_to_container_stdin(write_stream, {"type": "run_student_retval",
                                                                "retval": 254,
                                                                "socket_id": socket_id})
            if isinstance(e, asyncio.CancelledError):
                raise
            return
        await self._timeout_watcher.register_container(container_id, time_limit, hard_time_limit)
    except asyncio.CancelledError:
        raise
    except:
        self._logger.exception("Exception in create_student_container")
Creates a new student container. :param write_stream: stream on which to write the return value of the container (with a correctly formatted msgpack message)
11,616
def _finalize(self):
    # status strings and the print message were lost; '...' marks the gaps
    if self.status in [..., ...]:
        if self.verbose > 0:
            print_(...)
        self.status = ...
        self.save_state()
        self.db._finalize()
Reset the status and tell the database to finalize the traces.
11,617
def overlap_cplx(vec1, vec2, psd=None, low_frequency_cutoff=None,
                 high_frequency_cutoff=None, normalized=True):
    htilde = make_frequency_series(vec1)
    stilde = make_frequency_series(vec2)
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff, high_frequency_cutoff,
                                    stilde.delta_f, (len(stilde) - 1) * 2)
    if psd:
        inner = (htilde[kmin:kmax]).weighted_inner(stilde[kmin:kmax], psd[kmin:kmax])
    else:
        inner = (htilde[kmin:kmax]).inner(stilde[kmin:kmax])
    if normalized:
        sig1 = sigma(vec1, psd=psd, low_frequency_cutoff=low_frequency_cutoff,
                     high_frequency_cutoff=high_frequency_cutoff)
        sig2 = sigma(vec2, psd=psd, low_frequency_cutoff=low_frequency_cutoff,
                     high_frequency_cutoff=high_frequency_cutoff)
        norm = 1 / sig1 / sig2
    else:
        norm = 1
    return 4 * htilde.delta_f * inner * norm
Return the complex overlap between the two TimeSeries or FrequencySeries. Parameters ---------- vec1 : TimeSeries or FrequencySeries The input vector containing a waveform. vec2 : TimeSeries or FrequencySeries The input vector containing a waveform. psd : Frequency Series A power spectral density to weight the overlap. low_frequency_cutoff : {None, float}, optional The frequency to begin the overlap. high_frequency_cutoff : {None, float}, optional The frequency to stop the overlap. normalized : {True, boolean}, optional Set if the overlap is normalized. If true, it will range from 0 to 1. Returns ------- overlap: complex
11,618
def get_notmuch_setting(self, section, key, fallback=None):
    value = None
    if section in self._notmuchconfig:
        if key in self._notmuchconfig[section]:
            value = self._notmuchconfig[section][key]
    if value is None:
        value = fallback
    return value
look up config values from notmuch's config :param section: section the key is in :type section: str :param key: key to look up :type key: str :param fallback: fallback returned if key is not present :type fallback: str :returns: config value with type as specified in the spec-file
11,619
def statistical_axes(fit, **kw):
    # kwarg names are inferred from the local variable names and docstring
    method = kw.pop('method', 'sampling')
    confidence_level = kw.pop('confidence_level', 0.95)
    dof = kw.pop('dof', 2)
    nominal = fit.eigenvalues
    if method == 'sampling':
        cov = sampling_covariance(fit, **kw)
    elif method == 'noise':
        cov = noise_covariance(fit, **kw)
    if kw.pop('chisq', False):
        z = chi2.ppf(confidence_level, dof)
    else:
        z = fisher_statistic(fit.n, confidence_level, dof=dof)
    err = z * N.sqrt(cov)
    return apply_error_scaling(nominal, err, n=fit.n, **kw)
Hyperbolic error using a statistical process (either sampling or noise errors). Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data.
11,620
def assumes(*args):
    args = tuple(args)

    def decorator(func):
        func.assumptions = args
        return func
    return decorator
Stores a function's assumptions as an attribute.
11,621
def getProductUIDs(self):
    uids = []
    for orderitem in self.objectValues():
        product = orderitem.getProduct()
        if product is not None:
            uids.append(product.UID())
    return uids
return the uids of the products referenced by order items
11,622
def p_theory(self, p):
    if len(p) == 4:
        p[0] = Theory(p[2])
    else:
        p[0] = Theory(p[2], literals=p[4])
theory : LBRACKET superclauses_sum RBRACKET | LBRACKET superclauses_sum RBRACKET literal_list
11,623
def object_data(self):
    # '...' marks string literals and a truncated branch lost from this sample
    self.foxml_buffer = io.BytesIO()
    if self.progress_bar:
        self.progress_bar.start()
    previous_section = None
    while True:
        try:
            section = self.get_next_section()
        except StopIteration:
            break
        if section == BINARY_CONTENT_START:
            self.within_file = True
            dsinfo = self.get_datastream_info(previous_section)
            if dsinfo:
                logger.info(..., dsinfo)
            else:
                raise Exception(... % (self.obj.pid, previous_section))
            if self.xml_only and not dsinfo[...] in [..., ..., ...]:
                try:
                    dsid = dsinfo[...].split(...)[0]
                except ValueError:
                    ...
        elif section == BINARY_CONTENT_END:
            self.within_file = False
        elif self.within_file:
            continue
        else:
            self.foxml_buffer.write(section)
        previous_section = section
    return self.foxml_buffer
Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls
11,624
def _init_metadata(self):
    # key names below follow the standard osid metadata template; the
    # label/instruction strings were lost, so '...' marks them
    self._provenance_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'provenance'),
        'element_label': ...,
        'instructions': ...,
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_string_values': [],
        'syntax': 'STRING',
        'minimum_string_length': None,
        'maximum_string_length': None,
        'string_set': []
    }
stub
11,625
def get_aggregations(self, query, group_by, stats_field,
                     percents=(50, 95, 99, 99.9), size=100):
    body = {
        "query": {
            "bool": {
                "must": [{
                    "query_string": {
                        "query": query,
                    },
                }]
            },
        },
        "aggregations": {
            "group_by_agg": {
                "terms": {
                    "field": group_by,
                    "size": size,
                },
                "aggregations": {
                    "field_stats": {
                        "percentiles": {
                            "field": stats_field,
                            "percents": percents
                        }
                    }
                }
            }
        }
    }
    body["query"]["bool"]["must"].append(self._get_timestamp_filer())
    self._logger.info("Getting aggregations for %s field when grouped by %s",
                      group_by, stats_field)
    res = self._es.search(
        body=body,
        index=self._index,
        size=0,
    )
    # response keys below follow the standard Elasticsearch aggregation format
    aggs = {}
    for bucket in res["aggregations"]["group_by_agg"]["buckets"]:
        entry = {
            "count": bucket["doc_count"]
        }
        entry.update(bucket["field_stats"]["values"])
        aggs[bucket["key"]] = entry
    return aggs
Returns aggregations (rows count + percentile stats) for a given query This is basically the same as the following pseudo-SQL query: SELECT PERCENTILE(stats_field, 75) FROM query GROUP BY group_by LIMIT size https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-bucket-terms-aggregation.html https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-metrics-percentile-aggregation.html Please note that group_by should be provided by a "keyword" field: Fielddata is disabled on text fields by default. Set fielddata=true on [@context.caller] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. Alternatively use a keyword field instead. :type query str :type group_by str :type stats_field str :type percents tuple[int] :type size int :rtype: dict
11,626
def tex_parse(string):
    # the regex patterns and replacement strings were lost from this
    # sample; '...' marks the gaps
    string = string.replace(..., ...).replace(..., ...)

    def tex_replace(match):
        return \
            sub(..., ...,
                sub(..., ...,
                    sub(..., ...,
                        sub(..., ...,
                            sub(... + GREEK_LETTERS + ..., ...,
                                match.group(1))))))
    return mark_safe(sub(..., tex_replace, escape(string)))
Renders some basic TeX math to HTML.
11,627
def to_dict(self) -> Dict[str, Any]:
    # dict keys are inferred from the attribute names
    return {
        'type': self.__class__.__name__,
        'channel_identifier': self.channel_identifier,
        'token_network_address': to_normalized_address(self.token_network_address),
        'balance_hash': encode_hex(self.balance_hash),
        'nonce': self.nonce,
        'additional_hash': encode_hex(self.additional_hash),
        'signature': encode_hex(self.signature),
        'chain_id': self.chain_id,
    }
Message format according to monitoring service spec
11,628
def json_dumps(obj):
    try:
        return json.dumps(obj, indent=2, sort_keys=True, allow_nan=False)
    except ValueError:
        pass
    json_str = json.dumps(obj, indent=2, sort_keys=True, allow_nan=True)
    json_obj = json.loads(json_str)

    def do_map(obj):
        if obj is None:
            return None
        if isinstance(obj, basestring):
            return obj
        if isinstance(obj, dict):
            res = {}
            for (key, value) in obj.items():
                res[key] = do_map(value)
            return res
        if isinstance(obj, collections.Iterable):
            res = []
            for el in obj:
                res.append(do_map(el))
            return res
        if math.isnan(obj):
            return "NaN"
        if math.isinf(obj):
            return "Infinity" if obj > 0 else "-Infinity"
        return obj

    return json.dumps(
        do_map(json_obj), indent=2, sort_keys=True, allow_nan=False)
A safe JSON dump function that provides correct diverging numbers for a ECMAscript consumer.
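Illustration of the fallback path (the helper targets Python 2 — note basestring and collections.Iterable; under Python 3 one would use str and collections.abc.Iterable instead):
>>> sorted(json.loads(json_dumps({'x': float('nan'), 'y': 1})).items())
[(u'x', u'NaN'), (u'y', 1)]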
11,629
def _generate_current_command(self):
    # the format-string reconstruction below is a best guess from the
    # surviving tokens; the axis-currents expression ('...') was lost
    command = '{}{currents} {code}P{seconds}'.format(
        GCODES['SET_CURRENT'],
        currents=...,
        code=GCODES['DWELL'],
        seconds=CURRENT_CHANGE_DELAY
    )
    log.debug("_generate_current_command: {}".format(command))
    return command
Returns a constructed GCode string that contains this driver's axis-current settings, plus a small delay to wait for those settings to take effect.
11,630
def __updateJobResultsPeriodic(self):
    # dict keys are inferred; '...' marks code lost from this sample
    if self._isBestModelStored and not self._isBestModel:
        return
    while True:
        jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0]
        if jobResultsStr is None:
            jobResults = {}
        else:
            self._isBestModelStored = True
            if not self._isBestModel:
                return
            jobResults = json.loads(jobResultsStr)
        bestModel = jobResults.get('bestModel', None)
        bestMetric = jobResults.get('bestValue', None)
        isSaved = jobResults.get('saved', False)
        if (bestModel is not None) and (self._modelID != bestModel):
            self._isBestModel = False
            return
        newResults = ...
        isUpdated = self._jobsDAO.jobSetFieldIfEqual(...,
                                                     curValue=jobResultsStr,
                                                     newValue=newResults)
        if isUpdated or (not isUpdated and newResults == jobResultsStr):
            self._isBestModel = True
            break
Periodic check to see if this is the best model. This should only have an effect if this is the *first* model to report its progress
11,631
def randomize_molecule_low(molecule, manipulations):
    manipulations = copy.copy(manipulations)
    shuffle(manipulations)
    coordinates = molecule.coordinates.copy()
    for manipulation in manipulations:
        manipulation.apply(coordinates)
    return molecule.copy_with(coordinates=coordinates)
Return a randomized copy of the molecule, without the nonbond check.
11,632
def from_dataset(cls, dataset, constraints=(), **kwargs):
    fm = LWLRForwardModel(dataset.dim_x, dataset.dim_y, **kwargs)
    fm.dataset = dataset
    im = cls.from_forward(fm, constraints=constraints, **kwargs)
    return im
Construct an optimized inverse model from an existing dataset. An LWLR forward model is constructed by default.
11,633
def main(argv=None):
    # the option spec and file modes are inferred from the surrounding code;
    # '...' marks message literals that were lost
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], 'h', ['help'])
    except getopt.error as msg:
        raise Usage(msg)
    for opt, _ in opts:
        if opt in ('-h', '--help'):
            print(__doc__)
            print(help_msg)
            return 0
    if not args or len(args) > 4:
        raise Usage(...)
    with open(args[0], 'r') as template:
        fsm = TextFSM(template)
        print(... % fsm)
    if len(args) > 1:
        with open(args[1], 'r') as f:
            cli_input = f.read()
        table = fsm.ParseText(cli_input)
        print(...)
        result = str(fsm.header) + '\n'
        for line in table:
            result += str(line) + '\n'
        print(result, end='')
    if len(args) > 2:
        with open(args[2], 'r') as f:
            ref_table = f.read()
        if ref_table != result:
            print(...)
            return 1
        else:
            print(...)
Validate text parsed with FSM or validate an FSM via command line.
11,634
def allreduce_ring(xs, devices, reduction_fn_string="SUM"):
    n = len(xs)
    if len(devices) != n:
        raise ValueError("devices must be a list of length len(xs)")
    if n == 1:
        return xs
    shape = xs[0].shape.as_list()
    size = None if None in shape else mtf.list_product(shape)
    if size is None or size < 1024 or size % n != 0:
        return allreduce_ring_single_shard(xs, devices, reduction_fn_string)

    def _circular_shift(l, n):
        n %= len(l)
        return l[-n:] + l[:-n]

    def _flatten_and_split(x):
        return tf.split(tf.reshape(x, [-1]), n)

    def _concat_and_reshape(xs):
        return tf.reshape(tf.concat(xs, 0), shape)

    x_split = mtf.parallel(devices, _flatten_and_split, xs)
    x_split_t = mtf.transpose_list_of_lists(x_split)
    y_split_t = []
    for shard in xrange(n):
        shard_xs = _circular_shift(x_split_t[shard], shard)
        shard_devices = _circular_shift(devices, shard)
        shard_ys = allreduce_ring_single_shard(
            shard_xs, shard_devices, reduction_fn_string)
        y_split_t.append(_circular_shift(shard_ys, -shard))
    y_split = mtf.transpose_list_of_lists(y_split_t)
    ys = mtf.parallel(devices, _concat_and_reshape, y_split)
    return ys
Compute the reduction of all Tensors and put the result everywhere. Performance-optimized for a ring of devices. Args: xs: a list of n tf.Tensors devices: a list of strings reduction_fn_string: "SUM" or "MAX" Returns: a list of n Tensors Raises: ValueError: if devices is not a list of n strings
11,635
def results(self, dataset_name, index_by, timeframe):
    url = "{0}/{1}/results".format(self._cached_datasets_url, dataset_name)
    index_by = index_by if isinstance(index_by, str) else json.dumps(index_by)
    timeframe = timeframe if isinstance(timeframe, str) else json.dumps(timeframe)
    query_params = {
        "index_by": index_by,
        "timeframe": timeframe
    }
    return self._get_json(
        HTTPMethods.GET,
        url,
        self._get_read_key(),
        params=query_params
    )
Retrieve results from a Cached Dataset. Read key must be set.
11,636
def is_valid_address(s):
    try:
        pairs = s.split(":")
        if len(pairs) != 6:
            return False
        if not all(0 <= int(b, 16) <= 255 for b in pairs):
            return False
    except:
        return False
    return True
returns True if address is a valid Bluetooth address. Valid addresses are always strings of the form XX:XX:XX:XX:XX:XX, where X is a hexadecimal character. For example, 01:23:45:67:89:AB is a valid address, but IN:VA:LI:DA:DD:RE is not
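The docstring's own examples, as doctests (plus a too-short address):
>>> is_valid_address('01:23:45:67:89:AB')
True
>>> is_valid_address('IN:VA:LI:DA:DD:RE')
False
>>> is_valid_address('01:23:45:67:89')
False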
11,637
def input(self, _in, out, **kw):
    args = [self.binary or ...] + self.rebase_opt  # default binary name lost
    if self.extra_args:
        args.extend(self.extra_args)
    self.subprocess(args, out, _in)
Input filtering.
11,638
def defocusThroughDepth(u, uf, f, fn, k=2.355):
    return (k / fn) * (f**2 * abs(u - uf)) / (u * (uf - f))
return the defocus (mm std) through DOF u -> scene point (depth value) uf -> in-focus position (the distance at which the scene point should be placed in order to be focused) f -> focal length k -> camera dependent constant (transferring blur circle to PSF), 2.335 would be FHWD of 2dgaussian fn --> f-number (relative aperture) equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736 Pertuz et.al. "Analysis of focus measure operators for shape-from-focus" all parameters should be in the same physical unit [mm] !! assumes spatially invariant blur
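A worked number (values in mm, as the docstring assumes): a point at u = 1200 with focus at uf = 1000, f = 50 and f-number 2.8 gives (2.355/2.8)·(50²·200)/(1200·950) ≈ 0.369:
>>> round(defocusThroughDepth(1200, 1000, 50, 2.8), 3)
0.369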
11,639
def get_parent(self):
    return None if self.zoom == 0 else self.tile_pyramid.tile(
        self.zoom - 1, self.row // 2, self.col // 2
    )
Return tile from previous zoom level.
11,640
def register(self, name, func):
    try:
        templatehook = self._registry[name]
    except KeyError:
        templatehook = self._register(name)
    templatehook.register(func)
Register a new callback. When the name/id is not found a new hook is created under its name, meaning the hook is usually created by the first registered callback :param str name: Hook name :param callable func: A func reference (callback)
11,641
def process_from_web():
    logger.info(... % trrust_human_url)  # log message literal lost
    res = requests.get(trrust_human_url)
    res.raise_for_status()
    df = pandas.read_table(io.StringIO(res.text))
    tp = TrrustProcessor(df)
    tp.extract_statements()
    return tp
Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute.
11,642
def remove_item(self, val):
    return cache.lrem(self.key, json.dumps(val))
Removes given item from the list. Args: val: Item Returns: Cache backend response.
11,643
def unregister_message_callback(self, type_, from_):
    if type_ is not None:
        type_ = self._coerce_enum(type_, structs.MessageType)
    warnings.warn(
        "unregister_message_callback is deprecated; use "
        "aioxmpp.dispatcher.SimpleMessageDispatcher instead",
        DeprecationWarning,
        stacklevel=2
    )
    self._xxx_message_dispatcher.unregister_callback(
        type_,
        from_,
    )
Unregister a callback previously registered with :meth:`register_message_callback`. :param type_: Message type to listen for. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for. :type from_: :class:`~aioxmpp.JID` or :data:`None` :raises KeyError: if no function is currently registered for the given ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to both arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering the super-wildcard with both arguments set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.unregister_callback`, except that the latter is not deprecated.
11,644
def datetimes(self):
    if self._datetimes is None:
        self._datetimes = tuple(self.header.analysis_period.datetimes)
    return self._datetimes
Return datetimes for this collection as a tuple.
11,645
def get_available_modes(self):
    resource = "modes"
    resource_event = self.publish_and_get_event(resource)
    if resource_event:
        properties = resource_event.get("properties")
        return properties.get("modes")
    return None
Return a list of available mode objects for an Arlo user.
11,646
def clone(cls, model, use_json=True, use_lp=False):
    model.update()
    interface = sys.modules[cls.__module__]
    if use_lp:
        warnings.warn("Cloning with LP formats can change variable and constraint ID's.")
        new_model = cls.from_lp(model.to_lp())
        new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
        return new_model
    if use_json:
        new_model = cls.from_json(model.to_json())
        new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
        return new_model
    new_model = cls()
    for variable in model.variables:
        new_variable = interface.Variable.clone(variable)
        new_model._add_variable(new_variable)
    for constraint in model.constraints:
        new_constraint = interface.Constraint.clone(constraint, model=new_model)
        new_model._add_constraint(new_constraint)
    if model.objective is not None:
        new_model.objective = interface.Objective.clone(model.objective, model=new_model)
    new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
    return new_model
Make a copy of a model. The model being copied can be of the same type or belong to a different solver interface. This is the preferred way of copying models. Example ---------- >>> new_model = Model.clone(old_model)
11,647
def get_uri_template(urlname, args=None, prefix=""):
    # the truncated beginning (URI-template converter and resolver setup)
    # is reconstructed here from the surviving "%s/%s" fragment and the
    # _convert(result, params) call sites below; treat it as inferred
    def _convert(template, params):
        paths = template % dict((p, "{%s}" % p) for p in params)
        return "%s/%s" % (prefix, paths)

    resolver = get_resolver(None)
    possibilities = resolver.reverse_dict.getlist(urlname)
    for tmp in possibilities:
        possibility, pattern = tmp[:2]
        if not args:
            result, params = possibility[0]
            return _convert(result, params)
        else:
            seen_params = []
            for result, params in possibility:
                seen_params.append(params)
            common_params = reduce(lambda x, y: set(x) & set(y), seen_params)
            expected_params = sorted(common_params.union(args))
            for result, params in possibility:
                if sorted(params) == expected_params:
                    return _convert(result, params)
    return None
Utility function to return a URI Template from a named URL in django Copied from django-digitalpaper. Restrictions: - Only supports named urls! i.e. url(... name="toto") - Only supports one namespace level - Only returns the first URL possibility. - Supports multiple pattern possibilities (i.e., patterns with non-capturing parenthesis in them) by trying to find a pattern whose optional parameters match those you specified (a parameter is considered optional if it doesn't appear in every pattern possibility)
11,648
def _vector_size(v):
    if isinstance(v, Vector):
        return len(v)
    elif type(v) in (array.array, list, tuple, xrange):
        return len(v)
    elif type(v) == np.ndarray:
        if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
            return len(v)
        else:
            raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
    elif _have_scipy and scipy.sparse.issparse(v):
        assert v.shape[1] == 1, "Expected column vector"
        return v.shape[0]
    else:
        raise TypeError("Cannot treat type %s as a vector" % type(v))
Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
11,649
def NewDefaultAgency(self, **kwargs):
    agency = self._gtfs_factory.Agency(**kwargs)
    if not agency.agency_id:
        agency.agency_id = util.FindUniqueId(self._agencies)
    self._default_agency = agency
    self.SetDefaultAgency(agency, validate=False)
    return agency
Create a new Agency object and make it the default agency for this Schedule
11,650
def convex_conj(self):
    if self.exponent == np.inf:
        return L1Norm(self.domain)
    elif self.exponent == 2:
        return L2Norm(self.domain)
    else:
        return LpNorm(self.domain, exponent=conj_exponent(self.exponent))
The conjugate functional of IndicatorLpUnitBall. The convex conjugate functional of an ``Lp`` norm, ``p < infty`` is the indicator function on the unit ball defined by the corresponding dual norm ``q``, given by ``1/p + 1/q = 1`` and where ``q = infty`` if ``p = 1`` [Roc1970]. By the Fenchel-Moreau theorem, the convex conjugate functional of indicator function on the unit ball in ``Lq`` is the corresponding Lp-norm [BC2011]. References ---------- [Roc1970] Rockafellar, R. T. *Convex analysis*. Princeton University Press, 1970. [BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and monotone operator theory in Hilbert spaces*. Springer, 2011.
11,651
def validate_json_schema(self):
    errors = []
    for work in self:
        for task in work:
            if not task.get_results().validate_json_schema():
                errors.append(task)
        if not work.get_results().validate_json_schema():
            errors.append(work)
    if not self.get_results().validate_json_schema():
        errors.append(self)
    return errors
Validate the JSON schema. Return list of errors.
11,652
def to_xdr_object(self):
    return Xdr.types.Memo(type=Xdr.const.MEMO_HASH, hash=self.memo_hash)
Creates an XDR Memo object for a transaction with MEMO_HASH.
11,653
def scan_uow_candidates(self):
    # '...' marks settings keys and log messages lost from this sample
    try:
        since = settings.settings[...]
        uow_list = self.uow_dao.get_reprocessing_candidates(since)
    except LookupError as e:
        self.logger.info((...).format(e))
        return
    for uow in uow_list:
        try:
            if uow.process_name not in self.managed_handlers:
                self.logger.debug((...).format(uow.process_name))
                continue
            thread_handler = self.managed_handlers[uow.process_name]
            assert isinstance(thread_handler, ManagedThreadHandler)
            if not thread_handler.process_entry.is_on:
                self.logger.debug((...).format(uow.process_name))
                continue
            entry = PriorityEntry(uow)
            if entry in self.reprocess_uows[uow.process_name]:
                continue
            if datetime.utcnow() - uow.created_at > timedelta(hours=settings.settings[...]):
                self._cancel_uow(uow)
                continue
            if datetime.utcnow() - uow.submitted_at > timedelta(hours=settings.settings[...]) \
                    or uow.is_invalid:
                self.reprocess_uows[uow.process_name].put(entry)
        except Exception as e:
            self.logger.error((...).format(e), exc_info=True)
This method performs two actions: - enlists stale or invalid units of work into the reprocessing queue - cancels UOWs that are older than 2 days and were submitted more than 1 hour ago
11,654
def read_cf1_config(self):
    target = self._cload.targets[0xFF]
    config_page = target.flash_pages - 1
    return self._cload.read_flash(addr=0xFF, page=config_page)
Read a flash page from the specified target
11,655
def _validate_zooms(zooms):
    if isinstance(zooms, dict):
        if any([a not in zooms for a in ["min", "max"]]):
            raise MapcheteConfigError("min and max zoom required")
        zmin = _validate_zoom(zooms["min"])
        zmax = _validate_zoom(zooms["max"])
        if zmin > zmax:
            raise MapcheteConfigError(
                "max zoom must not be smaller than min zoom")
        return list(range(zmin, zmax + 1))
    elif isinstance(zooms, list):
        if len(zooms) == 1:
            return zooms
        elif len(zooms) == 2:
            zmin, zmax = sorted([_validate_zoom(z) for z in zooms])
            return list(range(zmin, zmax + 1))
        else:
            return zooms
    else:
        return [_validate_zoom(zooms)]
Return a list of zoom levels. Following inputs are converted: - int --> [int] - dict{min, max} --> range(min, max + 1) - [int] --> [int] - [int, int] --> range(smaller int, bigger int + 1)
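Assuming _validate_zoom simply validates and returns its integer argument, the documented conversions read:
>>> _validate_zooms({'min': 3, 'max': 5})
[3, 4, 5]
>>> _validate_zooms([5, 3])
[3, 4, 5]
>>> _validate_zooms(7)
[7]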
11,656
def get_url_parameters(self):
    url_fields = {}
    for field in self.url_fields:
        url_fields[field] = getattr(self, field)
    return url_fields
Create a dictionary of parameters used in URLs for this model.
11,657
def remove_all_cts_records_by(self, crypto_idfp):
    regex = re.compile(...)  # pattern literal lost
    to_remove = []
    for k, v in self.filter(regex, is_regex=True):
        if v == crypto_idfp:
            match = regex.match(k)
            to_remove.append((match.group(1), int(match.group(2))))
    for i in to_remove:
        self.remove_cts_record(*i)
Remove all CTS records from the specified player :param crypto_idfp: :return:
11,658
def ctrl_transfer(self, bmRequestType, bRequest, wValue=0, wIndex=0,
                  data_or_wLength=None, timeout=None):
    try:
        buff = util.create_buffer(data_or_wLength)
    except TypeError:
        buff = _interop.as_array(data_or_wLength)
    self._ctx.managed_open()
    recipient = bmRequestType & 3
    rqtype = bmRequestType & (3 << 5)
    if recipient == util.CTRL_RECIPIENT_INTERFACE \
            and rqtype != util.CTRL_TYPE_VENDOR:
        interface_number = wIndex & 0xff
        self._ctx.managed_claim_interface(self, interface_number)
    ret = self._ctx.backend.ctrl_transfer(
        self._ctx.handle,
        bmRequestType,
        bRequest,
        wValue,
        wIndex,
        buff,
        self.__get_timeout(timeout))
    if isinstance(data_or_wLength, array.array) \
            or util.ctrl_direction(bmRequestType) == util.CTRL_OUT:
        return ret
    elif ret != len(buff) * buff.itemsize:
        return buff[:ret]
    else:
        return buff
r"""Do a control transfer on the endpoint 0. This method is used to issue a control transfer over the endpoint 0 (endpoint 0 is required to always be a control endpoint). The parameters bmRequestType, bRequest, wValue and wIndex are the same of the USB Standard Control Request format. Control requests may or may not have a data payload to write/read. In cases which it has, the direction bit of the bmRequestType field is used to infer the desired request direction. For host to device requests (OUT), data_or_wLength parameter is the data payload to send, and it must be a sequence type convertible to an array object. In this case, the return value is the number of bytes written in the data payload. For device to host requests (IN), data_or_wLength is either the wLength parameter of the control request specifying the number of bytes to read in data payload, and the return value is an array object with data read, or an array object which the data will be read to, and the return value is the number of bytes read.
11,659
def find_lemma(self, verb):
    v = verb.lower()
    if not (v.startswith("ge") and v.endswith("t")):
        for prefix in prefixes:
            if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                return prefix + self.inflections[v[len(prefix):]]
    b, suffix = " " in v and v.split()[:2] or (v, "")
    if b.endswith(("ln", "rn")):
        return b
    for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
        if b.endswith(x):
            b = b[:-len(x)]
            break
    for x, y in (("ieb", "eib"), ("ied", "eid"), ("ief", "auf"), ("ieg", "eig"),
                 ("iel", "alt"), ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß"),
                 ("iff", "eif"), ("iss", "eiss"), (u"iß", u"eiß"), ("it", "eid"),
                 ("oss", "iess"), (u"öss", "iess")):
        if b.endswith(x):
            b = b[:-len(x)] + y
            break
    b = b.replace("eeiss", "eiss")
    b = b.replace("eeid", "eit")
    if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
        b = b + "e"
    if b.endswith(("hl", "ll", "ul", "eil")):
        b = b + "e"
    if b.startswith("ge") and v.endswith("t"):
        b = b[2:]
    if b.endswith(("lnde", "rnde")):
        b = b[:-3]
    if b.endswith(("ae", "al", u"öe", u"üe")):
        b = b.rstrip("e") + "te"
    if b.endswith(u"äl"):
        b = b + "e"
    return suffix + b + "n"
Returns the base form of the given inflected verb, using a rule-based approach.
11,660
def do_selection_reduction_to_one_parent(selection):
    all_models_selected = selection.get_all()
    parent_m_count_dict = {}
    for model in all_models_selected:
        parent_m_count_dict[model.parent] = \
            parent_m_count_dict[model.parent] + 1 if model.parent in parent_m_count_dict else 1
    parent_m = None
    current_count_parent = 0
    for possible_parent_m, count in parent_m_count_dict.items():
        parent_m = possible_parent_m if current_count_parent < count else parent_m
    if len(selection.states) == 1 and selection.get_selected_state().state.is_root_state:
        parent_m = None
    if len(all_models_selected) > 1:
        selection.set(selection.get_selected_state())
    if parent_m is not None:
        for model in all_models_selected:
            if model.parent is not parent_m:
                selection.remove(model)
    return parent_m
Find and reduce selection to one parent state. :param selection: :return: state model which is parent of selection or None if root state
11,661
def get_stdev(self, col, row):
    return javabridge.call(self.jobject, "getStdDev", "(II)D", col, row)
Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float
11,662
def process(self, plugin, context, instance=None, action=None):
    plugin = plugin.to_json()
    instance = instance.to_json() if instance is not None else None
    return self._dispatch("process", args=[plugin, instance, action])
Transmit a `process` request to host Arguments: plugin (PluginProxy): Plug-in to process context (ContextProxy): Filtered context instance (InstanceProxy, optional): Instance to process action (str, optional): Action to process
11,663
def generate_value_label(self, byteorder, encoding):
    # the struct format characters and null-byte literals below are
    # inferred from the Stata value-label layout
    self._encoding = encoding
    bio = BytesIO()
    null_string = '\x00'
    null_byte = b'\x00'
    # len
    bio.write(struct.pack(byteorder + 'i', self.len))
    # labname
    labname = self._encode(_pad_bytes(self.labname[:32], 33))
    bio.write(labname)
    # padding - 3 bytes
    for i in range(3):
        bio.write(struct.pack('c', null_byte))
    # n - int32
    bio.write(struct.pack(byteorder + 'i', self.n))
    # textlen - int32
    bio.write(struct.pack(byteorder + 'i', self.text_len))
    # off - int32 array
    for offset in self.off:
        bio.write(struct.pack(byteorder + 'i', offset))
    # val - int32 array
    for value in self.val:
        bio.write(struct.pack(byteorder + 'i', value))
    # txt - null-terminated text labels
    for text in self.txt:
        bio.write(self._encode(text + null_string))
    bio.seek(0)
    return bio.read()
Parameters ---------- byteorder : str Byte order of the output encoding : str File encoding Returns ------- value_label : bytes Bytes containing the formatted value label
11,664
def convert_p(element, text):
    # '...' marks tag/attribute names and the indent string lost from
    # this sample
    depth = -1
    while element:
        if (not element.name == ...
                and not element.parent.get(...) == ...):
            depth += 1
        element = element.parent
    if text:
        text = ... * depth + text
    return text
Adds 2 newlines to the end of text
11,665
def _is_locked(self):
    if os.path.isfile(self._lck):
        try:
            import psutil
        except ImportError:
            return True
        with open(self._lck) as f:
            pid = f.read()
        return True if psutil.pid_exists(int(pid)) else False
    else:
        return False
Checks to see if we are already pulling items from the queue
11,666
def lower_key(fn):
    def lower(key):
        try:
            return key.lower()
        except AttributeError:
            return key
    return process_key(lower, fn)
:param fn: a key function :return: a function that wraps around the supplied key function to ensure the returned key is in lowercase.
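Sketch of how this composes. process_key is not shown in this row, so it is stubbed here as plain function composition, which is an assumption:
>>> process_key = lambda proc, fn: (lambda key: fn(proc(key)))  # assumed stub
>>> get = lower_key(lambda k: {'abc': 1}.get(k))
>>> get('ABC')
1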
11,667
def get_assessments_by_query(self, assessment_query):
    # the MongoDB operators ('$in', '$nin', '$and', '$or') are standard for
    # this code base; '...' marks literals that could not be recovered
    if ... in assessment_query._query_terms:
        collection = JSONClientValidated(..., collection=..., runtime=self._runtime)
        match = '$in' in assessment_query._query_terms[...].keys()
        if match:
            match_identifiers = [ObjectId(Id(i).identifier)
                                 for i in assessment_query._query_terms[...]['$in']]
            query = {'$in': match_identifiers}
        else:
            match_identifiers = [ObjectId(Id(i).identifier)
                                 for i in assessment_query._query_terms[...]['$nin']]
            query = {'$nin': match_identifiers}
        result = collection.find({
            "_id": query
        })
        assessment_ids = [ObjectId(Id(r[...]).identifier) for r in result]
        collection = JSONClientValidated(..., collection=..., runtime=self._runtime)
        result = collection.find({
            "_id": {"$in": assessment_ids}
        })
        return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
    else:
        and_list = list()
        or_list = list()
        for term in assessment_query._query_terms:
            if '$in' in assessment_query._query_terms[term] and \
                    '$nin' in assessment_query._query_terms[term]:
                and_list.append(
                    {'$or': [{term: {'$in': assessment_query._query_terms[term]['$in']}},
                             {term: {'$nin': assessment_query._query_terms[term]['$nin']}}]})
            else:
                and_list.append({term: assessment_query._query_terms[term]})
        for term in assessment_query._keyword_terms:
            or_list.append({term: assessment_query._keyword_terms[term]})
        if or_list:
            and_list.append({'$or': or_list})
        view_filter = self._view_filter()
        if view_filter:
            and_list.append(view_filter)
        if and_list:
            query_terms = {'$and': and_list}
            collection = JSONClientValidated(..., collection=..., runtime=self._runtime)
            result = collection.find(query_terms).sort('_id', DESCENDING)
        else:
            result = []
        return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
Gets a list of ``Assessments`` matching the given assessment query. arg: assessment_query (osid.assessment.AssessmentQuery): the assessment query return: (osid.assessment.AssessmentList) - the returned ``AssessmentList`` raise: NullArgument - ``assessment_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
11,668
def get_data_by_slug(model, slug, kind=..., **kwargs):  # default kind lost
    instance = get_instance_by_slug(model, slug, **kwargs)
    if not instance:
        return
    return ins2dict(instance, kind)
Get instance data by slug and kind. Raise 404 Not Found if there is no data. This function requires the model to have a `slug` column. :param model: a string, model name in rio.models :param slug: a string used to query by `slug`. This requires there is a slug field in model definition. :param kind: a string specifying which kind of dict transformer should be called. :return: a dict or None.
11,669
def get_lines(handle, line):
    for i, l in enumerate(handle):
        if i == line:
            return l
Get zero-indexed line from an open file-like.
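A quick check with an in-memory file:
>>> import io
>>> get_lines(io.StringIO('alpha\nbeta\ngamma\n'), 1)
'beta\n'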
11,670
def flip_coded(self):
    self.genotypes = 2 - self.genotypes
    self.reference, self.coded = self.coded, self.reference
Flips the coding of the alleles.
11,671
def instantiate_components(self, context):
    with use_ipopo(context) as ipopo:
        for name, (factory, properties) in self.__state.components.items():
            ipopo.instantiate(factory, name, properties)
Instantiate the defined components .. note:: This method requires the iPOPO core service to be registered. This means that the ``pelix.ipopo.core`` must have been declared in the list of bundles (or installed and started programmatically). :param context: A :class:`~pelix.framework.BundleContext` object :raise BundleException: Error looking for the iPOPO service or starting a component
11,672
def refresh_db(**kwargs):
    salt.utils.pkg.clear_rtag(__opts__)
    cmd = ['port', 'selfupdate']  # per the docstring: ``port selfupdate``
    return salt.utils.mac_utils.execute_return_success(cmd)
Update ports with ``port selfupdate`` CLI Example: .. code-block:: bash salt mac pkg.refresh_db
11,673
def _tryMatch(self, textToMatchObject):
    for rule in self.context.rules:
        ruleTryMatchResult = rule.tryMatch(textToMatchObject)
        if ruleTryMatchResult is not None:
            _logger.debug(..., rule.shortId(),  # log message literal lost
                          textToMatchObject.currentColumnIndex,
                          self.context.parser.syntax.name,
                          self.context.name)
            return ruleTryMatchResult
    else:
        return None
Try to match the context's rules against the text. Returns (count, matchedRule), or (None, None) if nothing matches
11,674
def nonterminal(n):
    def match_nonterminal(s, grm=None, pos=0):
        if grm is None:
            grm = {}
        expr = grm[n]
        return expr(s, grm, pos)
    return match_nonterminal
Create a PEG function to match a nonterminal.
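A tiny illustration with a hypothetical terminal matcher that follows the same (s, grm, pos) call convention and returns the new position on success:
>>> def literal(c):  # assumed helper, not part of the original library
...     def match(s, grm=None, pos=0):
...         return pos + 1 if s[pos:pos + 1] == c else None
...     return match
>>> grm = {'a': literal('x')}
>>> nonterminal('a')('xyz', grm, 0)
1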
11,675
def get_image_path(definition):
    # path components and the definition key were lost; '...' marks the gaps
    path = resources_path(..., ..., ... % definition[...])
    if os.path.exists(path):
        return path
    else:
        return not_set_image_path
Helper to get path of image from a definition in resource directory. :param definition: A definition (hazard, exposure). :type definition: dict :returns: The definition's image path. :rtype: str
11,676
def get_anki_phrases(lang='eng', limit=None):
    # the default language code is assumed; the 'en' check is implied by
    # the call to get_anki_phrases_english
    lang = lang.strip().lower()[:3]
    lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang
    if lang[:2] == 'en':
        return get_anki_phrases_english(limit=limit)
    return sorted(get_data(lang).iloc[:, -1].str.strip().values)
Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
11,677
def is_bool_dtype(arr_or_dtype):
    if arr_or_dtype is None:
        return False
    try:
        dtype = _get_dtype(arr_or_dtype)
    except TypeError:
        return False
    if isinstance(arr_or_dtype, CategoricalDtype):
        arr_or_dtype = arr_or_dtype.categories
    if isinstance(arr_or_dtype, ABCIndexClass):
        return dtype._is_boolean
    return issubclass(dtype.type, np.bool_)
Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True
11,678
def send_method_request(self, method: str, method_params: dict) -> dict:
    # 'v' and 'access_token' are the standard VK API parameter names
    url = '/'.join((self.METHOD_URL, method))
    method_params['v'] = self.API_VERSION
    if self._access_token:
        method_params['access_token'] = self._access_token
    response = self.post(url, method_params, timeout=10)
    response.raise_for_status()
    return json.loads(response.text)
Sends user-defined method and method params
11,679
def export_chat_invite_link(
    self,
    chat_id: Union[int, str]
) -> str:
    peer = self.resolve_peer(chat_id)
    if isinstance(peer, types.InputPeerChat):
        return self.send(
            functions.messages.ExportChatInvite(
                peer=peer.chat_id
            )
        ).link
    elif isinstance(peer, types.InputPeerChannel):
        return self.send(
            functions.channels.ExportInvite(
                channel=peer
            )
        ).link
Use this method to generate a new invite link for a chat; any previously generated link is revoked. You must be an administrator in the chat for this to work and have the appropriate admin rights. Args: chat_id (``int`` | ``str``): Unique identifier for the target chat or username of the target channel/supergroup (in the format @username). Returns: On success, the exported invite link as string is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
11,680
def _decodeAddressField(byteIter, smscField=False, log=False):
    addressLen = next(byteIter)
    if addressLen > 0:
        toa = next(byteIter)
        ton = (toa & 0x70)  # type of number
        if ton == 0x50:
            # alphanumeric address, GSM-7 coded
            addressLen = int(math.ceil(addressLen / 2.0))
            septets = unpackSeptets(byteIter, addressLen)
            addressValue = decodeGsm7(septets)
            return (addressValue, (addressLen + 2))
        else:
            # semi-octet coded address
            if smscField:
                addressValue = decodeSemiOctets(byteIter, addressLen - 1)
            else:
                if addressLen % 2:
                    addressLen = int(addressLen / 2) + 1
                else:
                    addressLen = int(addressLen / 2)
                addressValue = decodeSemiOctets(byteIter, addressLen)
                addressLen += 1
            if ton == 0x10:
                # international number: prefix with '+'
                addressValue = '+' + addressValue
            return (addressValue, (addressLen + 1))
    else:
        return (None, 1)
Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and amount of bytes read (value is None if it is empty (zero-length)) :rtype: tuple
11,681
def tobytes(self):
    d = offsetcopy(self._datastore, 0).rawbytes
    unusedbits = 8 - self.len % 8
    if unusedbits != 8:
        d[-1] &= (0xff << unusedbits)
    return bytes(d)
Return the bitstring as bytes, padding with zero bits if needed. Up to seven zero bits will be added at the end to byte align.
11,682
def check_extension(conn, extension: str) -> bool:
    query = ...  # SQL query literal lost
    with conn.cursor() as cursor:
        cursor.execute(query, (extension,))
        result = cursor.fetchone()
    if result is None:
        raise psycopg2.ProgrammingError(
            ...,  # error message literal lost
            extension
        )
    else:
        extension_version = result[0]
    return bool(extension_version)
Check to see if an extension is installed.
11,683
def get_git_branch(git_path='git'):
    # the git subcommand arguments were lost; '...' marks them
    branch_match = call((git_path, ..., ..., ...))
    if branch_match == "HEAD":
        return None
    else:
        return os.path.basename(branch_match)
Returns the name of the current git branch
11,684
def encode_notifications(tokens, notifications):
    # 'hex', the compact separators and the empty joiner are inferred:
    # the '32s' struct field expects 32 raw token bytes, and the packed
    # structs are concatenated into a single payload
    fmt = "!BH32sH%ds"
    structify = lambda t, p: struct.pack(fmt % len(p), 0, 32, t, len(p), p)
    binaryify = lambda t: t.decode('hex')
    if type(notifications) is dict and type(tokens) in (str, unicode):
        tokens, notifications = ([tokens], [notifications])
    if type(notifications) is list and type(tokens) is list:
        return ''.join(map(lambda y: structify(*y),
                           ((binaryify(t),
                             json.dumps(p, separators=(',', ':'),
                                        ensure_ascii=False).encode('utf-8'))
                            for t, p in zip(tokens, notifications))))
Returns the encoded bytes of tokens and notifications tokens a list of tokens or a string of only one token notifications a list of notifications or a dictionary of only one
11,685
def _from_dict(cls, _dict):
    # key names are inferred from the value constructors
    args = {}
    if 'document_label' in _dict:
        args['document_label'] = _dict.get('document_label')
    if 'location' in _dict:
        args['location'] = Location._from_dict(_dict.get('location'))
    if 'text' in _dict:
        args['text'] = _dict.get('text')
    if 'types' in _dict:
        args['types'] = [
            TypeLabelComparison._from_dict(x) for x in (_dict.get('types'))
        ]
    if 'categories' in _dict:
        args['categories'] = [
            CategoryComparison._from_dict(x) for x in (_dict.get('categories'))
        ]
    if 'attributes' in _dict:
        args['attributes'] = [
            Attribute._from_dict(x) for x in (_dict.get('attributes'))
        ]
    return cls(**args)
Initialize a UnalignedElement object from a json dictionary.
11,686
def read_input_data(filename):
    logging.info(..., filename)  # log message literal lost
    input_file = open(filename, 'r')
    data = []
    labels = []
    for line in input_file:
        tokens = line.split(..., 1)  # separator literal lost
        labels.append(tokens[0].strip())
        data.append(tokens[1].strip())
    return labels, data
Helper function to get training data
11,687
def visit_snippet(self, node):
    # node keys are inferred from standard Sphinx highlight-visitor code;
    # '...' marks HTML markup strings that were lost
    lang = self.highlightlang
    linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']

    def warner(msg):
        self.builder.warn(msg, (self.builder.current_docname, node.line))

    highlighted = self.highlighter.highlight_block(node.rawsource, lang,
                                                   warn=warner, linenos=linenos,
                                                   **highlight_args)
    starttag = self.starttag(node, 'div', suffix='',
                             CLASS='highlight-%s' % lang)
    self.body.append(starttag)
    self.body.append(... % (fname,))
    self.body.append(highlighted)
    self.body.append(...)
    raise nodes.SkipNode
HTML document generator visit handler
11,688
def sub(x, y, context=None):
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_sub,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
Return ``x`` - ``y``.
11,689
def check_geophysical_vars_fill_value(self, ds):
    # '_FillValue' is implied by the docstring (fill-value check)
    results = []
    for geo_var in get_geophysical_variables(ds):
        results.append(
            self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue',
                               BaseCheck.MEDIUM),
        )
    return results
Check that geophysical variables contain fill values. :param netCDF4.Dataset ds: An open netCDF dataset
11,690
def make_request(self, session, url, **kwargs):
    log.debug(... % (url, kwargs))  # log message literal lost
    return session.post(url, **kwargs)
Make a HTTP POST request. :param url: The URL to post to. :param data: The data to post. :returns: The response to the request. :rtype: requests.Response
11,691
def _perp_eigendecompose(matrix: np.ndarray,
                         rtol: float = 1e-5,
                         atol: float = 1e-8,
                         ) -> Tuple[np.array, List[np.ndarray]]:
    vals, cols = np.linalg.eig(matrix)
    vecs = [cols[:, i] for i in range(len(cols))]
    for i in range(len(vecs)):
        vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim))
    n = len(vecs)
    groups = _group_similar(
        list(range(n)),
        lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol))
    for g in groups:
        # QR (Gram-Schmidt) makes each eigenspace's vectors perpendicular
        q, _ = np.linalg.qr(np.hstack([vecs[i] for i in g]))
        for i in range(len(g)):
            vecs[g[i]] = q[:, i]
    return vals, vecs
An eigendecomposition that ensures eigenvectors are perpendicular. numpy.linalg.eig doesn't guarantee that eigenvectors from the same eigenspace will be perpendicular. This method uses Gram-Schmidt to recover a perpendicular set. It further checks that all eigenvectors are perpendicular and raises an ArithmeticError otherwise. Args: matrix: The matrix to decompose. rtol: Relative threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. atol: Absolute threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. Returns: The eigenvalues and column eigenvectors. The i'th eigenvalue is associated with the i'th column eigenvector. Raises: ArithmeticError: Failed to find perpendicular eigenvectors.
11,692
def _diff_cache_cluster(current, desired):
    # the munging section and the 'modifiable' dict are reconstructed from
    # the fused key names that survived extraction; treat them as inferred
    if 'SecurityGroups' in current:
        current['SecurityGroupIds'] = [s['SecurityGroupId'] for s in current['SecurityGroups']]
    if 'CacheSecurityGroups' in current:
        current['CacheSecurityGroupNames'] = [c['CacheSecurityGroupName']
                                              for c in current['CacheSecurityGroups']]
    if 'NotificationConfiguration' in current:
        current['NotificationTopicArn'] = current['NotificationConfiguration']['TopicArn']
        current['NotificationTopicStatus'] = current['NotificationConfiguration']['TopicStatus']
    if 'CacheParameterGroup' in current:
        current['CacheParameterGroupName'] = current['CacheParameterGroup']['CacheParameterGroupName']

    modifiable = {
        'AutoMinorVersionUpgrade': 'AutoMinorVersionUpgrade',
        'AZMode': 'AZMode',
        'CacheNodeType': 'CacheNodeType',
        'CacheNodeIdsToRemove': None,
        'CacheParameterGroupName': 'CacheParameterGroupName',
        'CacheSecurityGroupNames': 'CacheSecurityGroupNames',
        'EngineVersion': 'EngineVersion',
        'NewAvailabilityZones': None,
        'NotificationTopicArn': 'NotificationTopicArn',
        'NotificationTopicStatus': 'NotificationTopicStatus',
        'NumCacheNodes': 'NumCacheNodes',
        'PreferredMaintenanceWindow': 'PreferredMaintenanceWindow',
        'SecurityGroupIds': 'SecurityGroupIds',
        'SnapshotRetentionLimit': 'SnapshotRetentionLimit',
        'SnapshotWindow': 'SnapshotWindow'
    }
    need_update = {}
    for m, o in modifiable.items():
        if m in desired:
            if not o:
                need_update[m] = desired[m]
            else:
                if m in current:
                    if current[m] != desired[m]:
                        need_update[m] = desired[m]
    return need_update
If you need to enhance what modify_cache_cluster() considers when deciding what is to be (or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that needs to be done to make the mappings meaningful should be done in the munging section below as well. This function will ONLY touch settings that are explicitly called out in 'desired' - any settings which might have previously been changed from their 'default' values will not be changed back simply by leaving them out of 'desired'. This is both intentional, and much, much easier to code :)
11,693
def _make_order(field_path, direction):
    return query_pb2.StructuredQuery.Order(
        field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
        direction=_enum_from_direction(direction),
    )
Helper for :meth:`order_by`.
11,694
def _create_state_data(self, context, resp_args, relay_state):
    state = super()._create_state_data(context, resp_args, relay_state)
    state["target_entity_id"] = context.target_entity_id_from_path()
    return state
Adds the frontend idp entity id to state See super class satosa.frontends.saml2.SAMLFrontend#save_state :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str]
11,695
def send_command_w_enter(self, *args, **kwargs):
    if len(args) > 1:
        raise ValueError("Must pass in delay_factor as keyword argument")
    delay_factor = kwargs.get("delay_factor", 1)
    kwargs["delay_factor"] = self.select_delay_factor(delay_factor)
    output = self.send_command_timing(*args, **kwargs)
    if "Press any key" in output or "Press Enter to" in output:
        new_args = list(args)
        if len(args) == 1:
            new_args[0] = self.RETURN
        else:
            kwargs["command_string"] = self.RETURN
        if not kwargs.get("max_loops"):
            kwargs["max_loops"] = 150
        output = self.send_command_timing(*new_args, **kwargs)
    if "802.11b Advanced Configuration" in output:
        # 'show run-config' has excessive delays; keep reading until quiet
        time.sleep(kwargs["delay_factor"] * 30)
        not_done = True
        i = 1
        while not_done and i <= 150:
            time.sleep(kwargs["delay_factor"] * 3)
            i += 1
            new_data = self.read_channel()
            if new_data:
                output += new_data
            else:
                not_done = False
    strip_prompt = kwargs.get("strip_prompt", True)
    if strip_prompt:
        # strip the trailing prompt twice
        output = self.strip_prompt(output)
        output = self.strip_prompt(output)
    return output
For 'show run-config' Cisco WLC adds a 'Press Enter to continue...' message Even though pagination is disabled show run-config also has excessive delays in the output which requires special handling. Arguments are the same as send_command_timing() method
11,696
def interactive(f):
    return f
Decorator for making functions appear as interactively defined. This results in the function being linked to the user_ns as globals() instead of the module globals().
11,697
def sanitizer(name, replacements=[(..., ...), (..., ...), (..., ...)]):
    # the default replacement pairs were lost from this sample
    for old, new in replacements:
        name = name.replace(old, new)
    return name
String sanitizer to avoid problematic characters in filenames.
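Since the default replacement pairs did not survive above, an example with explicit pairs shows the behaviour:
>>> sanitizer('my file/name', replacements=[(' ', '_'), ('/', '_')])
'my_file_name'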
11,698
def settings_module(self):
    settings_module = parse_conf_data(
        os.environ.get(
            self.ENVVAR_FOR_DYNACONF, self.SETTINGS_FILE_FOR_DYNACONF
        ),
        tomlfy=True,
    )
    if settings_module != getattr(self, "SETTINGS_MODULE", None):
        self.set("SETTINGS_MODULE", settings_module)
    return self.SETTINGS_MODULE
Gets SETTINGS_MODULE variable
11,699
def cupy_wrapper(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # convert numpy array arguments to cupy arrays
        args = list(args)
        for n, a in enumerate(args):
            if isinstance(a, np.ndarray):
                args[n] = cp.asarray(a)
        for k, v in kwargs.items():
            if isinstance(v, np.ndarray):
                kwargs[k] = cp.asarray(v)
        rtn = func(*args, **kwargs)
        # convert any cupy arrays in the return value back to numpy
        if isinstance(rtn, (list, tuple)):
            for n, a in enumerate(rtn):
                if isinstance(a, cp.core.core.ndarray):
                    rtn[n] = cp.asnumpy(a)
        else:
            if isinstance(rtn, cp.core.core.ndarray):
                rtn = cp.asnumpy(rtn)
        return rtn
    return wrapped
A wrapper function that converts numpy ndarray arguments to cupy arrays, and converts any cupy arrays returned by the wrapped function into numpy ndarrays.