Dataset columns: Unnamed: 0 — int64, values 0–389k; code — string, length 26–79.6k characters; docstring — string, length 1–46.9k characters.
11,400
def people_findByEmail(email):
    # method-name literal lost in extraction; reconstructed from the function name
    method = 'flickr.people.findByEmail'
    data = _doget(method, find_email=email)
    user = User(data.rsp.user.id, username=data.rsp.user.username.text)
    return user
Returns User object.
11,401
def _init_groups(self):
    for group_id, conf in self.group_conf.items():
        # the queue-size key literal was lost in extraction; "queue_size" is a guess
        self.parent_input_dict[group_id] = Queue(conf.get("queue_size", 0))
        self.parent_output_dict[group_id] = Queue(conf.get("queue_size", 0))
Initialize the group data. :return:
11,402
def notch(self, frequency, type='iir', filtfilt=True, **kwargs):
    # default for `type` lost in extraction; 'iir' restored from the docstring
    zpk = filter_design.notch(frequency, self.sample_rate.value,
                              type=type, **kwargs)
    return self.filter(*zpk, filtfilt=filtfilt)
Notch out a frequency in this `TimeSeries`. Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch type : `str`, optional type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- notched : `TimeSeries` a notch-filtered copy of the input `TimeSeries` See Also -------- TimeSeries.filter for details on the filtering method scipy.signal.iirdesign for details on the IIR filter design method
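A short usage sketch (hypothetical; `ts` stands for an existing `TimeSeries` and 60 Hz is just an example line frequency):

# Hypothetical usage: notch out 60 Hz mains hum, then its first harmonic.
notched = ts.notch(60)        # 'iir' is currently the only supported type
notched = notched.notch(120)  # returns a new TimeSeries, so calls chain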
11,403
def start(self, *_):
    try:
        box_configurations = self.bc_dao.run_query(QUERY_PROCESSES_FOR_BOX_ID(self.box_id))
        for box_config in box_configurations:
            handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_process,
                                  args=[box_config.process_name])
            self.thread_handlers[box_config.process_name] = handler
            handler.start()
            # log-message literals lost in extraction; wording reconstructed
            self.logger.info('Started monitoring timer for {0} with interval {1}'
                             .format(box_config.process_name, TRIGGER_INTERVAL))
    except LookupError as e:
        self.logger.error('Could not read box configurations: {0}'.format(e))
Read box configurations and start timers to start/monitor/kill processes.
11,404
def isemptyfile(filepath):
    exists = os.path.exists(safepath(filepath))
    if exists:
        filesize = os.path.getsize(safepath(filepath))
        return filesize == 0
    return False
Determine if the file both exists and is empty. Args: filepath (str, path): file path Returns: bool
11,405
def ensure(self, connection, func, *args, **kwargs):
    channel = None
    while True:
        try:
            if channel is None:
                channel = connection.channel()
            return func(channel, *args, **kwargs), channel
        except (connection.connection_errors, IOError):
            self._call_errback()
            channel = self.connect(connection)
Perform an operation until success Repeats in the face of connection errors, pursuant to retry policy.
11,406
def nn(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
    self.n, self.m = np.shape(self.get_data_x())
    x = np.asarray(x)
    if np.shape(x)[-1] != self.m:
        raise ValueError("x must consist of vectors of length %d but has shape %s"
                         % (self.m, np.shape(x)))
    if p < 1:
        raise ValueError("Only p-norms with 1<=p<=infinity permitted")
    retshape = np.shape(x)[:-1]
    if retshape != ():
        # np.object/np.float/np.int were removed from modern NumPy; builtins used instead
        if k is None:
            dd = np.empty(retshape, dtype=object)
            ii = np.empty(retshape, dtype=object)
        elif k > 1:
            dd = np.empty(retshape + (k,), dtype=float)
            dd.fill(np.inf)
            ii = np.empty(retshape + (k,), dtype=int)
            ii.fill(self.n)
        elif k == 1:
            dd = np.empty(retshape, dtype=float)
            dd.fill(np.inf)
            ii = np.empty(retshape, dtype=int)
            ii.fill(self.n)
        else:
            # `% k` added: the original message had a %s with no argument
            raise ValueError("Requested %s nearest neighbors; acceptable numbers are "
                             "integers greater than or equal to one, or None" % k)
        for c in np.ndindex(retshape):
            hits = self.__query(x[c], k=k, eps=eps, p=p,
                                distance_upper_bound=distance_upper_bound)
            if k is None:
                dd[c] = [d for (d, i) in hits]
                ii[c] = [i for (d, i) in hits]
            elif k > 1:
                for j in range(len(hits)):
                    dd[c + (j,)], ii[c + (j,)] = hits[j]
            elif k == 1:
                if len(hits) > 0:
                    dd[c], ii[c] = hits[0]
                else:
                    dd[c] = np.inf
                    ii[c] = self.n
        return dd, ii
    else:
        hits = self.__query(x, k=k, eps=eps, p=p,
                            distance_upper_bound=distance_upper_bound)
        if k is None:
            return [d for (d, i) in hits], [i for (d, i) in hits]
        elif k == 1:
            if len(hits) > 0:
                return hits[0]
            return np.inf, self.n
        elif k > 1:
            dd = np.empty(k, dtype=float)
            dd.fill(np.inf)
            ii = np.empty(k, dtype=int)
            ii.fill(self.n)
            for j in range(len(hits)):
                dd[j], ii[j] = hits[j]
            return dd, ii
        else:
            raise ValueError("Requested %s nearest neighbors; acceptable numbers are "
                             "integers greater than or equal to one, or None" % k)
Query the tree for nearest neighbors Parameters ---------- x : array_like, last dimension self.m An array of points to query. k : integer The number of nearest neighbors to return. eps : nonnegative float Return approximate nearest neighbors; the kth returned value is guaranteed to be no further than (1+eps) times the distance to the real kth nearest neighbor. p : float, 1<=p<=infinity Which Minkowski p-norm to use. 1 is the sum-of-absolute-values "Manhattan" distance 2 is the usual Euclidean distance infinity is the maximum-coordinate-difference distance distance_upper_bound : nonnegative float Return only neighbors within this distance. This is used to prune tree searches, so if you are doing a series of nearest-neighbor queries, it may help to supply the distance to the nearest neighbor of the most recent point. Returns ------- d : float or array of floats The distances to the nearest neighbors. If x has shape tuple+(self.m,), then d has shape tuple if k is one, or tuple+(k,) if k is larger than one. Missing neighbors (e.g. when k > n or distance_upper_bound is given) are indicated with infinite distances. If k is None, then d is an object array of shape tuple, containing lists of distances. In either case the hits are sorted by distance (nearest first). i : integer or array of integers The locations of the neighbors in self.data. i is the same shape as d.
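A usage sketch of the query interface described above (assuming `tree` wraps 2-D data; names are illustrative):

# Hypothetical usage: one point -> distances and indices of the 3 nearest.
d, i = tree.nn([0.1, 0.2], k=3)
# A batch of points returns arrays shaped like the batch plus (k,).
d, i = tree.nn([[0.1, 0.2], [0.9, 0.4]], k=3)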
11,407
def compute_depth(self):
    left_depth = self.left_node.compute_depth() if self.left_node else 0
    right_depth = self.right_node.compute_depth() if self.right_node else 0
    return 1 + max(left_depth, right_depth)
Recursively computes true depth of the subtree. Should only be needed for debugging. Unless something is wrong, the depth field should reflect the correct depth of the subtree.
11,408
def __geomToPointList(self, geom):
    if arcpyFound and isinstance(geom, arcpy.Polyline):
        feature_geom = []
        fPart = []
        wkt = None
        wkid = None
        for part in geom:
            fPart = []
            for pnt in part:
                if geom.spatialReference is None:
                    if self._wkid is None and self._wkt is not None:
                        wkt = self._wkt
                    else:
                        wkid = self._wkid
                else:
                    wkid = geom.spatialReference.factoryCode
                fPart.append(Point(coord=[pnt.X, pnt.Y],
                                   wkid=wkid, wkt=wkt, z=pnt.Z, m=pnt.M))
            feature_geom.append(fPart)
        return feature_geom
converts a geometry object to a common.Geometry object
11,409
def load_step_specifications(self, file_name, short=False, dataset_number=None):
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)
    if "step" not in step_specs.columns:
        self.logger.info("step col is missing")
        raise IOError
    if "type" not in step_specs.columns:
        self.logger.info("type col is missing")
        raise IOError
    if not short and "cycle" not in step_specs.columns:
        self.logger.info("cycle col is missing")
        raise IOError
    self.make_step_table(custom_step_definition=True,
                         step_specifications=step_specs, short=short)
Load a table that contains step-type definitions. This function loads a file containing a specification for each step, or for each (cycle_number, step_number) combination if short==False. The allowed step_cycle specifications are stored in the variable cellreader.list_of_step_types.
11,410
def configure(self, cnf={}, **kw):
    # NOTE: every option-name string literal in this method was lost in
    # extraction; the names below ('orient', 'tickpos', 'showvalue', ...) are
    # reconstructed from the ttkwidgets TickScale API (best-effort restoration).
    kw.update(cnf)
    reinit = False
    if 'orient' in kw:
        if kw['orient'] == 'vertical':
            self._style_name = self._style_name.replace('Horizontal', 'Vertical')
            if 'tickpos' not in kw:
                self._tickpos = 'w'
        else:
            self._style_name = self._style_name.replace('Vertical', 'Horizontal')
            if 'tickpos' not in kw:
                self._tickpos = 's'
        self.scale.configure(style=self._style_name)
        reinit = True
    if 'showvalue' in kw:
        self._showvalue = bool(kw.pop('showvalue'))
        reinit = True
    if 'tickinterval' in kw:
        self._tickinterval = kw.pop('tickinterval')
        reinit = True
    if 'tickpos' in kw:
        tickpos = kw.pop('tickpos')
        orient = kw.get('orient', str(self.cget('orient')))
        if orient == 'vertical' and tickpos not in ['w', 'e']:
            raise ValueError("For a vertical TickScale, 'tickpos' must be 'w' or 'e'.")
        elif orient == 'horizontal' and tickpos not in ['n', 's']:
            raise ValueError("For a horizontal TickScale, 'tickpos' must be 'n' or 's'.")
        elif orient in ['vertical', 'horizontal']:
            self._tickpos = tickpos
            reinit = True
    if 'labelpos' in kw:
        labelpos = kw.pop('labelpos')
        if labelpos not in ['n', 's', 'e', 'w']:
            raise ValueError("'labelpos' must be 'n', 's', 'e' or 'w'.")
        else:
            self._labelpos = labelpos
            reinit = True
    if 'resolution' in kw:
        try:
            self._resolution = float(kw.pop('resolution'))
            if self._resolution < 0:
                raise ValueError("'resolution' must be non negative.")
        except ValueError:
            raise TypeError("'resolution' must be a float.")
        if self._tickinterval != 0 and self._resolution > self._tickinterval:
            self._tickinterval = self._resolution
        reinit = True
    if 'digits' in kw:
        digits = kw.pop('digits')
        if not isinstance(digits, int):
            raise TypeError("'digits' must be an integer.")
        elif digits < 0:
            self._digits = digits
            self._formatter = '{:g}'
            reinit = True
        else:
            self._digits = digits
            self._formatter = '{:.' + str(self._digits) + 'f}'
            interv = self._get_precision(self._tickinterval)
            resol = self._get_precision(self._resolution)
            start = kw.get('from_', kw.get('from', self._start))
            end = kw.get('to', self.scale.cget('to'))
            from_ = self._get_precision(start)
            to = self._get_precision(end)
            d = max(interv, resol, from_, to)
            if self._digits < d:
                self._resolution = float('1e-{}'.format(self._digits))
                self._tickinterval = round(self._tickinterval, self._digits)
                if self._resolution > self._tickinterval:
                    self._tickinterval = self._resolution
                kw['to'] = round(end, self._digits)
                if 'from' in kw:
                    del kw['from']
                kw['from_'] = round(start, self._digits)
            reinit = True
    elif self._digits > 0:
        start = kw.get('from_', kw.get('from', self._start))
        end = kw.get('to', self.scale.cget('to'))
        from_ = self._get_precision(start)
        to = self._get_precision(end)
        interv = self._get_precision(self._tickinterval)
        resol = self._get_precision(self._resolution)
        digits = max(self._digits, interv, resol, from_, to)
        if digits != self._digits:
            self._digits = digits
            self._formatter = '{:.' + str(self._digits) + 'f}'
            reinit = True
    if 'variable' in kw:
        self._var = kw['variable']
        if not self._var:
            self._var = tk.DoubleVar(self, self.get())
            kw['variable'] = self._var
        try:
            self._var.trace_add('write', self._increment)
        except AttributeError:  # older tkinter fallback
            self._var.trace('w', self._increment)
    self.scale.configure(**kw)
    if 'from' in kw or 'from_' in kw or 'to' in kw:
        self._extent = self.scale.cget('to') - self.scale.cget('from')
        self._start = self.scale.cget('from')
        reinit = True
    if 'style' in kw:
        self._style_name = kw['style']
        if not self._style_name:
            self._style_name = '%s.TScale' % (str(self.scale.cget('orient')).capitalize())
    if reinit:
        self._init()
    if 'style' in kw:
        self._apply_style()
Configure resources of the widget. To get the list of options for this widget, call the method :meth:`~TickScale.keys`. See :meth:`~TickScale.__init__` for a description of the widget specific option.
11,411
def last_valid_index(self):
    def last_valid_index_builder(df):
        df.index = pandas.RangeIndex(len(df.index))
        return df.apply(lambda df: df.last_valid_index())
    func = self._build_mapreduce_func(last_valid_index_builder)
    first_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
    return self.index[first_result]
Returns index of last non-NaN/NULL value. Return: Scalar of index name.
11,412
def open(self, verbose):
    if verbose:
        # message literal lost in extraction; wording reconstructed
        print('Opening serial port %s' % self.port_id)
    try:
        self.arduino.close()
        time.sleep(1)
        self.arduino.open()
        time.sleep(1)
        return self.arduino
    except Exception:
        raise
Open the serial port using the configuration data. Returns a reference to this instance.
11,413
def get_organisations(self, service_desk_id=None, start=0, limit=50):
    # URL literals lost in extraction; paths reconstructed from the Jira Service Desk REST API
    url_without_sd_id = 'rest/servicedeskapi/organization'
    url_with_sd_id = 'rest/servicedeskapi/servicedesk/{}/organization'.format(service_desk_id)
    params = {}
    if start is not None:
        params['start'] = int(start)
    if limit is not None:
        params['limit'] = int(limit)
    if service_desk_id is None:
        return self.get(url_without_sd_id, headers=self.experimental_headers, params=params)
    return self.get(url_with_sd_id, headers=self.experimental_headers, params=params)
Returns a list of organizations in the Jira instance. If the user is not an agent, the resource returns a list of organizations the user is a member of. :param service_desk_id: OPTIONAL: str Get organizations from single Service Desk :param start: OPTIONAL: int The starting index of the returned objects. Base index: 0. See the Pagination section for more details. :param limit: OPTIONAL: int The maximum number of users to return per page. Default: 50. See the Pagination section for more details. :return:
11,414
def _build_likelihood(self):
    fmean, fvar = self._build_predict(self.X, full_cov=False)
    return tf.reduce_sum(self.likelihood.variational_expectations(fmean, fvar, self.Y))
This function computes the optimal density for v, q*(v), up to a constant
11,415
def from_desmond(cls, path, **kwargs):
    dms = DesmondDMSFile(path)
    # key literal lost in extraction; 'positions' inferred from the assigned variable
    pos = kwargs.pop('positions', dms.getPositions())
    return cls(master=dms, topology=dms.getTopology(), positions=pos, path=path, **kwargs)
Loads a topology from a Desmond DMS file located at `path`. Arguments --------- path : str Path to a Desmond DMS file
11,416
def from_packages(cls, parse_context, rev='', packages=None, **kwargs):
    # default literals lost in extraction; '' (root package) restored from the docstring
    for pkg in packages or ('',):
        cls.from_package(parse_context, pkg=pkg, rev=rev, **kwargs)
:param list packages: The package import paths within the remote library; by default just the root package will be available (equivalent to passing `packages=['']`). :param string rev: Identifies which version of the remote library to download. This could be a commit SHA (git), node id (hg), etc. If left unspecified the version will default to the latest available. It's highly recommended to not accept the default and instead pin the rev explicitly for repeatable builds.
11,417
def _should_really_index(self, instance):
    if self._should_index_is_method:
        is_method = inspect.ismethod(self.should_index)
        try:
            count_args = len(inspect.signature(self.should_index).parameters)
        except AttributeError:  # Python 2 fallback
            count_args = len(inspect.getargspec(self.should_index).args)
        if is_method or count_args == 1:  # `is 1` replaced: identity comparison with ints is a bug
            return self.should_index(instance)
        else:
            return self.should_index()
    else:
        attr_type = type(self.should_index)
        if attr_type is DeferredAttribute:
            attr_value = self.should_index.__get__(instance, None)
        elif attr_type is str:
            attr_value = getattr(instance, self.should_index)
        elif attr_type is property:
            attr_value = self.should_index.__get__(instance)
        else:
            # message literal lost in extraction; wording reconstructed
            raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(
                self.should_index))
        if type(attr_value) is not bool:
            raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % (
                instance.__class__.__name__, self.should_index))
        return attr_value
Return True if according to should_index the object should be indexed.
11,418
def get_metrics(self, from_time=None, to_time=None, metrics=None, ifs=[],
                storageIds=[], view=None):
    # query-parameter literals lost in extraction; names reconstructed from the
    # cm_api host metrics endpoint (best-effort restoration)
    params = {}
    if ifs:
        params['ifs'] = ifs
    elif ifs is None:
        params['queryNw'] = 'false'
    if storageIds:
        params['storageIds'] = storageIds
    elif storageIds is None:
        params['queryStorage'] = 'false'
    return self._get_resource_root().get_metrics(self._path() + '/metrics',
                                                 from_time, to_time, metrics, view, params)
This endpoint is not supported as of v6. Use the timeseries API instead. To get all metrics for a host with the timeseries API use the query: 'select * where hostId = $HOST_ID'. To get specific metrics for a host use a comma-separated list of the metric names as follows: 'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'. For more information see http://tiny.cloudera.com/tsquery_doc @param from_time: A datetime; start of the period to query (optional). @param to_time: A datetime; end of the period to query (default = now). @param metrics: List of metrics to query (default = all). @param ifs: network interfaces to query. Default all, use None to disable. @param storageIds: storage IDs to query. Default all, use None to disable. @param view: View to materialize ('full' or 'summary') @return: List of metrics and their readings.
11,419
def interleaved_filename(file_path):
    if not isinstance(file_path, tuple):
        raise OneCodexException("Cannot get the interleaved filename without a tuple.")
    if re.match(".*[._][Rr][12][_.].*", file_path[0]):
        return re.sub("[._][Rr][12]", "", file_path[0])
    else:
        warnings.warn("Paired-end filenames do not match--are you sure they are correct?")
        return file_path[0]
Return filename used to represent a set of paired-end files. Assumes Illumina-style naming conventions where each file has _R1_ or _R2_ in its name.
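An illustration of the naming convention the function assumes (hypothetical filenames):

# Hypothetical usage with Illumina-style paired-end names.
interleaved_filename(("sample_R1_001.fastq", "sample_R2_001.fastq"))
# -> "sample_001.fastq"  (the [._]R[12] token is stripped from the first name)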
11,420
def _which(executable, flags=os.X_OK, abspath_only=False, disallow_symlinks=False):
    def _can_allow(p):
        if not os.access(p, flags):
            return False
        if abspath_only and not os.path.abspath(p):
            log.warn('Ignoring %s: not an absolute path', p)  # message literal lost in extraction
            return False
        if disallow_symlinks and os.path.islink(p):
            log.warn('Ignoring %s: symlink', p)  # message literal lost in extraction
            return False
        return True

    result = []
    # 'PATHEXT'/'PATH' restored from the docstring's description of the lookup
    exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
    path = os.environ.get('PATH', None)
    if path is None:
        return []
    for p in os.environ.get('PATH', '').split(os.pathsep):
        p = os.path.join(p, executable)
        if _can_allow(p):
            result.append(p)
        for e in exts:
            pext = p + e
            if _can_allow(pext):
                result.append(pext)
    return result
Borrowed from Twisted's :mod:`twisted.python.procutils`. Search PATH for executable files with the given name. On newer versions of MS-Windows, the PATHEXT environment variable will be set to the list of file extensions for files considered executable. This will normally include things like ".EXE". This function will also find files with the given name ending with any of these extensions. On MS-Windows the only flag that has any meaning is os.F_OK. Any other flags will be ignored. Note: This function does not help us prevent an attacker who can already manipulate the environment's PATH settings from placing malicious code higher in the PATH. It also happily follows links. :param str name: The name for which to search. :param int flags: Arguments to L{os.access}. :rtype: list :returns: A list of the full paths to files found, in the order in which they were found.
11,421
def start_commit(self, repo_name, branch=None, parent=None, description=None):
    req = proto.StartCommitRequest(
        parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent),
        branch=branch, description=description)
    res = self.stub.StartCommit(req, metadata=self.metadata)
    return res
Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity.
11,422
def get_windzone(conn, geometry):
    # geometry-type literals lost in extraction; 'Polygon'/'MultiPolygon'
    # inferred from the centroid fallback
    if geometry.geom_type in ['Polygon', 'MultiPolygon']:
        coords = geometry.centroid
    else:
        coords = geometry
    # the SQL query string itself was lost in extraction; only its
    # .format(wkt=...) call survives, so the gap is left explicit
    sql = "...".format(wkt=coords.wkt)
    zone = conn.execute(sql).fetchone()
    if zone is not None:
        zone = zone[0]
    else:
        zone = 0
    return zone
Find windzone from map.
11,423
def res_block(nf, dense:bool=False, norm_type:Optional[NormType]=NormType.Batch,
              bottle:bool=False, **conv_kwargs):
    "Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
    norm2 = norm_type
    if not dense and (norm_type == NormType.Batch): norm2 = NormType.BatchZero
    nf_inner = nf//2 if bottle else nf
    return SequentialEx(conv_layer(nf, nf_inner, norm_type=norm_type, **conv_kwargs),
                        conv_layer(nf_inner, nf, norm_type=norm2, **conv_kwargs),
                        MergeLayer(dense))
Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`.
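A minimal usage sketch, assuming the fastai v1 API this snippet comes from:

# Hypothetical usage inside a custom model head.
block = res_block(64)              # standard residual block, additive merge
dense = res_block(64, dense=True)  # DenseNet-style concatenating merge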
11,424
def get_tour_list(self):
    # Every dict-key string literal in this function was lost in extraction;
    # '...' below marks the unrecoverable keys rather than guessing them.
    resp = json.loads(urlopen(self.tour_list_url.format(1)).read().decode())
    total_count = resp[...][...][...]
    resp = json.loads(urlopen(self.tour_list_url.format(total_count)).read().decode())
    data = resp[...][...][...][...]
    # keychain originally mapped 14 source keys to (target_key, default) pairs,
    # all defaulting to None except one defaulting to 0; the names were lost
    keychain = {...: (..., None)}
    for tour in data:
        _dict_key_changer(tour, keychain)
        # two date fields are truncated to their first 8 characters (YYYYMMDD)
        tour[...] = str(tour.pop(...))[:8] if ... in tour else None
        tour[...] = str(tour.pop(...))[:8] if ... in tour else None
        tour.pop(..., None)
        tour.pop(..., None)
        tour.pop(..., None)
    return data
Retrieve the full tour list. :rtype: list
11,425
def fill_tree(self, tree, input_dict):
    def add_element(item, key, value):
        child_name = QtGui.QStandardItem(key)
        child_name.setDragEnabled(False)
        child_name.setSelectable(False)
        child_name.setEditable(False)
        if isinstance(value, dict):
            for key_child, value_child in value.items():  # typo `ket_child` fixed
                add_element(child_name, key_child, value_child)
            child_value = QtGui.QStandardItem('')
        else:
            child_value = QtGui.QStandardItem(str(value))
            child_value.setData(value)
        child_value.setDragEnabled(False)
        child_value.setSelectable(False)
        child_value.setEditable(False)
        item.appendRow([child_name, child_value])

    for index, (loaded_item, loaded_item_settings) in enumerate(input_dict.items()):
        item = QtGui.QStandardItem(loaded_item)
        # the settings-key literal was lost in extraction; 'settings' is a guess
        for key, value in loaded_item_settings['settings'].items():
            add_element(item, key, value)
        value = QtGui.QStandardItem('')
        tree.model().appendRow([item, value])
        if tree == self.tree_loaded:
            item.setEditable(False)
        tree.setFirstColumnSpanned(index, self.tree_infile.rootIndex(), True)
Fills a tree with nested parameters. Args: tree: QtGui.QTreeView input_dict: dictionary or Parameter object Returns:
11,426
def from_string(cls, cl_function, dependencies=()):
    return_type, function_name, parameter_list, body = split_cl_function(cl_function)
    return SimpleCLFunction(return_type, function_name, parameter_list, body,
                            dependencies=dependencies)
Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL data type for this parameter declaration
11,427
def setStimRisefall(self):
    rf = self.ui.risefallSpnbx.value()
    self.tone.setRisefall(rf)
Sets the Risefall of the StimulusModel's tone from values pulled from this widget
11,428
def log_level(self, subsystem, level, **kwargs):
    args = (subsystem, level)
    # endpoint and decoder literals lost in extraction; values reconstructed
    # from the py-ipfs-api client conventions
    return self._client.request('/log/level', args, decoder='json', **kwargs)
r"""Changes the logging output of a running daemon. .. code-block:: python >>> c.log_level("path", "info") {'Message': "Changed log level of 'path' to 'info'\n"} Parameters ---------- subsystem : str The subsystem logging identifier (Use ``"all"`` for all subsystems) level : str The desired logging level. Must be one of: * ``"debug"`` * ``"info"`` * ``"warning"`` * ``"error"`` * ``"fatal"`` * ``"panic"`` Returns ------- dict : Status message
11,429
def process_view(self, request, view_func, view_args, view_kwargs):
    # attribute-name literal lost in extraction; 'profiler' is inferred from usage
    profiler = getattr(request, 'profiler', None)
    if profiler:
        try:
            return profiler.runcall(view_func, request, *view_args, **view_kwargs)
        finally:
            # NOTE: `original_get` is referenced but never defined in this
            # snippet; it is presumably captured earlier in the middleware
            request.GET = original_get
Run the profiler on _view_func_.
11,430
def _wrapped_method_with_watch_fn(self, f, *args, **kwargs):
    bound_args = signature(f).bind(*args, **kwargs)
    orig_watch = bound_args.arguments.get("watch")
    if orig_watch is not None:
        wrapped_watch = partial(self._call_in_reactor_thread, orig_watch)
        wrapped_watch = wraps(orig_watch)(wrapped_watch)
        bound_args.arguments["watch"] = wrapped_watch
    return f(**bound_args.arguments)
A wrapped method with a watch function. When this method is called, it will call the underlying method with the same arguments, *except* that if the ``watch`` argument isn't :data:`None`, it will be replaced with a wrapper around that watch function, so that the watch function will be called in the reactor thread. This means that the watch function can safely use Twisted APIs.
11,431
def get_updates(self, offset=None, limit=None, timeout=20, allowed_updates=None):
    json_updates = apihelper.get_updates(self.token, offset, limit, timeout, allowed_updates)
    ret = []
    for ju in json_updates:
        ret.append(types.Update.de_json(ju))
    return ret
Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned. :param allowed_updates: Array of string. List the types of updates you want your bot to receive. :param offset: Integer. Identifier of the first update to be returned. :param limit: Integer. Limits the number of updates to be retrieved. :param timeout: Integer. Timeout in seconds for long polling. :return: array of Updates
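A typical long-polling loop built on this method (a sketch; `bot` is an assumed configured client and `handle` a hypothetical callback):

# Track the last update id so already-seen updates are not fetched again.
offset = None
while True:
    for update in bot.get_updates(offset=offset, timeout=20):
        offset = update.update_id + 1
        handle(update)  # hypothetical application handler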
11,432
def maybe_start_recording(tokens, index):
    if tokens[index].type == TokenType.BeginRSTComment:
        return _RSTCommentBlockRecorder(index, tokens[index].line)
    return None
Return a new _RSTCommentBlockRecorder when it's time to record.
11,433
def _set_lim_and_transforms(self):
    LambertAxes._set_lim_and_transforms(self)
    yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0)
    yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0)
    yaxis_space = Affine2D().scale(1.0, 1.1)
    self._yaxis_transform = yaxis_stretch + self.transData
    yaxis_text_base = (yaxis_stretch + self.transProjection +
                       (yaxis_space + self.transAffine + self.transAxes))
    self._yaxis_text1_transform = yaxis_text_base + Affine2D().translate(-8.0, 0.0)
    self._yaxis_text2_transform = yaxis_text_base + Affine2D().translate(8.0, 0.0)
Setup the key transforms for the axes.
11,434
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    parser = create_parser()
    args = parser.parse_args(args)
    if args.verbose >= 2:
        level = logging.DEBUG
    elif args.verbose >= 1:
        level = logging.INFO
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)
    try:
        args.command(args)
    except pylink.JLinkException as e:
        # message literal lost in extraction; wording reconstructed
        sys.stderr.write('Error: %s%s' % (str(e), os.linesep))
        return 1
    return 0
Main command-line interface entrypoint. Runs the given subcommand or argument that were specified. If not given a ``args`` parameter, assumes the arguments are passed on the command-line. Args: args (list): list of command-line arguments Returns: Zero on success, non-zero otherwise.
11,435
def disable_beacons(self):
    # key and tag literals lost in extraction; names reconstructed from the
    # salt beacons module (best-effort restoration)
    self.opts['beacons']['enabled'] = False
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
                   tag='/salt/minion/minion_beacons_disabled_complete')
    return True
Disable beacons
11,436
def check(text):
    err = "uncomparables.misc"
    # the format placeholder in `msg` was lost in extraction; '{}' restored so
    # the .format(o.group(0)) call below has a target
    msg = "Comparison of an uncomparable: '{}' is not comparable."
    comparators = ["most", "more", "less", "least", "very", "quite",
                   "largely", "extremely", "increasingly", "kind of", "mildly"]
    uncomparables = ["absolute", "adequate", "chief", "complete", "correct",
                     "devoid", "entire", "false", "fatal", "favorite", "final",
                     "ideal", "impossible", "inevitable", "infinite",
                     "irrevocable", "main", "manifest", "only", "paramount",
                     "perfect", "perpetual", "possible", "preferable",
                     "principal", "singular", "stationary", "sufficient",
                     "true", "unanimous", "unavoidable", "unbroken", "uniform",
                     "unique", "universal", "void", "whole"]
    exceptions = [("more", "perfect"), ("more", "possible")]
    # raw strings avoid invalid escape sequences; `all` renamed to avoid
    # shadowing the builtin
    all_patterns = [r"\b" + i[0] + r"\s" + i[1] + r"[\W$]"
                    for i in itertools.product(comparators, uncomparables)
                    if i not in exceptions]
    occ = re.finditer("|".join(all_patterns), text.lower())
    return [(o.start(), o.end(), err, msg.format(o.group(0)), None) for o in occ]
Check the text.
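A behavior sketch (each hit is the (start, end, err, msg, replacement) tuple built above; the sentence is illustrative):

for start, end, code, message, _ in check("This is the most unique design."):
    print(start, end, code, message)  # flags 'most unique'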
11,437
def _get_command(classes):
    commands = {}
    # the path and split/replace literals were lost in extraction; 'setup.cfg'
    # is taken from the docstring, the rest are guesses
    setup_file = os.path.join(
        os.path.abspath(os.path.join(os.path.dirname(__file__), '..')), 'setup.cfg')
    for line in open(setup_file, 'r'):
        for cl in classes:
            if cl in line:
                commands[cl] = line.split('=')[0].strip().replace('_', '-')
    return commands
Associates each command class with its command name, as defined in setup.cfg.
11,438
def groups_remove_owner(self, room_id, user_id, **kwargs):
    # API-path literal lost in extraction; 'groups.removeOwner' inferred from
    # the method name and the Rocket.Chat REST API
    return self.__call_api_post('groups.removeOwner', roomId=room_id, userId=user_id, kwargs=kwargs)
Removes the role of owner from a user in the current Group.
11,439
def _create_input_transactions(self, addy):
    self._transactions.append(ProposedTransaction(
        address=addy,
        tag=self.tag,
        value=-addy.balance,
    ))
    # one zero-value transaction per additional security level
    for _ in range(addy.security_level - 1):
        self._transactions.append(ProposedTransaction(
            address=addy,
            tag=self.tag,
            value=0,
        ))
Creates transactions for the specified input address.
11,440
def update_one(self, filter_, document, **kwargs):
    self._valide_update_document(document)
    return self.__collect.update_one(filter_, document, **kwargs)
Update a single document matching the filter.
11,441
def get_pret_embs(self, word_dims=None):
    assert (self._pret_embeddings is not None), "No pretrained file provided."
    pret_embeddings = gluonnlp.embedding.create(self._pret_embeddings[0],
                                                source=self._pret_embeddings[1])
    embs = [None] * len(self._id2word)
    for idx, vec in enumerate(pret_embeddings.idx_to_vec):
        embs[idx] = vec.asnumpy()
    if word_dims is None:
        word_dims = len(pret_embeddings.idx_to_vec[0])
    for idx, emb in enumerate(embs):
        if emb is None:
            embs[idx] = np.zeros(word_dims)
    pret_embs = np.array(embs, dtype=np.float32)
    return pret_embs / np.std(pret_embs)
Read pre-trained embedding file Parameters ---------- word_dims : int or None vector size. Use `None` for auto-infer Returns ------- numpy.ndarray T x C numpy NDArray
11,442
def select_limit(self, table, cols='*', offset=0, limit=MAX_ROWS_PER_QUERY):
    # default for `cols` lost in extraction; '*' is a guess
    return self.fetch(self._select_limit_statement(table, cols, offset, limit))
Run a select query with an offset and limit parameter.
11,443
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
    d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
    return -d, d
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al. .. math:: b &= \sqrt{\frac{6}{NK + M}}\\ a &= -b Args: inmaps (int): Map size of an input Variable, :math:`N`. outmaps (int): Map size of an output Variable, :math:`M`. kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape. In above definition, :math:`K` is the product of shape dimensions. In Affine, the default value should be used. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF import nnabla.initializer as I x = nn.Variable([60,1,28,28]) lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64) w = I.UniformInitializer((lb,ub)) b = I.ConstantInitializer(0) h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv') References: * `Glorot and Bengio. Understanding the difficulty of training deep feedforward neural networks <http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
11,444
def get_types(self):
    content_types = set()
    for name in self.translators:
        content_types |= type_names[name]
    return content_types
Retrieve a set of all recognized content types for this translator object.
11,445
def coerce_value(type, value):
    if isinstance(type, GraphQLNonNull):
        return coerce_value(type.of_type, value)
    if value is None:
        return None
    if isinstance(type, GraphQLList):
        item_type = type.of_type
        if not isinstance(value, string_types) and isinstance(value, Iterable):
            return [coerce_value(item_type, item) for item in value]
        else:
            return [coerce_value(item_type, value)]
    if isinstance(type, GraphQLInputObjectType):
        fields = type.fields
        obj = {}
        for field_name, field in fields.items():
            if field_name not in value:
                if field.default_value is not None:
                    field_value = field.default_value
                    obj[field.out_name or field_name] = field_value
            else:
                field_value = coerce_value(field.type, value.get(field_name))
                obj[field.out_name or field_name] = field_value
        return type.create_container(obj)
    assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
    return type.parse_value(value)
Given a type and any value, return a runtime value coerced to match the type.
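A sketch of the list-coercion branch, assuming graphql-core style type objects:

# Hypothetical usage: scalars passed for a list type are wrapped in a list.
int_list = GraphQLList(GraphQLInt)
coerce_value(int_list, [1, 2, 3])  # -> [1, 2, 3]
coerce_value(int_list, 5)          # -> [5]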
11,446
def _converged(self):
    prior = self.global_prior_[0:self.prior_size]
    posterior = self.global_posterior_[0:self.prior_size]
    diff = prior - posterior
    max_diff = np.max(np.fabs(diff))
    if self.verbose:
        _, mse = self._mse_converged()
        diff_ratio = np.sum(diff ** 2) / np.sum(posterior ** 2)
        # log-format literal lost in extraction; wording reconstructed
        logger.info('Maximum absolute difference %f, mse %f, diff_ratio %f'
                    % ((max_diff, mse, diff_ratio)))
    if max_diff > self.threshold:
        return False, max_diff
    else:
        return True, max_diff
Check convergence based on maximum absolute difference Returns ------- converged : boolean Whether the parameter estimation converged. max_diff : float Maximum absolute difference between prior and posterior.
11,447
def getKwCtrlConf(self, kw, fmt='dict'):
    # format-name literals lost in extraction; 'dict'/'json' restored from the docstring
    try:
        confd = self.ctrlconf_dict[kw]
        if fmt == 'dict':
            retval = confd
        else:
            retval = json.dumps(confd)
    except KeyError:
        self.getKw(kw)
        if self.confstr_epics != '':
            if fmt == 'dict':
                retval = ast.literal_eval(self.confstr_epics)
            elif fmt == 'json':
                retval = json.dumps(ast.literal_eval(self.confstr_epics))
            else:
                retval = self.confstr_epics
        else:
            retval = None
    return retval
return keyword's control configuration, followed after '!epics' notation :param kw: keyword name :param fmt: return format, 'raw', 'dict', 'json', default is 'dict'
11,448
def set_delegate(address=None, pubkey=None, secret=None):
    # key literals lost in extraction; the names below are guesses
    c.DELEGATE['ADDRESS'] = address
    c.DELEGATE['PUBKEY'] = pubkey
    c.DELEGATE['SECRET'] = secret
Set delegate parameters. Call set_delegate with no arguments to clear.
11,449
def save(self, data, xparent=None):
    # tag/attribute literals lost in extraction; 'dict'/'item'/'key' are guesses
    if xparent is not None:
        elem = ElementTree.SubElement(xparent, 'dict')
    else:
        elem = ElementTree.Element('dict')
    for key, value in sorted(data.items()):
        xitem = ElementTree.SubElement(elem, 'item')
        xitem.set('key', nstr(key))
        XmlDataIO.toXml(value, xitem)
    return elem
Saves the data from Python to XML. :param data | <variant> xparent | <xml.etree.ElementTree.Element> || None :return <xml.etree.ElementTree.Element>
11,450
def add_data(self, address, data):
    while len(data):
        region = self._map.get_region_for_address(address)
        if region is None:
            raise ValueError("no memory region defined for address 0x%08x" % address)
        if not region.is_flash:
            raise ValueError("memory region at address 0x%08x is not flash" % address)
        if region in self._builders:
            builder = self._builders[region]
        else:
            if region.flash is None:
                raise RuntimeError("flash memory region at address 0x%08x has no flash instance" % address)
            builder = region.flash.get_flash_builder()
            builder.log_performance = False
            self._builders[region] = builder
        programLength = min(len(data), region.end - address + 1)
        assert programLength != 0
        builder.add_data(address, data[:programLength])
        data = data[programLength:]
        address += programLength
        self._total_data_size += programLength
    return self
! @brief Add a chunk of data to be programmed. The data may cross flash memory region boundaries, as long as the regions are contiguous. @param self @param address Integer address for where the first byte of _data_ should be written. @param data A list of byte values to be programmed at the given address. @return The FlashLoader instance is returned, to allow chaining further add_data() calls or a call to commit(). @exception ValueError Raised when the address is not within a flash memory region. @exception RuntimeError Raised if the flash memory region does not have a valid Flash instance associated with it, which indicates that the target connect sequence did not run successfully.
11,451
def add_graph(self, graph):
    event = event_pb2.Event(graph_def=graph.SerializeToString())
    self._add_event(event, None)
Adds a `Graph` protocol buffer to the event file.
11,452
def tryload(self, cfgstr=None, on_error='raise'):
    # message literals in this method were lost in extraction; the wording is
    # reconstructed; 'raise'/'clear' are restored from the docstring
    cfgstr = self._rectify_cfgstr(cfgstr)
    if self.enabled:
        try:
            if self.verbose > 1:
                self.log('[cacher] tryload fname={}'.format(self.fname))
            return self.load(cfgstr)
        except IOError:
            if self.verbose > 0:
                self.log('[cacher] ... {} cache miss'.format(self.fname))
        except Exception:
            if self.verbose > 0:
                self.log('[cacher] ... failed to load')
            if on_error == 'raise':
                raise
            elif on_error == 'clear':
                self.clear(cfgstr)
                return None
            else:
                raise KeyError('Unknown method on_error={}'.format(on_error))
    else:
        if self.verbose > 1:
            self.log('[cacher] ... cache disabled: fname={}'.format(self.fname))
    return None
Like load, but returns None if the load fails due to a cache miss. Args: on_error (str): How to handle non-io errors. Either 'raise', which re-raises the exception, or 'clear', which deletes the cache and returns None.
11,453
def delete_namespaced_endpoints(self, name, namespace, **kwargs):
    # key literals lost in extraction; restored from the kubernetes python
    # client's generated-method pattern
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_endpoints_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.delete_namespaced_endpoints_with_http_info(name, namespace, **kwargs)
        return data
delete_namespaced_endpoints # noqa: E501 delete Endpoints # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_endpoints(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Endpoints (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
11,454
def add_line(self, line, source, *lineno):
    self.result.append(line, source, *lineno)
Add a line to the result
11,455
def skipgram_fasttext_batch(centers, contexts, num_tokens, subword_lookup, dtype, index_dtype):
    contexts = mx.nd.array(contexts[2], dtype=index_dtype)
    data, row, col = subword_lookup(centers)
    centers = mx.nd.array(centers, dtype=index_dtype)
    centers_csr = mx.nd.sparse.csr_matrix(
        (data, (row, col)), dtype=dtype, shape=(len(centers), num_tokens))
    return centers_csr, contexts, centers
Create a batch for SG training objective with subwords.
11,456
def Nu_vertical_cylinder_Eigenson_Morgan(Pr, Gr, turbulent=None):
    Ra = Pr*Gr
    if turbulent or (Ra > 1.69E10 and turbulent is None):
        return 0.148*Ra**(1/3.) - 127.6
    elif 1E9 < Ra < 1.69E10 and turbulent is not False:
        return 51.5 + 0.0000726*Ra**0.63
    else:
        return 0.48*Ra**0.25
r'''Calculates Nusselt number for natural convection around a vertical isothermal cylinder according to the results of [1]_ correlated by [2]_, presented in [3]_ and in more detail in [4]_. .. math:: Nu_H = 0.48 Ra_H^{0.25},\; Ra_H < 10^{9} Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra_H < 1.69 \times 10^{10} Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra_H Parameters ---------- Pr : float Prandtl number [-] Gr : float Grashof number [-] turbulent : bool or None, optional Whether or not to force the correlation to return the turbulent result; will return the laminar regime if False; leave as None for automatic selection Returns ------- Nu : float Nusselt number, [-] Notes ----- Author presents results as appropriate for both flat plates and cylinders. Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another experiment of diameter 58 mm and length of 6.5 m was considered. Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Transition between ranges is not smooth. If outside of range, no warning is given. Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows the transition formula. Examples -------- >>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10) 230.55946525499715 References ---------- .. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur aux gaz biatomiques par les parois des cylindres verticaux dans le cas de convection naturelle. Dokl Akad Nauk SSSR 26:440-444 .. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and J.P. Hartnett, V 11, 199-264, 1975. .. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6 (June 1, 2008): 521-36. doi:10.1080/01457630801891557. .. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From Vertical Cylinders." In Natural Convection from Circular Cylinders, 23-42. Springer, 2014.
11,457
def from_table(table, engine, limit=None):
    sql = select([table])
    if limit is not None:
        sql = sql.limit(limit)
    result_proxy = engine.execute(sql)
    return from_db_cursor(result_proxy.cursor)
Select data in a database table and put into prettytable. Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`. **Chinese documentation** (translated): Put the data from the database table into a prettytable.
11,458
def _get_converter(self, convert_to=None):
    # (docstring text that had leaked into this code body has been removed)
    conversion = self._get_conversion_type(convert_to)
    if conversion == "singularity":
        return self.docker2singularity
    return self.singularity2docker
see convert and save. This is a helper function that returns the proper conversion function, but doesn't call it. We do this so that in the case of convert, we do the conversion and return a string. In the case of save, we save the recipe to file for the user. Parameters ========== convert_to: a string either docker or singularity, if a different Returns ======= converter: the function to do the conversion
11,459
def get_object(self):
    obj = super(DeleteView, self).get_object()
    if not obj:
        raise http.Http404
    return obj
Get the object for previewing. Raises an Http404 error if the object is not found.
11,460
def partition(self):
    step = int(math.ceil(self.num_tasks / float(self.partitions)))
    if self.indices is None:  # `== None` replaced with `is None`
        slice_ind = list(range(0, self.num_tasks, step))
        for start in slice_ind:
            yield self.__class__(self.partitions, list(range(start, start + step)))
    else:
        slice_ind = list(range(0, len(self.indices), step))
        for start in slice_ind:
            if start + step <= len(self.indices):
                yield self.__class__(self.partitions, self.indices[start: start + step])
            else:
                yield self.__class__(self.partitions, self.indices[start:])
Partitions all tasks into groups of tasks. A group is represented by a task_store object that indexes a subset of tasks.
11,461
async def _connect_polling(self, url, headers, engineio_path):
    # NOTE: the message/key string literals in this method were lost in
    # extraction; values below are reconstructed from the python-engineio
    # async client (best-effort restoration).
    if aiohttp is None:
        self.logger.error('aiohttp not installed -- cannot make HTTP requests!')
        return
    self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
    self.logger.info('Attempting polling connection to ' + self.base_url)
    r = await self._send_request(
        'GET', self.base_url + self._get_url_timestamp(), headers=headers)
    if r is None:
        self._reset()
        raise exceptions.ConnectionError('Connection refused by the server')
    if r.status != 200:
        raise exceptions.ConnectionError(
            'Unexpected status code {} in server response'.format(r.status))
    try:
        p = payload.Payload(encoded_payload=await r.read())
    except ValueError:
        six.raise_from(exceptions.ConnectionError(
            'Unexpected response from server'), None)
    open_packet = p.packets[0]
    if open_packet.packet_type != packet.OPEN:
        raise exceptions.ConnectionError('OPEN packet not returned by server')
    self.logger.info('Polling connection accepted with ' + str(open_packet.data))
    self.sid = open_packet.data['sid']
    self.upgrades = open_packet.data['upgrades']
    self.ping_interval = open_packet.data['pingInterval'] / 1000.0
    self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
    self.current_transport = 'polling'
    self.base_url += '&sid=' + self.sid
    self.state = 'connected'
    client.connected_clients.append(self)
    await self._trigger_event('connect', run_async=False)
    for pkt in p.packets[1:]:
        await self._receive_packet(pkt)
    if 'websocket' in self.upgrades and 'websocket' in self.transports:
        if await self._connect_websocket(url, headers, engineio_path):
            return
    self.ping_loop_task = self.start_background_task(self._ping_loop)
    self.write_loop_task = self.start_background_task(self._write_loop)
    self.read_loop_task = self.start_background_task(self._read_loop_polling)
Establish a long-polling connection to the Engine.IO server.
11,462
async def handle_client_ping(self, client_addr, _: Ping):
    await ZMQUtils.send_with_addr(self._client_socket, client_addr, Pong())
Handle a Ping message. Pong the client.
11,463
def get_help_datapacks(module_name, server_prefix):
    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # a stray third .format() argument ("_help.json") was dropped; the template
    # only has two placeholders
    module_dir = "{}/../{}".format(_dir, module_name)
    if os.path.isdir(module_dir):
        module_help_path = "{}/{}".format(module_dir, "_help.json")
        if os.path.isfile(module_help_path):
            return helptools.get_help_datapacks(module_help_path, server_prefix)
        else:
            return [("Help", "{} does not have a help.json file".format(module_name), False)]
    else:
        return [("Help", "No module found called {}".format(module_name), False)]
Get the help datapacks for a module Args: module_name (str): The module to get help data for server_prefix (str): The command prefix for this server Returns: datapacks (list): The help datapacks for the module
11,464
def walk(self, dispatcher, node):
    deferrable_handlers = {
        Declare: self.declare,
        Resolve: self.register_reference,
    }
    layout_handlers = {
        PushScope: self.push_scope,
        PopScope: self.pop_scope,
        PushCatch: self.push_catch,
        PopCatch: self.pop_scope,
    }
    if not self.shadow_funcname:
        layout_handlers[ResolveFuncName] = self.shadow_reference
    local_dispatcher = Dispatcher(
        definitions=dict(dispatcher),
        token_handler=None,
        layout_handlers=layout_handlers,
        deferrable_handlers=deferrable_handlers,
    )
    return list(walk(local_dispatcher, node))
Walk through the node with a custom dispatcher for extraction of details that are required.
11,465
def getLipdNames(D=None):
    _names = []
    try:
        if not D:
            print("Error: LiPD data not provided. Pass LiPD data into the function.")
        else:
            _names = D.keys()
    except Exception:
        pass
    return _names
Get a list of all LiPD names in the library | Example | names = lipd.getLipdNames(D) :return list f_list: File list
11,466
def _get_distance_scaling(self, C, mag, rhypo):
    return (C["a3"] * np.log(rhypo)) + (C["a4"] + C["a5"] * mag) * rhypo
Returns the distance scaling term
11,467
def from_rgb(r, g=None, b=None):
    c = r if isinstance(r, list) else [r, g, b]
    best = {}
    for index, item in enumerate(colors):
        d = __distance(item, c)
        # key literals lost in extraction; 'distance'/'index' are guesses
        if not best or d <= best['distance']:
            best = {'distance': d, 'index': index}
    if 'index' in best:
        return best['index']
    else:
        return 1
Return the nearest xterm 256 color code from rgb input.
11,468
def draw_address(canvas):
    # The individual address lines were lost in extraction and cannot be
    # reconstructed; '...' marks the gap.
    business_details = (
        ...,
    )
    # font-name literal lost in extraction; 'Helvetica' is a guess
    canvas.setFont('Helvetica', 9)
    textobject = canvas.beginText(13 * cm, -2.5 * cm)
    for line in business_details:
        textobject.textLine(line)
    canvas.drawText(textobject)
Draws the business address
11,469
def list_objects(self, query=None, limit=-1, offset=-1):
    result = []
    # the flag key and sort key literals were lost in extraction; 'active' and
    # 'timestamp' are guesses
    doc = {'active': True}
    if query is not None:
        for key in query:
            doc[key] = query[key]
    coll = self.collection.find(doc).sort([('timestamp', pymongo.DESCENDING)])
    count = 0
    for document in coll:
        if limit >= 0 and len(result) == limit:
            break
        if offset < 0 or count >= offset:
            result.append(self.from_dict(document))
        count += 1
    return ObjectListing(result, offset, limit, coll.count())
List of all objects in the database. Optional parameters limit and offset for pagination. A dictionary of key,value-pairs can be given as additional query condition for document properties. Parameters ---------- query : Dictionary Filter objects by property-value pairs defined by dictionary. limit : int Limit number of items in the result set offset : int Set offset in list (order as defined by object store) Returns ------- ObjectListing
11,470
def get_info(pyfile):
    info = {}
    # the regex was truncated in extraction (it had one group but the code
    # reads group(2)); a second capture group for the value is restored
    info_re = re.compile(r"^__(\w+)__ = [\"']([^\"']+)[\"']")
    with open(pyfile, 'r') as f:
        for line in f.readlines():
            match = info_re.search(line)
            if match:
                info[match.group(1)] = match.group(2)
    return info
Retrieve dunder values from a pyfile
11,471
def render_html(root, options=0, extensions=None):
    if extensions is None:
        extensions = _cmark.ffi.NULL
    raw_result = _cmark.lib.cmark_render_html(root, options, extensions)
    return _cmark.ffi.string(raw_result).decode()
Render a given syntax tree as HTML. Args: root (Any): The reference to the root node of the syntax tree. options (int): The cmark options. extensions (Any): The reference to the syntax extensions, generally from :func:`parser_get_syntax_extensions` Returns: str: The rendered HTML.
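A usage sketch; `parse_document` stands in for whatever companion call in this module produces the root node:

root = parse_document("# Title")  # hypothetical parser call
html = render_html(root)          # -> '<h1>Title</h1>\n'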
11,472
def view(self, rec):
    # the template name and the kwd literals were lost in extraction; '...'
    # marks the unrecoverable values rather than guessing them
    kwd = {...: ...}
    self.render(...,
                postinfo=rec,
                kwd=kwd,
                author=rec.user_name,
                format_date=tools.format_date,
                userinfo=self.userinfo,
                cfg=CMS_CFG)
View the page.
11,473
def set_env_info(self, env_state=None, env_id=None, episode_id=None, bump_past=None, fps=None):
    with self.cv:
        if env_id is None:
            env_id = self._env_id
        if env_state is None:
            env_state = self._env_state
        if fps is None:
            fps = self._fps
        self.cv.notifyAll()
        old_episode_id = self._episode_id
        if self.primary:
            current_id = parse_episode_id(self._episode_id)
            if bump_past is not None:
                bump_past_id = parse_episode_id(bump_past)
                current_id = max(bump_past_id + 1, current_id + 1)
            elif env_state == 'resetting':  # state literal lost in extraction; 'resetting' is a guess
                current_id += 1
            self._episode_id = generate_episode_id(current_id)
            assert self._fps or fps
        elif episode_id is False:
            pass
        else:
            assert episode_id is not None, "No episode_id provided. This likely indicates a misbehaving server, which did not send an episode_id"
            self._episode_id = episode_id
        self._fps = fps
        # log-format literal lost in extraction; wording reconstructed to match
        # the surviving argument order
        logger.info('[%s] env_state %s (env_id=%s) -> %s (env_id=%s), episode_id %s -> %s, fps=%s',
                    self.label, self._env_state, self._env_id, env_state, env_id,
                    old_episode_id, self._episode_id, self._fps)
        self._env_state = env_state
        if env_id is not None:
            self._env_id = env_id
        return self.env_info()
Atomically set the environment state tracking variables.
11,474
def __build_config_block(self, config_block_node):
    node_lists = []
    for line_node in config_block_node:
        if isinstance(line_node, pegnode.ConfigLine):
            node_lists.append(self.__build_config(line_node))
        elif isinstance(line_node, pegnode.OptionLine):
            node_lists.append(self.__build_option(line_node))
        elif isinstance(line_node, pegnode.ServerLine):
            node_lists.append(self.__build_server(line_node))
        elif isinstance(line_node, pegnode.BindLine):
            node_lists.append(self.__build_bind(line_node))
        elif isinstance(line_node, pegnode.AclLine):
            node_lists.append(self.__build_acl(line_node))
        elif isinstance(line_node, pegnode.BackendLine):
            node_lists.append(self.__build_usebackend(line_node))
        elif isinstance(line_node, pegnode.UserLine):
            node_lists.append(self.__build_user(line_node))
        elif isinstance(line_node, pegnode.GroupLine):
            node_lists.append(self.__build_group(line_node))
        else:
            pass
    return node_lists
parse `config_block` in each section Args: config_block_node (TreeNode): Description Returns: [line_node1, line_node2, ...]
11,475
def save(self, items):
    rows = []
    indx = self.indx
    size = 0
    tick = s_common.now()
    for item in items:
        byts = s_msgpack.en(item)
        size += len(byts)
        lkey = s_common.int64en(indx)
        indx += 1
        rows.append((lkey, byts))
    self.slab.putmulti(rows, append=True, db=self.db)
    took = s_common.now() - tick
    origindx = self.indx
    self.indx = indx
    # the flattened source contained two return statements (a stats dict with
    # lost key names, then this line); per the docstring, the index of the
    # first saved item is what is returned
    return origindx
Save a series of items to a sequence. Args: items (tuple): The series of items to save into the sequence. Returns: The index of the first item
11,476
def get_mesh_dict(self):
    if self._mesh is None:
        msg = "run_mesh has to be done."
        raise RuntimeError(msg)
    # key literals lost in extraction; names restored from the docstring
    retdict = {'qpoints': self._mesh.qpoints,
               'weights': self._mesh.weights,
               'frequencies': self._mesh.frequencies,
               'eigenvectors': self._mesh.eigenvectors,
               'group_velocities': self._mesh.group_velocities}
    return retdict
Returns calculated mesh sampling phonons Returns ------- dict keys: qpoints, weights, frequencies, eigenvectors, and group_velocities Each value for the corresponding key is explained as below. qpoints: ndarray q-points in reduced coordinates of reciprocal lattice dtype='double' shape=(ir-grid points, 3) weights: ndarray Geometric q-point weights. Its sum is the number of grid points. dtype='intc' shape=(ir-grid points,) frequencies: ndarray Phonon frequencies at ir-grid points. Imaginary frequenies are represented by negative real numbers. dtype='double' shape=(ir-grid points, bands) eigenvectors: ndarray Phonon eigenvectors at ir-grid points. See the data structure at np.linalg.eigh. dtype='complex' shape=(ir-grid points, bands, bands) group_velocities: ndarray Phonon group velocities at ir-grid points. dtype='double' shape=(ir-grid points, bands, 3)
11,477
def logsumexp(arr, axis=0):
    if axis == 0:
        pass
    elif axis == 1:
        arr = arr.T
    else:
        raise NotImplementedError
    # subtract the max before exponentiating to avoid overflow
    vmax = arr.max(axis=0)
    out = da.log(da.sum(da.exp(arr - vmax), axis=0))
    out += vmax
    return out
Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107
11,478
def execute(self, input_data):
    # NOTE: the dict-key literals in this method were lost in extraction; the
    # names below ('sample', 'md5', 'file_type', ...) are plausible guesses,
    # and '...' marks keys that could not be reconstructed at all
    raw_bytes = input_data['sample']['raw_bytes']
    self.meta['md5'] = hashlib.md5(raw_bytes).hexdigest()
    self.meta['filename'] = input_data['sample']['filename']
    self.meta['import_time'] = input_data['sample']['import_time']
    with magic.Magic() as mag:
        self.meta['file_type'] = mag.id_buffer(raw_bytes[:1024])
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
        self.meta['mime_type'] = mag.id_buffer(raw_bytes[:1024])
    with magic.Magic(flags=magic.MAGIC_MIME_ENCODING) as mag:
        try:
            self.meta['encoding'] = mag.id_buffer(raw_bytes[:1024])
        except magic.MagicError:
            self.meta['encoding'] = 'unknown'
    self.meta['file_size'] = len(raw_bytes)
    # four more fields were copied straight from input_data['sample'] here;
    # their key names were lost in extraction
    ...
    return self.meta
This worker computes meta data for any file type.
11,479
def start_session_if_none(self):
    if not (self._screen_id and self._session):
        self.update_screen_id()
        self._session = YouTubeSession(screen_id=self._screen_id)
Starts a session if it is not yet initialized.
11,480
def s3_write(self, log, remote_log_location, append=True):
    if append and self.s3_log_exists(remote_log_location):
        old_log = self.s3_read(remote_log_location)
        log = '\n'.join([old_log, log]) if old_log else log
    try:
        self.hook.load_string(
            log,
            key=remote_log_location,
            replace=True,
            # config-key literals lost in extraction; reconstructed from Airflow
            encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
        )
    except Exception:
        # message literal lost in extraction; wording reconstructed
        self.log.exception('Could not write logs to %s', remote_log_location)
Writes the log to the remote_log_location. Fails silently if no hook was created. :param log: the log to write to the remote_log_location :type log: str :param remote_log_location: the log's location in remote storage :type remote_log_location: str (path) :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :type append: bool
11,481
def recommend_delete(self, num_iid, session):
    # literals lost in extraction; the API method name is taken from the
    # docstring, 'num_iid'/'item' are guesses
    request = TOPRequest('taobao.item.recommend.delete')
    request['num_iid'] = num_iid
    self.create(self.execute(request, session)['item'])
    return self
taobao.item.recommend.delete — Cancel window recommendation for an item. Cancels the window-recommended status of the specified item for the current user. The item's seller is taken from the passed-in session, which must be bound.
11,482
def config(name='EMAIL_URL', default=''):
    # both default literals were lost in extraction; 'EMAIL_URL' is taken from
    # the docstring and the empty default is a guess
    conf = {}
    s = env(name, default)
    if s:
        conf = parse_email_url(s)
    return conf
Returns a dictionary with EMAIL_* settings from EMAIL_URL.
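A sketch of the URL-to-settings mapping this helper performs (the exact keys depend on `parse_email_url`; the URL is illustrative):

import os
os.environ["EMAIL_URL"] = "smtp://user:pass@mail.example.com:587"
settings = config()  # e.g. {'EMAIL_HOST': 'mail.example.com', ...}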
11,483
def from_char(
        cls, char, name=None, width=None, fill_char=None,
        bounce=False, reverse=False, back_char=None, wrapper=None):
    return cls(
        cls._generate_move(
            char,
            width=width or cls.default_width,
            fill_char=str(fill_char or cls.default_fill_char),
            bounce=bounce,
            reverse=reverse,
            back_char=back_char,
        ),
        name=name,
        wrapper=wrapper or cls.default_wrapper,
    )
Create progress bar frames from a "moving" character. The frames simulate movement of the character, from left to right through empty space (`fill_char`). Arguments: char : Character to move across the bar. name : Name for the new BarSet. width : Width of the progress bar. Default: 25 fill_char : Character to fill empty space. Default: ' ' (space) bounce : Whether the frames should simulate a bounce from one side to another. Default: False reverse : Whether the character should start on the right. Default: False back_char : Character to use when "bouncing" backward. Default: `char`
11,484
def _wkt(eivals, timescales, normalization, normalized_laplacian):
    nv = eivals.shape[0]
    wkt = np.zeros(timescales.shape)
    for idx, t in enumerate(timescales):
        wkt[idx] = np.sum(np.exp(-1j * t * eivals))
    if isinstance(normalization, np.ndarray):
        return wkt / normalization  # fixed: the flattened source returned undefined `hkt`
    # 'empty'/'complete' literals restored from the docstring
    if normalization == 'empty' or normalization is True:
        return wkt / nv
    if normalization == 'complete':
        if normalized_laplacian:
            return wkt / (1 + (nv - 1) * np.cos(timescales))
        else:
            return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
    return wkt
Computes wave kernel trace from given eigenvalues, timescales, and normalization. For the precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18. Parameters ---------- eivals : numpy.ndarray Eigenvalue vector timescales : numpy.ndarray Vector of discrete timesteps for the kernel computation normalization : str or numpy.ndarray Either 'empty', 'complete' or None. If None or any other value, return the unnormalized wave kernel trace. For the details of how 'empty' and 'complete' are computed, please refer to the paper. If np.ndarray, they are treated as exact normalization constants normalized_laplacian: bool Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization. Returns ------- numpy.ndarray Wave kernel trace signature
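A small worked sketch, assuming the helper above is importable; the eigenvalues {0, 1, 3} are those of the unnormalized Laplacian of the path graph P3:

    import numpy as np
    eivals = np.array([0.0, 1.0, 3.0])
    timescales = np.linspace(0, 2 * np.pi, 8)
    trace = _wkt(eivals, timescales, normalization='empty',
                 normalized_laplacian=False)
    # trace[0] == 1.0: at t=0 every term is exp(0) = 1, and 'empty'
    # divides the sum (3) by nv = 3.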
11,485
def normalize_locale(loc):
    # Key names ('territory', 'codeset', 'charmap') reconstructed from the
    # usual split_locale() layout; treat them as assumptions.
    comps = split_locale(loc)
    comps['territory'] = comps['territory'].upper()
    comps['codeset'] = comps['codeset'].lower().replace('-', '')
    comps['charmap'] = ''
    return join_locale(comps)
Format a locale specifier according to the format returned by `locale -a`.
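Expected behaviour, assuming split_locale/join_locale use the usual language_TERRITORY.codeset@modifier layout:

    # normalize_locale('en_us.UTF-8')  ->  'en_US.utf8'
    # territory upper-cased, codeset lower-cased with '-' removed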
11,486
def _GetStatus(self):
    if self._parser_mediator:
        number_of_produced_events = (
            self._parser_mediator.number_of_produced_events)
        number_of_produced_sources = (
            self._parser_mediator.number_of_produced_event_sources)
        number_of_produced_warnings = (
            self._parser_mediator.number_of_produced_warnings)
    else:
        number_of_produced_events = None
        number_of_produced_sources = None
        number_of_produced_warnings = None

    if self._extraction_worker and self._parser_mediator:
        last_activity_timestamp = max(
            self._extraction_worker.last_activity_timestamp,
            self._parser_mediator.last_activity_timestamp)
        processing_status = self._extraction_worker.processing_status
    else:
        last_activity_timestamp = 0.0
        processing_status = self._status

    task_identifier = getattr(self._task, 'identifier', '')

    if self._process_information:
        used_memory = self._process_information.GetUsedMemory() or 0
    else:
        used_memory = 0

    if self._memory_profiler:
        self._memory_profiler.Sample('main', used_memory)

    # The value is passed around as a string in the status report.
    used_memory = '{0:d}'.format(used_memory)

    # Status key names and the stripped string literals above were
    # reconstructed from Plaso's conventions; treat them as assumptions.
    status = {
        'display_name': self._current_display_name,
        'identifier': self._name,
        'last_activity_timestamp': last_activity_timestamp,
        'number_of_consumed_event_tags': None,
        'number_of_consumed_events': self._number_of_consumed_events,
        'number_of_consumed_sources': self._number_of_consumed_sources,
        'number_of_consumed_warnings': None,
        'number_of_produced_event_tags': None,
        'number_of_produced_events': number_of_produced_events,
        'number_of_produced_sources': number_of_produced_sources,
        'number_of_produced_warnings': number_of_produced_warnings,
        'processing_status': processing_status,
        'task_identifier': task_identifier,
        'used_memory': used_memory}
    return status
Retrieves status information. Returns: dict[str, object]: status attributes, indexed by name.
11,487
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
                 win_length=None, window='hann', center=True,
                 pad_mode='reflect'):
    if S is not None:
        # Infer n_fft from the spectrogram's frequency-bin count.
        n_fft = 2 * (S.shape[0] - 1)
    else:
        S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
                        win_length=win_length, center=center,
                        window=window, pad_mode=pad_mode))**power
    return S, n_fft
Helper function to retrieve a magnitude spectrogram. This is primarily used in feature extraction functions that can operate on either audio time-series or spectrogram input. Parameters ---------- y : None or np.ndarray [ndim=1] If provided, an audio time series S : None or np.ndarray Spectrogram input, optional n_fft : int > 0 STFT window size hop_length : int > 0 STFT hop length power : float > 0 Exponent for the magnitude spectrogram, e.g., 1 for energy, 2 for power, etc. win_length : int <= n_fft [scalar] Each frame of audio is windowed by `window()`. The window will be of length `win_length` and then padded with zeros to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``. window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)] - a window specification (string, tuple, or number); see `scipy.signal.get_window` - a window function, such as `scipy.signal.hanning` - a vector or array of length `n_fft` .. see also:: `filters.get_window` center : boolean - If `True`, the signal `y` is padded so that frame `t` is centered at `y[t * hop_length]`. - If `False`, then frame `t` begins at `y[t * hop_length]` pad_mode : string If `center=True`, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding. Returns ------- S_out : np.ndarray [dtype=np.float32] - If `S` is provided as input, then `S_out == S` - Else, `S_out = |stft(y, ...)|**power` n_fft : int > 0 - If `S` is provided, then `n_fft` is inferred from `S` - Else, copied from input
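A usage sketch of both call forms, assuming this helper is accessible and librosa's bundled example audio is available:

    import librosa
    y, sr = librosa.load(librosa.util.example_audio_file())
    S, n_fft = _spectrogram(y=y, n_fft=1024, power=2)  # power spectrogram from audio
    S2, n_fft2 = _spectrogram(S=S)                     # pass-through form
    assert S2 is S and n_fft2 == 1024                  # n_fft inferred from S.shape[0]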
11,488
def checkKey(self, credentials):
    filename = self._keyfile
    if not os.path.exists(filename):
        return 0
    lines = open(filename).xreadlines()
    for l in lines:
        l2 = l.split()
        if len(l2) < 2:
            continue
        try:
            if base64.decodestring(l2[1]) == credentials.blob:
                return 1
        except binascii.Error:
            continue
    return 0
Retrieve the keys of the user specified by the credentials, and check if one matches the blob in the credentials.
11,489
def perp(weights):
    w = _np.asarray(weights) / _np.sum(weights)
    # Mask zero weights so 0 * log(0) does not occur; masked entries
    # contribute zero entropy via w.filled(1.0).
    w = _np.ma.MaskedArray(w, copy=False, mask=(w == 0))
    entr = -_np.sum(w * _np.log(w.filled(1.0)))
    return _np.exp(entr) / len(w)
r"""Calculate the normalized perplexity :math:`\mathcal{P}` of samples with ``weights`` :math:`\omega_i`. :math:`\mathcal{P}=0` is terrible and :math:`\mathcal{P}=1` is perfect. .. math:: \mathcal{P} = exp(H) / N where .. math:: H = - \sum_{i=1}^N \bar{\omega}_i log ~ \bar{\omega}_i .. math:: \bar{\omega}_i = \frac{\omega_i}{\sum_i \omega_i} :param weights: Vector-like array; the samples' weights
11,490
def print_splits(cliques, next_cliques):
    splits = 0
    for i, clique in enumerate(cliques):
        parent, _ = clique
        if parent in next_cliques:
            if len(next_cliques[parent]) > 1:
                print_split(i + splits, len(cliques) + splits)
                splits += 1
Print shifts for new forks.
11,491
def read_ipx(self, length):
    if length is None:
        length = len(self)

    _csum = self._read_fileng(2)
    _tlen = self._read_unpack(2)
    _ctrl = self._read_unpack(1)
    _type = self._read_unpack(1)
    _dsta = self._read_ipx_address()
    _srca = self._read_ipx_address()

    ipx = dict(
        chksum=_csum,
        len=_tlen,
        count=_ctrl,
        type=TYPE.get(_type),
        dst=_dsta,
        src=_srca,
    )

    # Key names follow the dict built above; 30 is the fixed header size.
    # The 'packet' key name is an assumption (the original literal was lost).
    proto = ipx['type']
    length = ipx['len'] - 30
    ipx['packet'] = self._read_packet(header=30, payload=length)

    return self._decode_next_layer(ipx, proto, length)
Read Internetwork Packet Exchange.

Structure of IPX header [RFC 1132]:

    Octets  Bits  Name       Description
    0       0     ipx.cksum  Checksum
    2       16    ipx.len    Packet Length (header included)
    4       32    ipx.count  Transport Control (hop count)
    5       40    ipx.type   Packet Type
    6       48    ipx.dst    Destination Address
    18      144   ipx.src    Source Address
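A standalone sketch of the same 30-byte header layout using struct, independent of the reader class above (the sample bytes are made up):

    import struct
    raw = bytes.fromhex('ffff001e0004') + b'\x00' * 24  # 30 bytes total
    chksum, tlen, count, ptype = struct.unpack('>HHBB', raw[:6])
    dst, src = raw[6:18], raw[18:30]
    assert tlen == 30 and ptype == 4  # length covers the header; type 4 is commonly PEP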
11,492
def maintenance_center(self, storage_disk_xml=None):
    # The XML element name, status value, and metric path below are
    # reconstructed assumptions; the original string literals were lost.
    disk_in_maintenance = 0
    for filer_disk in storage_disk_xml:
        disk_status = filer_disk.find('raid-state')
        if disk_status.text == 'maintenance':
            disk_in_maintenance += 1
    self.push('maintenance_center', 'disk_count', disk_in_maintenance)
Collector for how many disk(s) are in NetApp maintenance center For more information on maintenance center please see: bit.ly/19G4ptr
11,493
def build_template(
        initial_template=None,
        image_list=None,
        iterations=3,
        gradient_step=0.2,
        **kwargs):
    # Stripped string literals ('SyN', 'fwdtransforms', 'warpedmovout',
    # '.nii.gz') reconstructed from ANTsPy conventions.
    wt = 1.0 / len(image_list)
    if initial_template is None:
        initial_template = image_list[0] * 0
        for i in range(len(image_list)):
            initial_template = initial_template + image_list[i] * wt

    xavg = initial_template.clone()
    for i in range(iterations):
        for k in range(len(image_list)):
            w1 = registration(
                xavg, image_list[k], type_of_transform='SyN', **kwargs)
            if k == 0:
                wavg = iio.image_read(w1['fwdtransforms'][0]) * wt
                xavgNew = w1['warpedmovout'] * wt
            else:
                wavg = wavg + iio.image_read(w1['fwdtransforms'][0]) * wt
                xavgNew = xavgNew + w1['warpedmovout'] * wt
        print(wavg.abs().mean())
        # Shape update: move the template along the negative average warp.
        wscl = (-1.0) * gradient_step
        wavg = wavg * wscl
        wavgfn = mktemp(suffix='.nii.gz')
        iio.image_write(wavg, wavgfn)
        xavg = apply_transforms(xavg, xavg, wavgfn)
    return xavg
Estimate an optimal template from an input image_list ANTsR function: N/A Arguments --------- initial_template : ANTsImage initialization for the template building image_list : ANTsImages images from which to estimate template iterations : integer number of template building iterations gradient_step : scalar for shape update gradient kwargs : keyword args extra arguments passed to ants registration Returns ------- ANTsImage Example ------- >>> import ants >>> image = ants.image_read( ants.get_ants_data('r16') , 'float') >>> image2 = ants.image_read( ants.get_ants_data('r27') , 'float') >>> image3 = ants.image_read( ants.get_ants_data('r85') , 'float') >>> timage = ants.build_template( image_list = ( image, image2, image3 ) )
11,494
def create_server_app(provider, password=None, cache=True, cache_timeout=3600,
                      debug=False):
    app = Flask(__name__, static_folder=None)
    app.debug = debug

    # A bool enables the default in-memory cache; a cache object is used as-is.
    if cache:
        if type(cache) == bool:
            cache = SimpleCache()
    else:
        cache = False

    def daap_wsgi_app(func):
        @wraps(func)
        def _inner(environment, start_response):
            if environment["PATH_INFO"].startswith("daap://") or \
                    environment["PATH_INFO"].startswith("http://"):
                environment["PATH_INFO"] = "/" + \
                    environment["PATH_INFO"].split("/", 3)[3]
            return func(environment, start_response)
        return _inner
    app.wsgi_app = daap_wsgi_app(app.wsgi_app)

    def daap_trace(func):
        if not debug:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            try:
                start = time.time()
                result = func(*args, **kwargs)
                logger.debug(
                    "Request handling took %.6f seconds", time.time() - start)
                return result
            except:
                logger.exception(
                    "Caught exception before raising it to Flask.")
                raise
        return _inner

    def daap_unpack_args(func):
        args, _, _, _ = inspect.getargspec(func)
        mappings = [mapping for mapping in QS_MAPPING if mapping[1] in args]

        @wraps(func)
        def _inner(*args, **kwargs):
            for key, kwarg, casting in mappings:
                kwargs[kwarg] = casting(request.args[key])
            return func(*args, **kwargs)
        return _inner

    def daap_authenticate(func):
        if not password:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            auth = request.authorization
            if not auth or not auth.password == password:
                return Response(None, 401, {
                    "WWW-Authenticate": "Basic realm=\"%s\"" %
                    provider.server.name})
            return func(*args, **kwargs)
        return _inner
    app.authenticate = daap_authenticate

    def daap_cache_response(func):
        if not cache:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            key = hashlib.md5()
            key.update(func.__name__)
            key.update(request.path)
            for k, v in request.args.iteritems():
                if k not in QS_IGNORE_CACHE:
                    key.update(v)
            key = key.digest()

            value = cache.get(key)
            if value is None:
                value = func(*args, **kwargs)
                cache.set(key, value, timeout=cache_timeout)
            elif debug:
                logger.debug("Loaded response from cache.")
            return value
        return _inner

    @app.after_request
    def after_request(response):
        response.headers["DAAP-Server"] = provider.server.name
        response.headers["Content-Language"] = "en_us"
        response.headers["Accept-Ranges"] = "bytes"
        return response

    @app.route("/server-info", methods=["GET"])
    @daap_trace
    @daap_cache_response
    def server_info():
        data = responses.server_info(provider, provider.server.name, password)
        return ObjectResponse(data)

    @app.route("/content-codes", methods=["GET"])
    @daap_trace
    @daap_cache_response
    def content_codes():
        data = responses.content_codes(provider)
        return ObjectResponse(data)

    @app.route("/login", methods=["GET"])
    @daap_trace
    @daap_authenticate
    def login():
        session_id = provider.create_session(
            user_agent=request.headers.get("User-Agent"),
            remote_address=request.remote_addr,
            client_version=request.headers.get("Client-DAAP-Version"))
        data = responses.login(provider, session_id)
        return ObjectResponse(data)

    @app.route("/logout", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def logout(session_id):
        provider.destroy_session(session_id)
        return Response(None, status=204)

    @app.route("/activity", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def activity(session_id):
        return Response(None, status=200)

    @app.route("/update", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def update(session_id, revision, delta):
        revision = provider.get_next_revision(session_id, revision, delta)
        data = responses.update(provider, revision)
        return ObjectResponse(data)

    @app.route("/fp-setup", methods=["POST"])
    @daap_trace
    @daap_authenticate
    def fp_setup():
        raise NotImplementedError("Fairplay not supported.")

    @app.route("/databases", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def databases(session_id, revision, delta):
        new, old = provider.get_databases(session_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.databases(
            provider, new, old, added, removed, is_update)
        return ObjectResponse(data)

    @app.route(
        "/databases/<int:database_id>/items/<int:item_id>/extra_data/artwork",
        methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_item_artwork(database_id, item_id, session_id):
        data, mimetype, total_length = provider.get_artwork(
            session_id, database_id, item_id)

        response = Response(
            data, 200, mimetype=mimetype,
            direct_passthrough=not isinstance(data, basestring))
        if total_length:
            response.headers["Content-Length"] = total_length
        return response

    @app.route(
        "/databases/<int:database_id>/groups/<int:group_id>/extra_data/"
        "artwork", methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_group_artwork(database_id, group_id, session_id, revision,
                               delta):
        # fixed: was 'raise NotImplemented(...)'; NotImplemented is a
        # constant, not an exception class.
        raise NotImplementedError("Groups not supported.")

    @app.route(
        "/databases/<int:database_id>/items/<int:item_id>.<suffix>",
        methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_item(database_id, item_id, suffix, session_id):
        range_header = request.headers.get("Range", None)
        if range_header:
            begin, end = http.parse_range_header(range_header).ranges[0]
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id, byte_range=(begin, end))
            begin, end = (begin or 0), (end or total_length)

            response = Response(
                data, 206, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))
            if total_length <= 0:
                response.headers["Content-Range"] = "bytes %d-%d/*" % (
                    begin, end - 1)
            elif total_length > 0:
                response.headers["Content-Range"] = "bytes %d-%d/%d" % (
                    begin, end - 1, total_length)
            response.headers["Content-Length"] = end - begin
        else:
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id)
            response = Response(
                data, 200, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))
            if total_length > 0:
                response.headers["Content-Length"] = total_length
        return response

    @app.route("/databases/<int:database_id>/items", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_items(database_id, session_id, revision, delta, type):
        new, old = provider.get_items(
            session_id, database_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.items(provider, new, old, added, removed, is_update)
        return ObjectResponse(data)

    @app.route("/databases/<int:database_id>/containers", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_containers(database_id, session_id, revision, delta):
        new, old = provider.get_containers(
            session_id, database_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.containers(
            provider, new, old, added, removed, is_update)
        return ObjectResponse(data)

    @app.route("/databases/<int:database_id>/groups", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_groups(database_id, session_id, revision, delta, type):
        raise NotImplementedError("Groups not supported.")

    @app.route(
        "/databases/<int:database_id>/containers/<int:container_id>/items",
        methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_container_item(database_id, container_id, session_id,
                                revision, delta):
        new, old = provider.get_container_items(
            session_id, database_id, container_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.container_items(
            provider, new, old, added, removed, is_update)
        return ObjectResponse(data)

    return app
Create a DAAP server, based around a Flask application. The server requires a content provider, server name and optionally, a password. The content provider should return raw object data. Object responses can be cached. This may dramatically speed up connections for multiple clients. However, this is only limited to objects, not file servings. Note: in case the server is mounted as a WSGI app, make sure the server passes the authorization header.
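Hypothetical wiring; MyProvider is an illustrative name for a content-provider implementation, not the package's exact API:

    provider = MyProvider()  # implements get_items, get_databases, etc.
    app = create_server_app(provider, password='secret', cache=True)
    app.run(host='0.0.0.0', port=3689)  # 3689 is the standard DAAP port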
11,495
def drive_rotational_speed_rpm(self):
    drv_rot_speed_rpm = set()
    for member in self._drives_list():
        if member.rotation_speed_rpm is not None:
            drv_rot_speed_rpm.add(member.rotation_speed_rpm)
    return drv_rot_speed_rpm
Gets the set of rotational speeds of the disks.
11,496
def start(self):
    self._lc = LoopingCall(self._download)
    self._lc.start(30, now=True)  # poll every 30 seconds, firing immediately
Start the background process.
11,497
def sanity(request, sysmeta_pyxb):
    _does_not_contain_replica_sections(sysmeta_pyxb)
    _is_not_archived(sysmeta_pyxb)
    _obsoleted_by_not_specified(sysmeta_pyxb)
    # Header name reconstructed (an assumption): remote/proxy objects skip
    # the size and checksum checks against the uploaded bytes.
    if 'HTTP_VENDOR_GMN_REMOTE_URL' in request.META:
        return
    _has_correct_file_size(request, sysmeta_pyxb)
    _is_supported_checksum_algorithm(sysmeta_pyxb)
    _is_correct_checksum(request, sysmeta_pyxb)
Check that sysmeta_pyxb is suitable for creating a new object and matches the uploaded sciobj bytes.
11,498
def _specialKeyEvent(key, upDown):
    assert upDown in ('up', 'down'), "upDown argument must be 'up' or 'down'"
    key_code = special_key_translate_table[key]
    ev = AppKit.NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
        Quartz.NSSystemDefined,                # type
        (0, 0),                                # location
        0xa00 if upDown == 'down' else 0xb00,  # flags
        0,                                     # timestamp
        0,                                     # window
        0,                                     # ctx
        8,                                     # subtype
        (key_code << 16) | ((0xa if upDown == 'down' else 0xb) << 8),  # data1
        -1                                     # data2
    )
    Quartz.CGEventPost(0, ev.CGEvent())
Helper method for special keys. Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
11,499
def run(self):
    from pyrocore import config

    try:
        config.engine.open()
        items = []
        self.run_filter(items)
    except (error.LoggableError, xmlrpc.ERRORS) as exc:
        self.LOG.warn(str(exc))
Filter job callback.