Unnamed: 0: int64, values 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
13,500
def zeq_magic(meas_file=, spec_file=,crd=,input_dir_path=, angle=0, n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="", samp_file=, contribution=None,fignum=1): def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock): if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock): return ZED if not in spec_container.df.columns: return ZED prior_spec_data = spec_container.get_records_for_code( , strict_match=False) prior_specimen_interpretations=[] if not len(prior_spec_data): return ZED mpars = {"specimen_direction_type": "Error"} if len(prior_spec_data): prior_specimen_interpretations = prior_spec_data[prior_spec_data[].astype(str) == this_specimen] if len(prior_specimen_interpretations): if len(prior_specimen_interpretations)>0: beg_pcas = pd.to_numeric( prior_specimen_interpretations.meas_step_min.values).tolist() end_pcas = pd.to_numeric( prior_specimen_interpretations.meas_step_max.values).tolist() spec_methods = prior_specimen_interpretations.method_codes.tolist() for ind in range(len(beg_pcas)): spec_meths = spec_methods[ind].split() for m in spec_meths: if in m: calculation_type = if in m: calculation_type = if in m: calculation_type = if in m: calculation_type = treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist() if len(beg_pcas)!=0: try: start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind]) mpars = pmag.domean( datablock, start, end, calculation_type) except ValueError as ex: mpars[] = "Error" try: if beg_pcas[ind] == 0: start = 0 else: start = treatments.index(beg_pcas[ind]) if end_pcas[ind] == 0: end = 0 else: end = treatments.index(end_pcas[ind]) mpars = pmag.domean( datablock, start, end, calculation_type) except ValueError: mpars[] = "Error" if mpars["specimen_direction_type"] != "Error": pmagplotlib.plot_dir(ZED, mpars, datablock, angle) else: print(.format(this_specimen)) print(prior_spec_data.loc[this_specimen][[, ]]) print() cols = list(set([, ]).intersection(this_specimen_measurements.columns)) print(this_specimen_measurements[cols]) print() return ZED def make_plots(spec, cnt, meas_df, spec_container, samp_container=None): if spec_container: try: samps = spec_container.df.loc[spec, ] except KeyError: samps = "" samp_df = [] if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64): if np.isnan(samps): samp = "" samp_df = [] else: samp = str(samps) samp_container.df.index = samp_container.df.index.astype(str) samp_df = samp_container.df[samp_container.df.index == samp] elif isinstance(samps, type(None)): samp = "" samp_df = [] elif len(samps): if isinstance(samps, str): samp = samps else: samp = samps.iloc[0] samp_df = samp_container.df[samp_container.df.index == samp] else: samp_df = [] ZED = {: cnt, : cnt+1, : cnt+2} spec_df = meas_df[meas_df.specimen == s] spec_df = spec_df[- spec_df.method_codes.str.contains( )] spec_df_nrm = spec_df[spec_df.method_codes.str.contains( )] spec_df_th = spec_df[spec_df.method_codes.str.contains( )] try: cond = spec_df.method_codes.str.contains() spec_df_th = spec_df_th[-cond] except ValueError: keep_inds = [] n = 0 for ind, row in spec_df_th.copy().iterrows(): if in row[] and not in row[]: keep_inds.append(n) else: pass n += 1 if len(keep_inds) < n: spec_df_th = spec_df_th.iloc[keep_inds] spec_df_af = spec_df[spec_df.method_codes.str.contains()] this_spec_meas_df = None datablock = None if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1): return if 
len(spec_df_th.index) > 1: this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th]) n_rows = len(this_spec_meas_df) this_spec_meas_df = this_spec_meas_df.dropna(how=, subset=[, , ]) if n_rows > len(this_spec_meas_df): print(.format(s, n_rows - len(this_spec_meas_df))) if coord != "-1" and len(samp_df): this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord) units = try: this_spec_meas_df[] = this_spec_meas_df[].astype(float) this_spec_meas_df[] = this_spec_meas_df[].astype(float) except: print(.format(spec)) return datablock = this_spec_meas_df[[, , , , , ]].values.tolist() ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units) if len(spec_df_af.index) > 1: this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af]) n_rows = len(this_spec_meas_df) this_spec_meas_df = this_spec_meas_df.dropna(how=, subset=[, , ]) if n_rows > len(this_spec_meas_df): print(.format(s, n_rows - len(this_spec_meas_df))) if coord != "-1" and len(samp_df): this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord) units = try: this_spec_meas_df[] = this_spec_meas_df[].astype(float) this_spec_meas_df[] = this_spec_meas_df[].astype(float) except: print(.format(spec)) return datablock = this_spec_meas_df[[, , , , , ]].values.tolist() ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units) return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock) if interactive: save_plots = False if not isinstance(contribution, cb.Contribution): input_dir_path = os.path.realpath(input_dir_path) file_path = pmag.resolve_file_name(meas_file, input_dir_path) if not os.path.exists(file_path): print(, file_path) return False, [] custom_filenames = {: file_path, : spec_file, : samp_file} contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames, read_tables=[, , , ]) if pmagplotlib.isServer: try: contribution.propagate_location_to_samples() contribution.propagate_location_to_specimens() contribution.propagate_location_to_measurements() except KeyError as ex: pass meas_container = contribution.tables[] meas_df = contribution.tables[].df spec_container = contribution.tables.get(, None) samp_container = contribution.tables.get(, None) meas_df[] = "" if in meas_df.columns: if in meas_df.columns: meas_df[] = meas_df[].where( cond=meas_df[].astype(bool), other=meas_df[]) else: meas_df[] = meas_df[] else: meas_df[] = meas_df[] if crd == "s": coord = "-1" elif crd == "t": coord = "100" else: coord = "0" specimens = meas_df.specimen.unique() if len(specimens) == 0: print() return False, [] saved.extend(pmagplotlib.save_plots(ZED, titles)) else: continue else: cnt += 3 return True, saved
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files. Parameters ---------- meas_file : str input measurement file spec_file : str input specimen interpretation file samp_file : str input sample orientations file crd : str coordinate system [s,g,t] for specimen, geographic, tilt corrected g,t options require a sample file with specimen and bedding orientation input_dir_path : str input directory of meas_file, default "." angle : float angle of X direction with respect to specimen X n_plots : int, default 5 maximum number of plots to make if you want to make all possible plots, specify "all" save_plots : bool, default True if True, create and save all requested plots fmt : str, default "svg" format for figures, [svg, jpg, pdf, png] interactive : bool, default False interactively plot and display for each specimen (this is best used on the command line only) specimen : str, default "" specimen name to plot samp_file : str, default 'samples.txt' name of samples file contribution : cb.Contribution, default None if provided, use Contribution object instead of reading in data from files fignum : matplotlib figure number
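A minimal usage sketch based on the parameters documented above. The import location (pmagpy.ipmag) and the file names are assumptions, not taken from this record.
from pmagpy import ipmag

ok, saved = ipmag.zeq_magic(meas_file="measurements.txt",
                            spec_file="specimens.txt",
                            crd="g",              # geographic coordinates
                            input_dir_path=".",
                            n_plots=5,
                            save_plots=True,
                            fmt="svg")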
13,501
def before_insert(mapper, conn, target): target._set_ids() if target.name and target.vname and target.cache_key and target.fqname and not target.dataset: return Partition.before_update(mapper, conn, target)
event.listen method for SQLAlchemy to set the sequence for this object and create an ObjectNumber value for the id_
13,502
def _get_stddev_deep_soil(self, mag, imt): if mag > 7: mag = 7 C = self.COEFFS_SOIL[imt] return C[] + C[] * mag
Calculate and return total standard deviation for deep soil sites. Implements formulae from the last column of table 4.
13,503
def get_subgraph(self, name): match = list() if name in self.obj_dict[]: sgraphs_obj_dict = self.obj_dict[].get( name ) for obj_dict_list in sgraphs_obj_dict: match.append( Subgraph( obj_dict = obj_dict_list ) ) return match
Retrieve a subgraph from the graph. Given a subgraph's name, the corresponding Subgraph instance will be returned. If one or more subgraphs exist with the same name, a list of Subgraph instances is returned. An empty list is returned otherwise.
13,504
def txn(self, overwrite=False, lock=True): if lock: self._lock.acquire() try: new_state, existing_generation = self.state_and_generation new_state = copy.deepcopy(new_state) yield new_state if overwrite: existing_generation = None self.set_state(new_state, existing_generation=existing_generation) finally: if lock: self._lock.release()
Context manager for a state modification transaction.
13,505
def from_env(cls, reactor=None, env=os.environ): address = env.get(, ) token = env.get(, ) ca_cert = env.get() tls_server_name = env.get() client_cert = env.get() client_key = env.get() cf = ClientPolicyForHTTPS.from_pem_files( caKey=ca_cert, privateKey=client_key, certKey=client_cert, tls_server_name=tls_server_name ) client, reactor = default_client(reactor, contextFactory=cf) return cls(address, token, client=client, reactor=reactor)
Create a Vault client with configuration from the environment. Supports a limited number of the available config options: https://www.vaultproject.io/docs/commands/index.html#environment-variables https://github.com/hashicorp/vault/blob/v0.11.3/api/client.go#L28-L40 Supported: - ``VAULT_ADDR`` - ``VAULT_CACERT`` - ``VAULT_CLIENT_CERT`` - ``VAULT_CLIENT_KEY`` - ``VAULT_TLS_SERVER_NAME`` - ``VAULT_TOKEN`` Not currently supported: - ``VAULT_CAPATH`` - ``VAULT_CLIENT_TIMEOUT`` - ``VAULT_MAX_RETRIES`` - ``VAULT_MFA`` - ``VAULT_RATE_LIMIT`` - ``VAULT_SKIP_VERIFY`` - ``VAULT_WRAP_TTL``
13,506
def io_surface(timestep, time, fid, fld): fid.write("{} {}".format(timestep, time)) fid.writelines(["%10.2e" % item for item in fld[:]]) fid.writelines(["\n"])
Output for surface files
13,507
def info(self): ddoc_info = self.r_session.get( .join([self.document_url, ])) ddoc_info.raise_for_status() return response_to_json_dict(ddoc_info)
Retrieves the design document view information data and returns it as a dictionary. GET databasename/_design/{ddoc}/_info
13,508
def save(self, acl=None, client=None): if acl is None: acl = self save_to_backend = acl.loaded else: save_to_backend = True if save_to_backend: self._save(acl, None, client)
Save this ACL for the current bucket. If :attr:`user_project` is set, bills the API request to that project. :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list. :param acl: The ACL object to save. If left blank, this will save current entries. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the ACL's parent.
13,509
def write_input_files(pst): par = pst.parameter_data par.loc[:,"parval1_trans"] = (par.parval1 * par.scale) + par.offset for tpl_file,in_file in zip(pst.template_files,pst.input_files): write_to_template(pst.parameter_data.parval1_trans,tpl_file,in_file)
Write parameter values to model input files using template files, with the current parameter values (stored in Pst.parameter_data.parval1). This is a simple implementation of what PEST does. It does not handle all the special cases, just a basic function... user beware. Parameters ---------- pst : (pyemu.Pst) a Pst instance
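The transform applied in write_input_files above is a simple linear scaling; a standalone numeric illustration (values are made up):
# parval1_trans = parval1 * scale + offset, as in the function body above
parval1, scale, offset = 2.5, 10.0, 1.0
parval1_trans = parval1 * scale + offset   # -> 26.0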
13,510
def _make_package(binder): package_id = binder.id if package_id is None: package_id = hash(binder) package_name = "{}.opf".format(package_id) extensions = get_model_extensions(binder) template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True) items = [] navigation_document = bytes(HTMLFormatter(binder, extensions)) navigation_document_name = "{}{}".format( package_id, mimetypes.guess_extension(, strict=False)) item = Item(str(navigation_document_name), io.BytesIO(navigation_document), , is_navigation=True, properties=[]) items.append(item) resources = {} for model in flatten_model(binder): for resource in getattr(model, , []): resources[resource.id] = resource with resource.open() as data: item = Item(resource.id, data, resource.media_type) items.append(item) if isinstance(model, (Binder, TranslucentBinder,)): continue if isinstance(model, DocumentPointer): content = bytes(HTMLFormatter(model)) item = Item(.join([model.ident_hash, extensions[model.id]]), io.BytesIO(content), model.media_type) items.append(item) continue for reference in model.references: if reference.remote_type == INLINE_REFERENCE_TYPE: resource = _make_resource_from_inline(reference) model.resources.append(resource) resources[resource.id] = resource with resource.open() as data: item = Item(resource.id, data, resource.media_type) items.append(item) reference.bind(resource, ) elif reference.remote_type == INTERNAL_REFERENCE_TYPE: filename = os.path.basename(reference.uri) resource = resources.get(filename) if resource: reference.bind(resource, ) complete_content = bytes(HTMLFormatter(model)) item = Item(.join([model.ident_hash, extensions[model.id]]), io.BytesIO(complete_content), model.media_type) items.append(item) package = Package(package_name, items, binder.metadata) return package
Makes an ``.epub.Package`` from a Binder'ish instance.
13,511
def check_password_readable(self, section, fields): if not fields: return if len(self.read_ok) != 1: return fn = self.read_ok[0] if fileutil.is_accessable_by_others(fn): log.warn(LOG_CHECK, "The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.", fn, section, fields) if os.name == : log.warn(LOG_CHECK, _("For example execute .") % fn) elif os.name == : log.warn(LOG_CHECK, _("See http://support.microsoft.com/kb/308419 for more info on setting file permissions."))
Check if there is a readable configuration file and print a warning.
13,512
def classify(self, classifier_name, examples, max_labels=None, goodness_of_fit=False): classifier = getattr(self, classifier_name) texts_vectors = self._make_text_vectors(examples) return classifier.classes_, classifier.decision_function(texts_vectors)
Use an SVM classifier to label new texts. Args: classifier_name (str): Name of the classifier to use. examples (list or str): A single example or a list of examples to classify, given as plain text or as ids. max_labels (int, optional): Number of labels to return for each example. If more than one is returned, the order reflects the plausibility of each label. If None, all possible labels are returned. goodness_of_fit (bool, optional): Whether or not to return a measure of how good the labels are. Note: Uses the `Scikit-learn <http://scikit-learn.org/>`_ classifier. Returns: tuple (array, array): (considered_labels, scores) considered_labels: The labels that were considered for classification. scores: The higher the score, the more likely it is that the considered label is the right one.
13,513
def scrnaseq_concatenate_metadata(samples): barcodes = {} counts = "" metadata = {} has_sample_barcodes = False for sample in dd.sample_data_iterator(samples): if dd.get_sample_barcodes(sample): has_sample_barcodes = True with open(dd.get_sample_barcodes(sample)) as inh: for line in inh: cols = line.strip().split(",") if len(cols) == 1: cols.append("NaN") barcodes[(dd.get_sample_name(sample), cols[0])] = cols[1:] else: barcodes[(dd.get_sample_name(sample), "NaN")] = [dd.get_sample_name(sample), "NaN"] counts = dd.get_combined_counts(sample) meta = map(str, list(sample["metadata"].values())) meta_cols = list(sample["metadata"].keys()) meta = ["NaN" if not v else v for v in meta] metadata[dd.get_sample_name(sample)] = meta metadata_fn = counts + ".metadata" if file_exists(metadata_fn): return samples with file_transaction(metadata_fn) as tx_metadata_fn: with open(tx_metadata_fn, ) as outh: outh.write(",".join(["sample"] + meta_cols) + ) with open(counts + ".colnames") as inh: for line in inh: sample = line.split(":")[0] if has_sample_barcodes: barcode = sample.split("-")[1] else: barcode = "NaN" outh.write(",".join(barcodes[(sample, barcode)] + metadata[sample]) + ) return samples
Create a file with the same dimensions as mtx.colnames, containing the metadata and sample name, to help in the creation of the SC object.
13,514
def optimally_align_text(x, y, texts, expand, renderer=None, ax=None, direction=): if ax is None: ax = plt.gca() if renderer is None: r = ax.get_figure().canvas.get_renderer() else: r = renderer bboxes = get_bboxes(texts, r, expand) if not in direction: ha = [] else: ha = [, , ] if not in direction: va = [] else: va = [, , ] alignment = list(product(ha, va)) for i, text in enumerate(texts): counts = [] for h, v in alignment: if h: text.set_ha(h) if v: text.set_va(v) bbox = text.get_window_extent(r).expanded(*expand).\ transformed(ax.transData.inverted()) c = get_points_inside_bbox(x, y, bbox) counts.append(len(c) + bbox.count_overlaps(bboxes) - 1) a = np.argmin(counts) if in direction: text.set_ha(alignment[a][0]) if in direction: text.set_va(alignment[a][1]) bboxes[i] = text.get_window_extent(r).expanded(*expand).\ transformed(ax.transData.inverted()) return texts
For all text objects, find the alignment that causes the least overlap with points and other texts, and apply it.
13,515
def _set_helper(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=helper.helper, is_container=, presence=False, yang_name="helper", rest_name="helper", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__helper = t if hasattr(self, ): self._set()
Setter method for helper, mapped from YANG variable /rbridge_id/ipv6/router/ospf/graceful_restart/helper (container) If this variable is read-only (config: false) in the source YANG file, then _set_helper is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_helper() directly. YANG Description: Set graceful restart helper options
13,516
def get_supported_types(): from datetime import date, timedelta editable_types = [int, float, complex, list, set, dict, tuple, date, timedelta] + list(TEXT_TYPES) + list(INT_TYPES) try: from numpy import ndarray, matrix, generic editable_types += [ndarray, matrix, generic] except: pass try: from pandas import DataFrame, Series, DatetimeIndex editable_types += [DataFrame, Series, DatetimeIndex] except: pass picklable_types = editable_types[:] try: from spyder.pil_patch import Image editable_types.append(Image.Image) except: pass return dict(picklable=picklable_types, editable=editable_types)
Return a dictionary containing the type lists supported by the namespace browser. Note: If you update this list, don't forget to update variablexplorer.rst in spyder-docs
13,517
def forward_selection(self, data, labels, weights, num_features): clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state) used_features = [] for _ in range(min(num_features, data.shape[1])): max_ = -100000000 best = 0 for feature in range(data.shape[1]): if feature in used_features: continue clf.fit(data[:, used_features + [feature]], labels, sample_weight=weights) score = clf.score(data[:, used_features + [feature]], labels, sample_weight=weights) if score > max_: best = feature max_ = score used_features.append(best) return np.array(used_features)
Iteratively adds features to the model
13,518
def PushSection(self, name, pre_formatters): if name == : value = self.stack[-1].context else: value = self.stack[-1].context.get(name) for i, (f, args, formatter_type) in enumerate(pre_formatters): if formatter_type == ENHANCED_FUNC: value = f(value, self, args) elif formatter_type == SIMPLE_FUNC: value = f(value) else: assert False, % formatter_type self.stack.append(_Frame(value)) return value
Given a section name, push it on the top of the stack. Returns: The new section, or None if there is no such section.
13,519
def job_path(cls, project, jobs): return google.api_core.path_template.expand( "projects/{project}/jobs/{jobs}", project=project, jobs=jobs )
Return a fully-qualified job string.
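The helper above delegates to google.api_core.path_template.expand; a small sketch of what that expansion produces (the project and job values are placeholders):
from google.api_core import path_template

path = path_template.expand("projects/{project}/jobs/{jobs}",
                            project="my-project", jobs="1234")
# path == "projects/my-project/jobs/1234"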
13,520
def _check_psutil(self, instance): custom_tags = instance.get(, []) if self._collect_cx_state: self._cx_state_psutil(tags=custom_tags) self._cx_counters_psutil(tags=custom_tags)
Gather metrics about connections states and interfaces counters using psutil facilities
13,521
def get_readme(self, repo): readme_contents = repo.readme() if readme_contents is not None: self.total_readmes += 1 return if self.search_limit >= 28: print time.sleep(60) self.search_limit = 0 self.search_limit += 1 search_results = self.logged_in_gh.search_code( + + repo.full_name) try: for result in search_results: path = result.path[1:] if not in path and in path.lower(): self.total_readmes += 1 return path return except (github3.models.GitHubError, StopIteration) as e: return
Checks to see if the given repo has a ReadMe. MD means it has a correct Readme recognized by GitHub.
13,522
def execute(self, sensor_graph, scope_stack): parent = scope_stack[-1] alloc = parent.allocator output = alloc.allocate_stream(DataStream.UnbufferedType, attach=True) trigger_stream, trigger_cond = parent.trigger_chain() streamer_const = alloc.allocate_stream(DataStream.ConstantType, attach=True) sensor_graph.add_node(u"({} {} && {} always) => {} using trigger_streamer".format(trigger_stream, trigger_cond, streamer_const, output)) sensor_graph.add_constant(streamer_const, self.index)
Execute this statement on the sensor_graph given the current scope tree. This adds a single node to the sensor graph with the trigger_streamer function as its processing function. Args: sensor_graph (SensorGraph): The sensor graph that we are building or modifying scope_stack (list(Scope)): A stack of nested scopes that may influence how this statement allocates clocks or other stream resources.
13,523
def diffusionCount(source, target, sourceType = "raw", extraValue = None, pandasFriendly = False, compareCounts = False, numAuthors = True, useAllAuthors = True, _ProgBar = None, extraMapping = None): sourceCountString = "SourceCount" targetCountString = "TargetCount" if not isinstance(sourceType, str): raise RuntimeError("{} is not a valid node type, only tags or the string are allowed".format(sourceType)) if not isinstance(source, RecordCollection) or not isinstance(target, RecordCollection): raise RuntimeError("Source and target must be RecordCollections.") if extraValue is not None and not isinstance(extraValue, str): raise RuntimeError("{} is not a valid extraValue, only tags are allowed".format(extraValue)) if extraMapping is None: extraMapping = lambda x : x if metaknowledge.VERBOSE_MODE or _ProgBar: if _ProgBar: PBar = _ProgBar PBar.updateVal(0, "Starting to analyse a diffusion network") else: PBar = _ProgressBar(0, "Starting to analyse a diffusion network") count = 0 maxCount = len(source) else: PBar = _ProgressBar("Starting to analyse a diffusion network", dummy = True) count = 0 maxCount = len(source) sourceDict = {} listIds = None for Rs in source: if listIds is None and Rs.get(sourceType) is not None: listIds = isinstance(Rs.get(sourceType), list) count += 1 PBar.updateVal(count / maxCount * .10, "Analyzing source: " + str(Rs)) RsVal, RsExtras = makeNodeID(Rs, sourceType) if RsVal: if useAllAuthors: for c in Rs.createCitation(multiCite = True): sourceDict[c] = RsVal else: sourceDict[Rs.createCitation()] = RsVal if extraValue is not None: if listIds: sourceCounts = {s : {targetCountString : 0} for s in itertools.chain.from_iterable(sourceDict.values())} else: sourceCounts = {s : {targetCountString : 0} for s in sourceDict.values()} else: if listIds: sourceCounts = {s : 0 for s in itertools.chain.from_iterable(sourceDict.values())} else: sourceCounts = {s : 0 for s in sourceDict.values()} count = 0 maxCount = len(target) PBar.updateVal(.10, "Done analyzing sources, starting on targets") for Rt in target: count += 1 PBar.updateVal(count / maxCount * .90 + .10, "Analyzing target: {}".format(Rt)) targetCites = Rt.get(, []) if extraValue is not None: values = Rt.get(extraValue, []) if values is None: values = [] elif not isinstance(values, list): values = [values] values = [extraMapping(val) for val in values] for c in targetCites: try: RsourceVals = sourceDict[c] except KeyError: continue if listIds: for sVal in RsourceVals: if extraValue: sourceCounts[sVal][targetCountString] += 1 for val in values: try: sourceCounts[sVal][val] += 1 except KeyError: sourceCounts[sVal][val] = 1 else: sourceCounts[sVal] += 1 else: if extraValue: sourceCounts[RsourceVals][targetCountString] += 1 for val in values: try: sourceCounts[RsourceVals][val] += 1 except KeyError: sourceCounts[RsourceVals][val] = 1 else: sourceCounts[RsourceVals] += 1 if compareCounts: localCounts = diffusionCount(source, source, sourceType = sourceType, pandasFriendly = False, compareCounts = False, extraValue = extraValue, _ProgBar = PBar) if PBar and not _ProgBar: PBar.finish("Done counting the diffusion of {} sources into {} targets".format(len(source), len(target))) if pandasFriendly: retDict = {targetCountString : []} if numAuthors: retDict["numAuthors"] = [] if compareCounts: retDict[sourceCountString] = [] if extraValue is not None: retDict[extraValue] = [] if sourceType == : retrievedFields = [] targetCount = [] for R in sourceCounts.keys(): tagsLst = [t for t in R.keys() if t not in retrievedFields] retrievedFields 
+= tagsLst for tag in retrievedFields: retDict[tag] = [] for R, occ in sourceCounts.items(): if extraValue: Rvals = R.subDict(retrievedFields) for extraVal, occCount in occ.items(): retDict[extraValue].append(extraVal) if numAuthors: retDict["numAuthors"].append(len(R.get())) for tag in retrievedFields: retDict[tag].append(Rvals[tag]) retDict[targetCountString].append(occCount) if compareCounts: try: retDict[sourceCountString].append(localCounts[R][extraVal]) except KeyError: retDict[sourceCountString].append(0) else: Rvals = R.subDict(retrievedFields) if numAuthors: retDict["numAuthors"].append(len(R.get())) for tag in retrievedFields: retDict[tag].append(Rvals[tag]) retDict[targetCountString].append(occ) if compareCounts: retDict[sourceCountString].append(localCounts[R]) else: countLst = [] recLst = [] locLst = [] if extraValue: extraValueLst = [] for R, occ in sourceCounts.items(): if extraValue: for extraVal, occCount in occ.items(): countLst.append(occCount) recLst.append(R) extraValueLst.append(extraVal) if compareCounts: try: locLst.append(localCounts[R][extraValue]) except KeyError: locLst.append(0) else: countLst.append(occ) recLst.append(R) if compareCounts: locLst.append(localCounts[R]) if compareCounts: retDict = {sourceType : recLst, targetCountString : countLst, sourceCountString : locLst} else: retDict = {sourceType : recLst, targetCountString : countLst} if extraValue: retDict[extraValue] = extraValueLst return retDict else: if compareCounts: for R, occ in localCounts.items(): sourceCounts[R] = (sourceCounts[R], occ) return sourceCounts
Takes in two [RecordCollections](../classes/RecordCollection.html#metaknowledge.RecordCollection) and produces a `dict` counting the citations of _source_ by the [Records](../classes/Record.html#metaknowledge.Record) of _target_. By default the `dict` uses `Record` objects as keys but this can be changed with the _sourceType_ keyword to any of the WOS tags. # Parameters _source_ : `RecordCollection` > A metaknowledge `RecordCollection` containing the `Records` being cited _target_ : `RecordCollection` > A metaknowledge `RecordCollection` containing the `Records` citing those in _source_ _sourceType_ : `optional [str]` > default `'raw'`, if `'raw'` the returned `dict` will contain `Records` as keys. If it is a WOS tag the keys will be of that type. _pandasFriendly_ : `optional [bool]` > default `False`, makes the output be a dict with two keys one `"Record"` is the list of Records ( or data type requested by _sourceType_) the other is their occurrence counts as `"Counts"`. The lists are the same length. _compareCounts_ : `optional [bool]` > default `False`, if `True` the diffusion analysis will be run twice, first with source and target setup like the default (global scope) then using only the source `RecordCollection` (local scope). _extraValue_ : `optional [str]` > default `None`, if a tag the returned dictionary will have `Records` mapped to maps, these maps will map the entries for the tag to counts. If _pandasFriendly_ is also `True` the resultant dictionary will have an additional column called `'year'`. This column will contain the year the citations occurred, in addition the Records entries will be duplicated for each year they occur in. > For example if `'year'` was given then the count for a single `Record` could be `{1990 : 1, 2000 : 5}` _useAllAuthors_ : `optional [bool]` > default `True`, if `False` only the first author will be used to generate the `Citations` for the _source_ `Records` # Returns `dict[:int]` > A dictionary with the type given by _sourceType_ as keys and integers as values. > If _compareCounts_ is `True` the values are tuples with the first integer being the diffusion in the target and the second the diffusion in the source. > If _pandasFriendly_ is `True` the returned dict has keys with the names of the WOS tags and lists with their values, i.e. a table with labeled columns. The counts are in the column named `"TargetCount"` and if _compareCounts_ the local count is in a column called `"SourceCount"`.
13,524
def cancel_current_route( payment_state: InitiatorPaymentState, initiator_state: InitiatorTransferState, ) -> List[Event]: assert can_cancel(initiator_state), transfer_description = initiator_state.transfer_description payment_state.cancelled_channels.append(initiator_state.channel_identifier) return events_for_cancel_current_route(transfer_description)
Cancel current route. This allows a new route to be tried.
13,525
def unlock_file(filename): log.trace(, filename) lock = filename + try: os.remove(lock) except OSError as exc: log.trace(, filename, exc)
Unlock a locked file Note that these locks are only recognized by Salt Cloud, and not other programs or platforms.
13,526
def rm_auth_key_from_file(user, source, config=, saltenv=, fingerprint_hash_type=None): s authorized key file, using a file as source CLI Example: .. code-block:: bash salt ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub cp.cache_fileFailed to pull key file from salt file serverNo keys detected in {0}. Is file properly formatted?ssh_auth.errorfails if in rval: return elif in rval: return else: return
Remove an authorized key from the specified user's authorized key file, using a file as source CLI Example: .. code-block:: bash salt '*' ssh.rm_auth_key_from_file <user> salt://ssh_keys/<user>.id_rsa.pub
13,527
def get_valid_time_stamp(): time_stamp = str(datetime.datetime.now()) time_stamp = "time_" + time_stamp.replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_") return time_stamp
Get a valid time stamp without illegal characters. Adds time_ to make the time stamp a valid table name in sql. :return: String, extracted timestamp
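A self-contained sketch of the same idea, producing a timestamp that is safe to embed in an SQL table name:
import datetime

stamp = "time_" + str(datetime.datetime.now()) \
    .replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_")
# e.g. "time_2021_01_01_12_30_45_123456"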
13,528
def predict_class(self, features): if isinstance(features, RDD): return self.predict_class_distributed(features) else: return self.predict_class_local(features)
Model inference based on the given data, returning labels. :param features: can be an ndarray or a list of ndarrays for local inference, or RDD[Sample] for running in distributed fashion :return: ndarray or RDD[Sample] depending on the type of features.
13,529
def spi_configure_mode(self, spi_mode): if spi_mode == SPI_MODE_0: self.spi_configure(SPI_POL_RISING_FALLING, SPI_PHASE_SAMPLE_SETUP, SPI_BITORDER_MSB) elif spi_mode == SPI_MODE_3: self.spi_configure(SPI_POL_FALLING_RISING, SPI_PHASE_SETUP_SAMPLE, SPI_BITORDER_MSB) else: raise RuntimeError()
Configure the SPI interface by the well known SPI modes.
13,530
def get(self, sid): return EngagementContext(self._version, flow_sid=self._solution[], sid=sid, )
Constructs a EngagementContext :param sid: Engagement Sid. :returns: twilio.rest.studio.v1.flow.engagement.EngagementContext :rtype: twilio.rest.studio.v1.flow.engagement.EngagementContext
13,531
def get_doctype(self, index, name): if index not in self.indices: self.get_all_indices() return self.indices.get(index, {}).get(name, None)
Returns a doctype given an index and a name
13,532
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_migrate_time(self, **kwargs): config = ET.Element("config") get_stp_brief_info = ET.Element("get_stp_brief_info") config = get_stp_brief_info output = ET.SubElement(get_stp_brief_info, "output") spanning_tree_info = ET.SubElement(output, "spanning-tree-info") spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode") rstp = ET.SubElement(spanning_tree_mode, "rstp") rstp = ET.SubElement(rstp, "rstp") migrate_time = ET.SubElement(rstp, "migrate-time") migrate_time.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
13,533
def _add_subscribers_for_type(self, callback_type, subscribers, callbacks, **kwargs): for subscriber in subscribers: callback_name = + callback_type if hasattr(subscriber, callback_name): _function = functools.partial(getattr(subscriber, callback_name), **kwargs) callbacks.append(_function)
add a done/queued/progress callback to the appropriate list
13,534
def region_path(cls, project, region): return google.api_core.path_template.expand( "projects/{project}/regions/{region}", project=project, region=region )
Return a fully-qualified region string.
13,535
def wrpcap(filename, pkt, *args, **kargs): with PcapWriter(filename, *args, **kargs) as pcap: pcap.write(pkt)
Write a list of packets to a pcap file gz: set to 1 to save a gzipped capture linktype: force linktype value endianness: "<" or ">", force endianness
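A hedged usage sketch for wrpcap with Scapy; the packet contents and file names are illustrative only:
from scapy.all import Ether, IP, wrpcap

pkts = [Ether() / IP(dst="192.0.2.1"), Ether() / IP(dst="192.0.2.2")]
wrpcap("capture.pcap", pkts)            # plain pcap
wrpcap("capture.pcap.gz", pkts, gz=1)   # gzipped, per the gz option described above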
13,536
def rule_command_cmdlist_interface_s_interface_fc_leaf_interface_fibrechannel_leaf(self, **kwargs): config = ET.Element("config") rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa") index_key = ET.SubElement(rule, "index") index_key.text = kwargs.pop() command = ET.SubElement(rule, "command") cmdlist = ET.SubElement(command, "cmdlist") interface_s = ET.SubElement(cmdlist, "interface-s") interface_fc_leaf = ET.SubElement(interface_s, "interface-fc-leaf") interface = ET.SubElement(interface_fc_leaf, "interface") fibrechannel_leaf = ET.SubElement(interface, "fibrechannel-leaf") fibrechannel_leaf.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
13,537
def _check_frames(self, frames, fill_value): if self.seekable(): remaining_frames = self.frames - self.tell() if frames < 0 or (frames > remaining_frames and fill_value is None): frames = remaining_frames elif frames < 0: raise ValueError("frames must be specified for non-seekable files") return frames
Reduce frames to no more than are available in the file.
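Because indentation was lost in the code above, here is a standalone sketch of the clamping rule the docstring describes (the seekable/fill_value names follow the original):
def check_frames(frames, remaining_frames, seekable, fill_value=None):
    # For seekable files, clamp negative or over-long requests to what remains
    # (unless a fill_value will pad the missing part).
    if seekable:
        if frames < 0 or (frames > remaining_frames and fill_value is None):
            frames = remaining_frames
    elif frames < 0:
        raise ValueError("frames must be specified for non-seekable files")
    return frames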
13,538
def vertex_graph(entities): graph = nx.Graph() closed = [] for index, entity in enumerate(entities): if entity.closed: closed.append(index) else: graph.add_edges_from(entity.nodes, entity_index=index) return graph, np.array(closed)
Given a set of entity objects generate a networkx.Graph that represents their vertex nodes. Parameters -------------- entities : list Objects with 'closed' and 'nodes' attributes Returns ------------- graph : networkx.Graph Graph where node indexes represent vertices closed : (n,) int Indexes of entities which are 'closed'
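A small self-contained sketch of the same construction, using a hypothetical stand-in entity type since the real entity classes are not shown here:
import networkx as nx
import numpy as np

class StubEntity(object):
    # stand-in: 'nodes' holds (vertex_a, vertex_b) index pairs
    def __init__(self, nodes, closed=False):
        self.nodes = nodes
        self.closed = closed

entities = [StubEntity([(0, 1), (1, 2)]), StubEntity([], closed=True)]
graph, closed = nx.Graph(), []
for index, entity in enumerate(entities):
    if entity.closed:
        closed.append(index)
    else:
        graph.add_edges_from(entity.nodes, entity_index=index)
closed = np.array(closed)   # -> array([1])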
13,539
def setup_logging(log_file=os.devnull): class RankFilter(logging.Filter): def __init__(self, rank): self.rank = rank def filter(self, record): record.rank = self.rank return True rank = get_rank() rank_filter = RankFilter(rank) logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s" logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt="%Y-%m-%d %H:%M:%S", filename=log_file, filemode=) console = logging.StreamHandler(sys.stdout) console.setLevel(logging.INFO) formatter = logging.Formatter() console.setFormatter(formatter) logging.getLogger().addHandler(console) logging.getLogger().addFilter(rank_filter)
Configures logging. By default logs from all workers are printed to the console; entries are prefixed with "N: " where N is the rank of the worker. Logs printed to the console don't include timestamps. Full logs with timestamps are saved to the log_file file.
13,540
def get_evernote_notes(self, evernote_filter): data = [] note_store = self.client.get_note_store() our_note_list = note_store.findNotesMetadata(self.token, evernote_filter, 0, 100, EvernoteMgr.set_evernote_spec()) for note in our_note_list.notes: whole_note = note_store.getNote(self.token, note.guid, True, True, False, False) content = self._cleaning_content(whole_note.content) data.append({: note.title, : arrow.get(note.created), : whole_note.attributes.sourceURL, : content}) return data
get the notes related to the filter :param evernote_filter: filtering :return: notes
13,541
def _ParseFileEntry(self, knowledge_base, file_entry): root_key = self._GetPlistRootKey(file_entry) if not root_key: location = getattr(file_entry.path_spec, , ) raise errors.PreProcessFail(( ).format(self.ARTIFACT_DEFINITION_NAME, location)) try: match = self._GetKeysDefaultEmpty(root_key, self._KEYS) except KeyError as exception: location = getattr(file_entry.path_spec, , ) raise errors.PreProcessFail( .format( self.ARTIFACT_DEFINITION_NAME, location, exception)) name = match.get(, [None])[0] uid = match.get(, [None])[0] if not name or not uid: return user_account = artifacts.UserAccountArtifact( identifier=uid, username=name) user_account.group_identifier = match.get(, [None])[0] user_account.full_name = match.get(, [None])[0] user_account.shell = match.get(, [None])[0] user_account.user_directory = match.get(, [None])[0] try: knowledge_base.AddUserAccount(user_account) except KeyError: pass
Parses artifact file system data for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_entry (dfvfs.FileEntry): file entry that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
13,542
def by(self, technology): if technology == PluginTechnology.LV2 \ or str(technology).upper() == PluginTechnology.LV2.value.upper(): return self.lv2_builder.all else: return []
Get the plugins registered in PedalPi by technology :param PluginTechnology technology: PluginTechnology identifier
13,543
def node_received_infos(node_id): exp = Experiment(session) info_type = request_parameter( parameter="info_type", parameter_type="known_class", default=models.Info ) if type(info_type) == Response: return info_type node = models.Node.query.get(node_id) if node is None: return error_response( error_type="/node/infos, node {} does not exist".format(node_id) ) infos = node.received_infos(type=info_type) try: exp.info_get_request(node=node, infos=infos) session.commit() except Exception: return error_response( error_type="info_get_request error", status=403, participant=node.participant, ) return success_response(infos=[i.__json__() for i in infos])
Get all the infos a node has been sent and has received. You must specify the node id in the url. You can also pass the info type.
13,544
def rank(self): max_rank = 0 for each in self.hyperedges(): if len(self.edge_links[each]) > max_rank: max_rank = len(self.edge_links[each]) return max_rank
Return the rank of the given hypergraph. @rtype: int @return: Rank of graph.
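The rank is simply the size of the largest hyperedge; a short standalone illustration of the loop above:
edge_links = {"e1": ["a", "b"], "e2": ["a", "b", "c"]}
rank = max(len(links) for links in edge_links.values())   # -> 3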
13,545
def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool: if not isinstance(other, type(self)): return NotImplemented return approx_eq(self.operations, other.operations, atol=atol)
See `cirq.protocols.SupportsApproximateEquality`.
13,546
def teardown(self): for device in self.devices: self._remove_trustee(device) self._populate_domain() self.domain = {}
Teardown trust domain by removing trusted devices.
13,547
def _get_cookie(self, name, domain): for c in self.session.cookies: if c.name==name and c.domain==domain: return c return None
Return the cookie "name" for "domain" if found. If there is more than one, only the first is returned.
13,548
def run_migrations_online(): app_conf = dci_config.generate_conf() connectable = dci_config.get_engine(app_conf) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, ) with context.begin_transaction(): context.run_migrations()
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
13,549
def _server_rollback(): from os import path, remove archpath = path.abspath(path.expanduser(settings.archfile)) if path.isfile(archpath) and not args["nolive"]: vms("Removing archive JSON file at {}.".format(archpath)) remove(archpath) datapath = path.abspath(path.expanduser(settings.datafile)) if path.isfile(datapath) and not args["nolive"]: vms("Removing script database JSON file at {}".format(datapath)) remove(datapath)
Removes script database and archive files to rollback the CI server installation.
13,550
def add_observer(self, o, component_type=ComponentType): self.observers[component_type].add(o)
Add a callback that will get invoked after each component is called. Args: o (func): the callback function Keyword Args: component_type (ComponentType): the :class:`ComponentType` to observe. The callback will fire any time an instance of the class or its subclasses is invoked. The callback should look like this: .. code-block:: python def callback(comp, broker): value = broker.get(comp) # do something with value pass
13,551
def register(self, hash_types): if not isinstance(hash_types, (list, tuple)): hash_types = [hash_types] def _decor_closure(hash_func): for hash_type in hash_types: key = (hash_type.__module__, hash_type.__name__) self.keyed_extensions[key] = (hash_type, hash_func) return hash_func return _decor_closure
Registers a function to generate a hash for data of the appropriate types. This can be used to register custom classes. Internally this is used to define how to hash non-builtin objects like ndarrays and uuids. The registered function should return a tuple of bytes. First a small prefix hinting at the data type, and second the raw bytes that can be hashed. Args: hash_types (class or tuple of classes): Returns: func: closure to be used as the decorator Example: >>> # xdoctest: +SKIP >>> # Skip this doctest because we dont want tests to modify >>> # the global state. >>> import ubelt as ub >>> import pytest >>> class MyType(object): ... def __init__(self, id): ... self.id = id >>> data = MyType(1) >>> # Custom types wont work with ub.hash_data by default >>> with pytest.raises(TypeError): ... ub.hash_data(data) >>> # You can register your functions with ubelt's internal >>> # hashable_extension registery. >>> @ub.util_hash._HASHABLE_EXTENSIONS.register(MyType) >>> def hash_my_type(data): ... return b'mytype', six.b(ub.hash_data(data.id)) >>> # TODO: allow hash_data to take an new instance of >>> # HashableExtensions, so we dont have to modify the global >>> # ubelt state when we run tests. >>> my_instance = MyType(1) >>> ub.hash_data(my_instance)
13,552
def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): return com.asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig def _validate_win_type(win_type, kwargs): arg_map = {: [], : [], : [, ], : []} if win_type in arg_map: return tuple([win_type] + _pop_args(win_type, arg_map[win_type], kwargs)) return win_type def _pop_args(win_type, arg_names, kwargs): msg = % win_type all_args = [] for n in arg_names: if n not in kwargs: raise ValueError(msg % n) all_args.append(kwargs.pop(n)) return all_args win_type = _validate_win_type(self.win_type, kwargs) return sig.get_window(win_type, window, False).astype(float)
Provide validation for our window type; return the window once it has been validated.
13,553
def build_getters_support_matrix(app): status = subprocess.call("./test.sh", stdout=sys.stdout, stderr=sys.stderr) if status != 0: print("Something bad happened when processing the test reports.") sys.exit(-1) drivers = set() matrix = { m: defaultdict(dict) for m in dir(NetworkDriver) if not (m.startswith("_") or m in EXCLUDE_METHODS) } regex_name = re.compile(r"(?P<driver>\w+)\/.*::test_(?P<getter>\w+)") filename = "./support/tests/report.json" with open(filename, "r") as f: data = json.loads(f.read()) for test in data["report"]["tests"]: match = regex_name.search(test["name"]) if match: driver = match.group("driver") drivers.add(driver) method = match.group("getter") else: continue if method in EXCLUDE_IN_REPORT: continue result = test["outcome"] if method in METHOD_ALIASES.keys(): method = METHOD_ALIASES[method] intermediate_result = matrix[method].get(driver, None) matrix[method][driver] = _merge_results(result, intermediate_result) sorted_methods = sorted(matrix.keys()) drivers = sorted(drivers) env = Environment(loader=FileSystemLoader(".")) template_file = env.get_template("matrix.j2") rendered_template = template_file.render( matrix=matrix, drivers=drivers, sorted_methods=sorted_methods ) with open("support/matrix.rst", "w") as f: f.write(rendered_template)
Build the getters support matrix.
13,554
def present(self, value): for k, v in self.special.items(): if v == value: return k return .join(self.get_separator(i) + self.format[i].present(v) for i, v in enumerate(value))
Return a user-friendly representation of a value. Look up the value in self.special, or call .to_literal() if absent.
13,555
async def spawn_n(self, agent_cls, n, *args, addr=None, **kwargs): if addr is None: addr = await self._get_smallest_env() r_manager = await self.env.connect(addr) return await r_manager.spawn_n(agent_cls, n, *args, **kwargs)
Same as :meth:`~creamas.mp.MultiEnvironment.spawn`, but allows spawning multiple agents with the same initialization parameters simultaneously into **one** slave environment. :param str agent_cls: ``qualname`` of the agent class. That is, the name should be in the form of ``pkg.mod:cls``, e.g. ``creamas.core.agent:CreativeAgent``. :param int n: Number of agents to spawn :param str addr: Optional. Address for the slave enviroment's manager. If :attr:`addr` is None, spawns the agents in the slave environment with currently smallest number of agents. :returns: A list of (:class:`aiomas.rpc.Proxy`, address)-tuples for the spawned agents. The ``*args`` and ``**kwargs`` are passed down to each agent's :meth:`__init__`.
13,556
def deserialize_object(buffers, g=None): bufs = list(buffers) pobj = buffer_to_bytes_py2(bufs.pop(0)) canned = pickle.loads(pobj) if istype(canned, sequence_types) and len(canned) < MAX_ITEMS: for c in canned: _restore_buffers(c, bufs) newobj = uncan_sequence(canned, g) elif istype(canned, dict) and len(canned) < MAX_ITEMS: newobj = {} for k in sorted(canned): c = canned[k] _restore_buffers(c, bufs) newobj[k] = uncan(c, g) else: _restore_buffers(canned, bufs) newobj = uncan(canned, g) return newobj, bufs
Reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- bufs : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
13,557
def _get_bucket_endpoint(self): conn = S3Connection() bucket = conn.lookup(self.bucket_name) if not bucket: raise InputParameterError(t exist' % self.bucket_name) endpoint = str(bucket.get_location()) return endpoint
Queries S3 to identify the region hosting the provided bucket.
13,558
def subslice(inner,outer,section): if section==: return outer[0],outer[0]+inner[0] elif section==: return outer[0]+inner[1],outer[1] elif section==: return outer[0]+inner[0],outer[0]+inner[1] else: raise ValueError(%section)
Helper for rediff. outer is a slice (2-tuple, not an official python slice) in global coordinates; inner is a slice (2-tuple) on that slice. Returns the result of sub-slicing outer by inner.
13,559
def post_card(message, hook_url=None, title=None, theme_color=None): * if not hook_url: hook_url = _get_hook_url() if not message: log.error() payload = { "text": message, "title": title, "themeColor": theme_color } result = salt.utils.http.query(hook_url, method=, data=salt.utils.json.dumps(payload), status=True) if result[] <= 201: return True else: return { : False, : result.get(, result[]) }
Send a message to an MS Teams channel. :param message: The message to send to the MS Teams channel. :param hook_url: The Teams webhook URL, if not specified in the configuration. :param title: Optional title for the posted card :param theme_color: Optional hex color highlight for the posted card :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt '*' msteams.post_card message="Build is done"
13,560
def edit_prefix(self, auth, spec, attr): self._logger.debug("edit_prefix called; spec: %s attr: %s" % (unicode(spec), unicode(attr))) pool = None if in attr or in attr: if in attr: if attr[] is None: pool = { : None, : None } else: pool = self._get_pool(auth, { : attr[] }) else: if attr[] is None: pool = { : None, : None } else: pool = self._get_pool(auth, { : attr[] }) del(attr[]) attr[] = pool[] else: pool = { : None, : None } vrf = self._get_vrf(auth, attr) if in attr: del(attr[]) if in attr: del(attr[]) attr[] = vrf[] self._check_attr(attr, [], _prefix_attrs) if in attr: attr[] = _parse_expires(attr[]) prefixes = self.list_prefix(auth, spec) where, params1 = self._expand_prefix_spec(spec.copy()) update, params2 = self._sql_expand_update(attr) params = dict(params2.items() + params1.items()) sql = "UPDATE ip_net_plan SET " + update + " WHERE " + where sql += " RETURNING id" self._execute(sql, params) updated_prefixes = self._get_updated_rows(auth, self.search_prefix) audit_params = { : auth.username, : auth.authenticated_as, : auth.full_name, : auth.authoritative_source, : vrf[], : vrf[], : vrf[] } for p in prefixes: audit_params[] = p[] audit_params[] = p[] audit_params[] = p[] audit_params[] = p[] audit_params[] = p[] audit_params[] = % (p[], unicode(attr)) sql, params = self._sql_expand_insert(audit_params) self._execute( % sql, params) if p[] != pool[]: audit_params2 = { : p[], : p[], : p[], : p[], : p[], : auth.username, : auth.authenticated_as, : auth.full_name, : auth.authoritative_source, } if pool[] is not None: audit_params2[] = pool[] audit_params2[] = pool[] audit_params2[] = % (pool[], p[]) sql, params = self._sql_expand_insert(audit_params2) self._execute( % sql, params) if p[] is not None: pool2 = self._get_pool(auth, { : p[] }) audit_params2[] = pool2[] audit_params2[] = pool2[] audit_params2[] = % (p[], pool2[]) sql, params = self._sql_expand_insert(audit_params2) self._execute( % sql, params) return updated_prefixes
Update prefix matching `spec` with attributes `attr`. * `auth` [BaseAuth] AAA options. * `spec` [prefix_spec] Specifies the prefix to edit. * `attr` [prefix_attr] Prefix attributes. Note that there are restrictions on when and how a prefix's type can be changed; reservations can be changed to assignments and vice versa, but only if they contain no child prefixes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_prefix` for full understanding.
13,561
def help_box(): style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER dialog_box = wx.Dialog(None, wx.ID_ANY, HELP_TITLE, style=style, size=(620, 450)) html_widget = HtmlHelp(dialog_box, wx.ID_ANY) html_widget.page = build_help_html() dialog_box.ShowModal() dialog_box.Destroy()
A simple HTML help dialog box using the distribution data files.
13,562
def remove_network_from_dhcp_agent(self, dhcp_agent, network_id): return self.delete((self.agent_path + self.DHCP_NETS + "/%s") % ( dhcp_agent, network_id))
Remove a network from dhcp agent.
13,563
def _write_frame(self, data): assert data is not None and 0 < len(data) < 255, length = len(data) frame = bytearray(length+8) frame[0] = PN532_SPI_DATAWRITE frame[1] = PN532_PREAMBLE frame[2] = PN532_STARTCODE1 frame[3] = PN532_STARTCODE2 frame[4] = length & 0xFF frame[5] = self._uint8_add(~length, 1) frame[6:-2] = data checksum = reduce(self._uint8_add, data, 0xFF) frame[-2] = ~checksum & 0xFF frame[-1] = PN532_POSTAMBLE logger.debug(.format(binascii.hexlify(frame))) self._gpio.set_low(self._cs) self._busy_wait_ms(2) self._spi.write(frame) self._gpio.set_high(self._cs)
Write a frame to the PN532 with the specified data bytearray.
13,564
def form_valid(self, form): ret = super(ProjectCopy, self).form_valid(form) self.copy_relations() messages.add_message(self.request, messages.SUCCESS, % self.object.name) return ret
After the form is valid, let people know.
13,565
def get_scheduling_block_ids(): ids = [key.split()[-1] for key in DB.keys(pattern=)] return sorted(ids)
Return list of scheduling block IDs
13,566
def cfg_folder_loader(path): CFG_WILDCARD = return [load_cfg(filename) for filename in sorted(glob.glob(os.path.join(path, CFG_WILDCARD)))]
:type path: str
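The CFG_WILDCARD literal was lost in the code above; a sketch of the same loader assuming a "*.cfg" pattern (that pattern is an assumption, and load_cfg is passed in only to keep the sketch self-contained):
import glob
import os

def cfg_folder_loader(path, load_cfg):
    # Load every config file in the folder, in sorted filename order
    return [load_cfg(filename)
            for filename in sorted(glob.glob(os.path.join(path, "*.cfg")))]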
13,567
def _poll(self): logger.info("Beginning TrustedAdvisor poll") tmp = self._get_limit_check_id() if not self.have_ta: logger.info() return {} if tmp is None: logger.critical("Unable to find Trusted Advisor " "check; not using Trusted Advisor data.") return check_id, metadata = tmp checks = self._get_refreshed_check_result(check_id) region = self.ta_region or self.conn._client_config.region_name res = {} if checks[].get(, ) == : logger.warning( ) return {} if not in checks[]: logger.warning( ) return {} for check in checks[][]: if in check and check[] != region: continue data = dict(zip(metadata, check[])) if data[] not in res: res[data[]] = {} try: val = int(data[]) except ValueError: val = data[] if val != : logger.error( , val, data[], data[]) continue else: logger.debug( , data[], data[]) res[data[]][data[]] = val logger.info("Finished TrustedAdvisor poll") return res
Poll Trusted Advisor (Support) API for limit checks. Return a dict of service name (string) keys to nested dict vals, where each key is a limit name and each value the current numeric limit. e.g.: :: { 'EC2': { 'SomeLimit': 10, } }
13,568
def Parse(self, conditions, host_data): result = CheckResult(check_id=self.check_id) methods = self.SelectChecks(conditions) result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods]) return result
Runs methods that evaluate whether collected host_data has an issue. Args: conditions: A list of conditions to determine which Methods to trigger. host_data: A map of artifacts and rdf data. Returns: A CheckResult populated with Anomalies if an issue exists.
13,569
def install(pkg=None, pkgs=None, dir=None, runas=None, registry=None, env=None, dry_run=False, silent=True): ** if pkg: pkgs = [_cmd_quote(pkg)] elif pkgs: pkgs = [_cmd_quote(v) for v in pkgs] else: pkgs = [] if registry: registry = _cmd_quote(registry) cmd = [, , ] if silent: cmd.append() if not dir: cmd.append() if registry: cmd.append(.format(registry)) if dry_run: cmd.append() cmd.extend(pkgs) env = env or {} if runas: uid = salt.utils.user.get_uid(runas) if uid: env.update({: uid, : }) cmd = .join(cmd) result = __salt__[](cmd, python_shell=True, cwd=dir, runas=runas, env=env) if result[] != 0: raise CommandExecutionError(result[]) npm_output = result[] or result[] try: return salt.utils.json.find_json(npm_output) except ValueError: return npm_output
Install an NPM package. If no directory is specified, the package will be installed globally. If no package is specified, the dependencies (from package.json) of the package in the given directory will be installed. pkg A package name in any format accepted by NPM, including a version identifier pkgs A list of package names in the same format as the ``name`` parameter .. versionadded:: 2014.7.0 dir The target directory in which to install the package, or None for global installation runas The user to run NPM with registry The NPM registry to install the package from. .. versionadded:: 2014.7.0 env Environment variables to set when invoking npm. Uses the same ``env`` format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution function. .. versionadded:: 2014.7.0 silent Whether or not to run NPM install with --silent flag. .. versionadded:: 2016.3.0 dry_run Whether or not to run NPM install with --dry-run flag. .. versionadded:: 2015.8.4 silent Whether or not to run NPM install with --silent flag. .. versionadded:: 2015.8.5 CLI Example: .. code-block:: bash salt '*' npm.install coffee-script salt '*' npm.install [email protected]
13,570
def create_window(self, pane, name=None, set_active=True):
    assert isinstance(pane, Pane)
    assert name is None or isinstance(name, six.text_type)

    # Pick the first free window index at or above base_index.
    taken_indexes = [w.index for w in self.windows]
    index = self.base_index
    while index in taken_indexes:
        index += 1

    w = Window(index)
    w.add_pane(pane)
    self.windows.append(w)
    self.windows = sorted(self.windows, key=lambda w: w.index)

    app = get_app(return_none=True)
    if app is not None and set_active:
        self.set_active_window(w)
    if name is not None:
        w.chosen_name = name

    assert w.active_pane == pane
    assert w._get_parent(pane)
Create a new window that contains just this pane. :param pane: The :class:`.Pane` instance to put in the new window. :param name: If given, name for the new window. :param set_active: When True, focus the new window.
13,571
def load_targets(explanatory_rasters): explanatory_raster_arrays = [] aff = None shape = None crs = None for raster in explanatory_rasters: logger.debug(raster) with rasterio.open(raster) as src: ar = src.read(1) if not aff: aff = src.affine else: assert aff == src.affine if not shape: shape = ar.shape else: assert shape == ar.shape if not crs: crs = src.crs else: assert crs == src.crs arf = ar.flatten() explanatory_raster_arrays.append(arf) expl = np.array(explanatory_raster_arrays).T raster_info = { : aff, : shape, : crs } return expl, raster_info
Parameters ---------- explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables Returns ------- expl : Array of explanatory variables raster_info : dict of raster info
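A hedged call sketch with made-up raster paths; every raster must share the same grid for the asserts in the loader to pass:

# Hypothetical file names; all rasters need identical affine, shape and crs
rasters = ['dem.tif', 'slope.tif', 'precip.tif']
expl, raster_info = load_targets(rasters)
# expl has one row per pixel and one column per raster;
# raster_info describes the shared grid (affine transform, shape, CRS)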
13,572
def run_cmd(cmd): try: p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() except OSError as e: if DEBUG: raise if e.errno == errno.ENOENT: msg = .format(.join(cmd)) raise _CommandNotFound(msg, cmd) else: raise _AHBootstrapSystemExit( .format(.join(cmd), str(e))) try: stdio_encoding = locale.getdefaultlocale()[1] or except ValueError: if not isinstance(stdout, str): stdout = stdout.decode(stdio_encoding, ) if not isinstance(stderr, str): stderr = stderr.decode(stdio_encoding, ) return (p.returncode, stdout, stderr)
Run a command in a subprocess, given as a list of command-line arguments. Returns a ``(returncode, stdout, stderr)`` tuple.
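A short usage sketch of the (returncode, stdout, stderr) tuple; the command shown is arbitrary:

# Run an arbitrary command and inspect the result
returncode, stdout, stderr = run_cmd(['git', '--version'])
if returncode != 0:
    print('command failed: %s' % stderr)
else:
    print(stdout.strip())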
13,573
def use_federated_bank_view(self):
    self._bank_view = FEDERATED
    # Propagate the view setting to all provider sessions that support it.
    for session in self._get_provider_sessions():
        try:
            session.use_federated_bank_view()
        except AttributeError:
            pass
Pass through to provider ItemLookupSession.use_federated_bank_view
13,574
def install(name=None, fromrepo=None, pkgs=None, sources=None, jail=None, chroot=None, root=None, orphan=False, force=False, glob=False, local=False, dryrun=False, quiet=False, reinstall_requires=False, regex=False, pcre=False, batch=False, **kwargs): *********** try: pkg_params, pkg_type = __salt__[]( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} env = {} opts = if salt.utils.data.is_true(orphan): opts += if salt.utils.data.is_true(force): opts += if salt.utils.data.is_true(glob): opts += if salt.utils.data.is_true(local): opts += if salt.utils.data.is_true(dryrun): opts += if salt.utils.data.is_true(quiet): opts += if salt.utils.data.is_true(reinstall_requires): opts += if salt.utils.data.is_true(regex): opts += if salt.utils.data.is_true(pcre): opts += if salt.utils.data.is_true(batch): env = { "BATCH": "true", "ASSUME_ALWAYS_YES": "YES" } old = list_pkgs(jail=jail, chroot=chroot, root=root) if pkg_type == : pkg_cmd = opts = .join([opt for opt in opts if opt in ]) targets = pkg_params elif pkg_type == : pkg_cmd = if pkgs is None and kwargs.get() and len(pkg_params) == 1: pkg_params = {name: kwargs.get()} targets = [] for param, version_num in six.iteritems(pkg_params): if version_num is None: targets.append(param) else: targets.append(.format(param, version_num)) else: raise CommandExecutionError() cmd = _pkg(jail, chroot, root) cmd.append(pkg_cmd) if fromrepo: cmd.extend([, fromrepo]) if opts: cmd.append( + opts) cmd.extend(targets) if pkg_cmd == and salt.utils.data.is_true(dryrun): ) return ret
Install package(s) from a repository name The name of the package to install CLI Example: .. code-block:: bash salt '*' pkg.install <package name> jail Install the package into the specified jail chroot Install the package into the specified chroot (ignored if ``jail`` is specified) root Install the package into the specified root (ignored if ``jail`` is specified) orphan Mark the installed package as orphan. It will be automatically removed if no other packages depend on it. For more information please refer to ``pkg-autoremove(8)``. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> orphan=True force Force the reinstallation of the package if already installed. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> force=True glob Treat the package names as shell glob patterns. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> glob=True local Do not update the repository catalogs with ``pkg-update(8)``. A value of ``True`` here is equivalent to using the ``-U`` flag with ``pkg install``. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> local=True dryrun Dry-run mode. The list of changes to packages is always printed, but no changes are actually made. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> dryrun=True quiet Force quiet output, except when dryrun is used, where pkg install will always show packages to be installed, upgraded or deleted. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> quiet=True reinstall_requires When used with force, reinstalls any packages that require the given package. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> reinstall_requires=True force=True .. versionchanged:: 2014.7.0 ``require`` kwarg renamed to ``reinstall_requires`` fromrepo In multi-repo mode, override the pkg.conf ordering and only attempt to download packages from the named repository. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> fromrepo=repo regex Treat the package names as a regular expression CLI Example: .. code-block:: bash salt '*' pkg.install <regular expression> regex=True pcre Treat the package names as extended regular expressions. CLI Example: .. code-block:: bash batch Use BATCH=true for pkg install, skipping all questions. Be careful when using in production. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> batch=True
13,575
def submitter(self):
    if self.api and self.submitter_id:
        return self.api._get_user(self.submitter_id)
| Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket
13,576
def load(self, steps_dir=None, step_file=None, step_list=None):
    self._closed()
    self.steps_library.load(steps_dir=steps_dir, step_file=step_file,
                            step_list=step_list)
Load CWL steps into the WorkflowGenerator's steps library. Adds steps (command line tools and workflows) to the ``WorkflowGenerator``'s steps library. These steps can be used to create workflows. Args: steps_dir (str): path to directory containing CWL files. All CWL in the directory are loaded. step_file (str): path to a file containing a CWL step that will be added to the steps library.
13,577
def signature_type(self):
    if not self.mardata.signatures:
        return None
    for sig in self.mardata.signatures.sigs:
        # Return values filled in from the docstring below:
        # algorithm_id 1 -> 'sha1', 2 -> 'sha384', anything else -> 'unknown'
        if sig.algorithm_id == 1:
            return 'sha1'
        elif sig.algorithm_id == 2:
            return 'sha384'
        else:
            return 'unknown'
Return the signature type used in this MAR. Returns: One of None, 'unknown', 'sha1', or 'sha384'
13,578
def unfix(self, param):
    if param == "delta":
        self._unfix("logistic")
    else:
        self._fix[param] = False
Enable parameter optimization. Parameters ---------- param : str Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
13,579
def set_input_data(self, key, value):
    # Create the input channel on demand before writing to it.
    if key not in self.input_channels.keys():
        self.set_input_channel(key, Channel())
    self.input_channels[key].set_value(Data(self.time, value))
set_input_data will automatically create an input channel if necessary. Automatic channel creation is intended for the case where users are trying to set initial values on a block whose input channels aren't subscribed to anything in the graph.
13,580
def link_for_image(self, base_dir: str, conf: Config) -> int:
    return self.link_types(
        base_dir,
        [ArtifactType.app, ArtifactType.binary, ArtifactType.gen_py],
        conf)
Link all artifacts required for a Docker image under `base_dir` and return the number of linked artifacts.
13,581
def link(self, thing1, thing2):
    thing1 = thing1.strip().lower()
    thing2 = thing2.strip().lower()
    if thing1 == thing2:
        raise SameName("Attempted to link two of the same name")
    # Ensure both entries exist before linking them.
    self.change(thing1, 0)
    self.change(thing2, 0)
    return self._link(thing1, thing2)
Link thing1 and thing2, adding the karma of each into a single entry. If any thing does not exist, it is created.
13,582
def show_support_save_status_output_show_support_save_status_message(self, **kwargs):
    config = ET.Element("config")
    show_support_save_status = ET.Element("show_support_save_status")
    config = show_support_save_status
    output = ET.SubElement(show_support_save_status, "output")
    show_support_save_status = ET.SubElement(output, "show-support-save-status")
    message = ET.SubElement(show_support_save_status, "message")
    message.text = kwargs.pop('message')  # key name assumed from the element it populates
    callback = kwargs.pop('callback', self._callback)  # key name assumed from the standard callback pattern
    return callback(config)
Auto Generated Code
13,583
def compute_auth_key(userid, password):
    import sys
    if sys.version_info >= (3, 0):
        return hashlib.sha1(b"|".join((userid.encode("ascii"),
                                       password.encode("ascii")))).hexdigest()
    return hashlib.sha1("|".join((userid, password))).hexdigest()
Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point)
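For illustration, the same digest can be produced directly with hashlib; the credentials below are made up:

import hashlib

# Equivalent to compute_auth_key('alice', 's3cret') on Python 3
key = hashlib.sha1(b'alice|s3cret').hexdigest()
print(key)  # 40-character hex digest used as the freedns.afraid.org auth key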
13,584
def get_all_profiles(store=): return { : get_all_settings(profile=, store=store), : get_all_settings(profile=, store=store), : get_all_settings(profile=, store=store) }
Gets all properties for all profiles in the specified store Args: store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: dict: A dictionary containing the specified settings for each profile
13,585
def storages(self): return storage.StorageCollection( self._conn, utils.get_subresource_path_by(self, ), redfish_version=self.redfish_version)
This property gets the list of instances for Storages. :returns: a list of instances of Storages
13,586
def make_psrrates(pkllist, nbins=60, period=0.156): state = pickle.load(open(pkllist[0], )) if in state[]: immaxcol = state[].index() logger.info() elif in state[]: try: immaxcol = state[].index() logger.info() except: immaxcol = state[].index() logger.info() for pklfile in pkllist: loc, prop = read_candidates(pklfile) ffm = [] if (loc): times = int2mjd(state, loc) for (mint,maxt) in zip(np.arange(times.min()-period/2,times.max()+period/2,period), np.arange(times.min()+period/2,times.max()+3*period/2,period)): ff = np.array([prop[i][immaxcol] for i in range(len(prop))]) mm = ff[np.where( (times >= mint) & (times < maxt) )] if mm: ffm.append(mm.max()) ffm.sort() logger.info( % len(ffm)) if pkllist.index(pklfile) == 0: duration0 = times.max() - times.min() ratemin = 1/duration0 ratemax = len(ffm)/duration0 rates = np.linspace(ratemin, ratemax, nbins) f0m = ffm elif pkllist.index(pklfile) == 1: duration1 = times.max() - times.min() f1m = ffm elif pkllist.index(pklfile) == 2: f2m = ffm elif pkllist.index(pklfile) == 3: f3m = ffm f0 = []; f1 = []; f2 = []; f3 = [] for rr in rates: num0 = (np.round(rr*duration0)).astype(int) num1 = (np.round(rr*duration1)).astype(int) if (num0 > 0) and (num0 <= len(f0m)): f0.append((rr,f0m[-num0])) if (num1 > 0) and (num1 <= len(f1m)): f1.append((rr,f1m[-num1])) if (num1 > 0) and (num1 <= len(f2m)): f2.append((rr,f2m[-num1])) if len(pkllist) == 4: if f3m: if (num1 > 0) and (num1 <= len(f3m)): f3.append((rr,f3m[-num1])) if f3: return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose(), 3: np.array(f3).transpose()} else: return {0: np.array(f0).transpose(), 1: np.array(f1).transpose(), 2: np.array(f2).transpose()}
Visualize cands in set of pkl files from pulsar observations. Input pkl list assumed to start with on-axis pulsar scan, followed by off-axis scans. nbins for output histogram. period is pulsar period in seconds (used to find single peak for cluster of detections).
13,587
def Detect(self, baseline, host_data):
    result = CheckResult()
    for detector in self.detectors:
        finding = detector(baseline, host_data)
        if finding:
            result.ExtendAnomalies([finding])
    if result:
        return result
Run host_data through detectors and return them if a detector triggers. Args: baseline: The base set of rdf values used to evaluate whether an issue exists. host_data: The rdf values passed back by the filters. Returns: A CheckResult message containing anomalies if any detectors identified an issue, None otherwise.
13,588
def get_instance(self, payload):
    return ConnectAppInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],  # solution key assumed to match the kwarg
    )
Build an instance of ConnectAppInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance :rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppInstance
13,589
def dqdv_cycle(cycle, splitter=True, **kwargs):
    # Split the cycle into its discharge (-1) and charge (1) branches.
    c_first = cycle.loc[cycle["direction"] == -1]
    c_last = cycle.loc[cycle["direction"] == 1]

    converter = Converter(**kwargs)
    converter.set_data(c_first["capacity"], c_first["voltage"])
    converter.inspect_data()
    converter.pre_process_data()
    converter.increment_data()
    converter.post_process_data()
    voltage_first = converter.voltage_processed
    incremental_capacity_first = converter.incremental_capacity
    if splitter:
        # Insert a NaN row so plots break between charge and discharge.
        voltage_first = np.append(voltage_first, np.NaN)
        incremental_capacity_first = np.append(incremental_capacity_first, np.NaN)

    converter = Converter(**kwargs)
    converter.set_data(c_last["capacity"], c_last["voltage"])
    converter.inspect_data()
    converter.pre_process_data()
    converter.increment_data()
    converter.post_process_data()
    voltage_last = converter.voltage_processed[::-1]
    incremental_capacity_last = converter.incremental_capacity[::-1]

    voltage = np.concatenate((voltage_first, voltage_last))
    incremental_capacity = np.concatenate(
        (incremental_capacity_first, incremental_capacity_last))

    return voltage, incremental_capacity
Convenience function for creating dq/dv data from a given capacity-voltage cycle. Args: cycle (pandas.DataFrame): the cycle data ('voltage', 'capacity', 'direction' (1 or -1)). splitter (bool): insert a np.NaN row between charge and discharge. Returns: A tuple of numpy arrays (voltage, incremental_capacity) covering both the charge and the discharge branch of the cycle. Example: >>> cycle_df = my_data.get_cap( >>> ... 1, >>> ... categorical_column=True, >>> ... method = "forth-and-forth" >>> ... ) >>> voltage, incremental = ica.dqdv_cycle(cycle_df)
13,590
def deleteRole(self, *args, **kwargs):
    return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
Delete Role Delete a role. This operation will succeed regardless of whether or not the role exists. This method is ``stable``
13,591
def filter(self, source_file, encoding): with codecs.open(source_file, , encoding=encoding) as f: text = f.read() return [filters.SourceText(self._filter(text), source_file, encoding, )]
Parse file.
13,592
def is_all_field_none(self):
    if self._color is not None:
        return False
    if self._alias is not None:
        return False
    if self._description is not None:
        return False
    if self._attachment is not None:
        return False
    if self._pointer is not None:
        return False
    if self._status is not None:
        return False
    if self._redirect_url is not None:
        return False
    return True
:rtype: bool
13,593
def add(self, data_source, module, package=None):
    super(Data, self).add(data_source, module, package)
    if data_source not in self.layer:
        # Dict key names assumed to mirror the argument names.
        self.layer[data_source] = {'module': module, 'package': package}
    self.objects[data_source] = None
Add data_source to model. Tries to import module, then looks for data source class definition. :param data_source: Name of data source to add. :type data_source: str :param module: Module in which data source resides. Can be absolute or relative. See :func:`importlib.import_module` :type module: str :param package: Optional, but must be used if module is relative. :type package: str .. seealso:: :func:`importlib.import_module`
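The module/package pair is handed to :func:`importlib.import_module`, so relative module names need a package; a short illustration with hypothetical names:

import importlib

# Absolute module path: no package needed
mod = importlib.import_module('mymodels.sandia')

# Relative module path: package is required,
# mirroring add(data_source, '.sandia', package='mymodels')
mod = importlib.import_module('.sandia', package='mymodels')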
13,594
def clone(self, callable=None, **overrides): old = {k: v for k, v in self.get_param_values() if k not in [, ]} params = dict(old, **overrides) callable = self.callable if callable is None else callable return self.__class__(callable, **params)
Clones the Callable optionally with new settings Args: callable: New callable function to wrap **overrides: Parameter overrides to apply Returns: Cloned Callable object
13,595
def do_hit(self, arg): if arg[]: self.hit_create(arg[], arg[], arg[]) self.update_hit_tally() elif arg[]: self.amt_services_wrapper.hit_extend(arg[], arg[], arg[]) elif arg[]: self.amt_services_wrapper.hit_expire(arg[], arg[]) self.update_hit_tally() elif arg[] or arg[]: self.amt_services_wrapper.hit_delete(arg[], arg[]) self.update_hit_tally() elif arg[]: self.hit_list(arg[], arg[], arg[]) else: self.help_hit()
Usage: hit create [<numWorkers> <reward> <duration>] hit extend <HITid> [(--assignments <number>)] [(--expiration <minutes>)] hit expire (--all | <HITid> ...) hit dispose (--all | <HITid> ...) hit delete (--all | <HITid> ...) hit list [--active | --reviewable] [--all-studies] hit help
13,596
def single(self, predicate):
    result = self.where(predicate).to_list()
    count = len(result)
    if count == 0:
        raise NoMatchingElement("No matching element found")
    if count > 1:
        raise MoreThanOneMatchingElement(
            "More than one matching element found. Use where instead"
        )
    return result[0]
Returns single element that matches given predicate. Raises: * NoMatchingElement error if no matching elements are found * MoreThanOneMatchingElement error if more than one matching element is found :param predicate: predicate as a lambda expression :return: Matching element as object
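A usage sketch, assuming an Enumerable-style wrapper (the class name is an assumption) around a plain list:

numbers = Enumerable([1, 2, 3, 4])

numbers.single(lambda x: x == 3)      # returns 3
numbers.single(lambda x: x > 10)      # raises NoMatchingElement
numbers.single(lambda x: x % 2 == 0)  # raises MoreThanOneMatchingElement (2 and 4 both match)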
13,597
def cmd(send, msg, args): uid = args[][][] token = args[][][] parser = arguments.ArgParser(args[]) parser.add_argument(, action=arguments.ZipParser) try: cmdargs = parser.parse_args(msg) except arguments.ArgumentException as e: send(str(e)) return req = get("http://www.stands4.com/services/v2/zip.php", params={: uid, : token, : cmdargs.zipcode}) xml = etree.fromstring(req.content, parser=etree.XMLParser(recover=True)) location = xml.find().text send("%s: %s" % (cmdargs.zipcode, location))
Gets the location of a ZIP code Syntax: {command} (zipcode) Powered by STANDS4, www.stands4.com
13,598
def sendCommands(comPort, commands): mutex.acquire() try: try: port = serial.Serial(port=comPort) header = footer = for command in _translateCommands(commands): _sendBinaryData(port, header + command + footer) except serial.SerialException: print( % comPort) print() raise finally: mutex.release()
Send X10 commands using the FireCracker on comPort comPort should be the name of a serial port on the host platform. On Windows, for example, 'com1'. commands should be a string consisting of X10 commands separated by commas. For example. 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The letter is a house code (A-P) and the number is the device number (1-16). Possible commands for a house code / device number combination are 'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a house code alone after sending an On command to a specific device. The 'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also be used with a house code alone. # Turn on module A1 >>> sendCommands('com1', 'A1 On') # Turn all modules with house code A off >>> sendCommands('com1', 'A All Off') # Turn all lamp modules with house code B on >>> sendCommands('com1', 'B Lamps On') # Turn on module A1 and dim it 3 steps, then brighten it 1 step >>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')
13,599
def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None):
    R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
    R._S = super(Context, self).monitor(name, R._event, request)
    return R
Create a subscription. :param str name: PV name string :param callable cb: Processing callback :param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default. :param bool notify_disconnect: In additional to Values, the callback may also be call with instances of Exception. Specifically: Disconnected , RemoteError, or Cancelled :param WorkQueue queue: A work queue through which monitor callbacks are dispatched. :returns: a :py:class:`Subscription` instance The callable will be invoked with one argument which is either. * A p4p.Value (Subject to :py:ref:`unwrap`) * A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
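A minimal subscription sketch, assuming `ctxt` is an existing Context and 'demo:pv' is a placeholder PV name:

# The callback receives p4p Values, or Exception instances when notify_disconnect=True
def on_update(value):
    if isinstance(value, Exception):
        print('monitor interrupted:', value)
    else:
        print('update:', value)

sub = ctxt.monitor('demo:pv', on_update, notify_disconnect=True)
# ... later, stop receiving updates
sub.close()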