Unnamed: 0: int64, range 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
9,800
def pair(args):
    p = OptionParser(pair.__doc__)
    p.set_sep(sep=None, help="Separator in name to reduce to clone id" +
              "e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .")
    p.add_option("-m", dest="matepairs", default=False, action="store_true",
                 help="generate .matepairs file [often used for Celera Assembler]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    fastafile, = args
    qualfile = get_qual(fastafile)

    prefix = fastafile.rsplit(".", 1)[0]
    pairsfile = prefix + ".pairs.fasta"
    fragsfile = prefix + ".frags.fasta"
    pairsfw = open(pairsfile, "w")
    fragsfw = open(fragsfile, "w")

    if opts.matepairs:
        matepairsfile = prefix + ".matepairs"
        matepairsfw = open(matepairsfile, "w")

    if qualfile:
        pairsqualfile = pairsfile + ".qual"
        pairsqualhandle = open(pairsqualfile, "w")
        fragsqualfile = fragsfile + ".qual"
        fragsqualhandle = open(fragsqualfile, "w")

    f = Fasta(fastafile)
    if qualfile:
        q = SeqIO.index(qualfile, "qual")

    all_keys = list(f.keys())
    all_keys.sort()
    sep = opts.sep

    if sep:
        key_fun = lambda x: x.split(sep, 1)[0]
    else:
        key_fun = lambda x: x[:-1]

    for key, variants in groupby(all_keys, key=key_fun):
        variants = list(variants)
        paired = (len(variants) == 2)

        if paired and opts.matepairs:
            print("\t".join(("%s/1" % key, "%s/2" % key)), file=matepairsfw)

        fw = pairsfw if paired else fragsfw
        if qualfile:
            qualfw = pairsqualhandle if paired else fragsqualhandle

        for i, var in enumerate(variants):
            rec = f[var]
            if qualfile:
                recqual = q[var]
            newid = "%s/%d" % (key, i + 1)

            rec.id = newid
            rec.description = ""
            SeqIO.write([rec], fw, "fasta")
            if qualfile:
                recqual.id = newid
                recqual.description = ""
                SeqIO.write([recqual], qualfw, "qual")

    logging.debug("sequences written to `%s` and `%s`" % (pairsfile, fragsfile))
    if opts.matepairs:
        logging.debug("mates written to `%s`" % matepairsfile)
%prog pair fastafile

Generate .pairs.fasta and .fragments.fasta by matching records into pairs; the rest go to fragments.
9,801
def char_range(starting_char, ending_char):
    assert isinstance(starting_char, str), "starting_char must be a str"
    assert isinstance(ending_char, str), "ending_char must be a str"
    for char in range(ord(starting_char), ord(ending_char) + 1):
        yield chr(char)
Create a range generator for chars
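For illustration, a quick usage sketch of the generator above:

    list(char_range('a', 'e'))   # -> ['a', 'b', 'c', 'd', 'e']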
9,802
def _cleanup(self):
    for channel in self._open_channels:
        try:
            self.disconnect_channel(channel)
        except Exception:
            pass

    for handler in self._handlers.values():
        try:
            handler.tear_down()
        except Exception:
            pass

    try:
        self.socket.close()
    except Exception:
        self.logger.exception(
            "[%s:%s] _cleanup", self.fn or self.host, self.port)

    self._report_connection_status(
        ConnectionStatus(CONNECTION_STATUS_DISCONNECTED,
                         NetworkAddress(self.host, self.port)))

    self.connecting = True
Cleanup open channels and handlers
9,803
def trimSegments(self, minPermanence=None, minNumSyns=None):
    if minPermanence is None:
        minPermanence = self.connectedPerm
    if minNumSyns is None:
        minNumSyns = self.activationThreshold

    totalSegsRemoved, totalSynsRemoved = 0, 0
    for c, i in product(xrange(self.numberOfCols),
                        xrange(self.cellsPerColumn)):
        (segsRemoved, synsRemoved) = self.trimSegmentsInCell(
            colIdx=c, cellIdx=i, segList=self.cells[c][i],
            minPermanence=minPermanence, minNumSyns=minNumSyns)
        totalSegsRemoved += segsRemoved
        totalSynsRemoved += synsRemoved

    return totalSegsRemoved, totalSynsRemoved
This method deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining.

Parameters:
--------------------------------------------------------------
minPermanence: Any syn whose permanence is 0 or < minPermanence will be deleted. If None is passed in, then self.connectedPerm is used.
minNumSyns: Any segment with less than minNumSyns synapses remaining in it will be deleted. If None is passed in, then self.activationThreshold is used.
retval: (numSegsRemoved, numSynsRemoved)
9,804
def map_pixel(self, point_x, point_y):
    row, col = map_pixel(point_x, point_y,
                         self.x_cell_size, self.y_cell_size,
                         self.xmin, self.ymax)
    try:
        return self.raster[row, col]
    except IndexError:
        raise RasterGeoError()
geo.map_pixel(point_x, point_y)

Return the value of the raster at the given location. Note: (point_x, point_y) must belong to the geographic coordinate system and the coverage of the raster.
9,805
def sizeClassifier(path, min_size=DEFAULTS['min_size']):
    filestat = _stat(path)
    if stat.S_ISLNK(filestat.st_mode):
        return  # Skip symlinks.
    if filestat.st_size < min_size:
        return  # Skip files smaller than the threshold.
    return filestat.st_size
Sort a file into a group based on on-disk size.

:param path: See :func:`fastdupes.groupify`
:param min_size: Files smaller than this size (in bytes) will be ignored.
:type min_size: :class:`__builtins__.int`
:returns: See :func:`fastdupes.groupify`

.. todo:: Rework the calling of :func:`~os.stat` to minimize the number of calls. It's a fairly significant percentage of the time taken according to the profiler.
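A hypothetical call, assuming the DEFAULTS['min_size'] default reconstructed above; the path is a placeholder:

    size = sizeClassifier('/tmp/example.bin')   # st_size in bytes, or None for symlinks and under-sized files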
9,806
def to_dict(mapreduce_yaml):
    all_configs = []
    for config in mapreduce_yaml.mapreduce:
        out = {
            "name": config.name,
            "mapper_input_reader": config.mapper.input_reader,
            "mapper_handler": config.mapper.handler,
        }
        if config.mapper.params_validator:
            out["mapper_params_validator"] = config.mapper.params_validator
        if config.mapper.params:
            param_defaults = {}
            for param in config.mapper.params:
                param_defaults[param.name] = param.default or param.value
            out["mapper_params"] = param_defaults
        if config.params:
            param_defaults = {}
            for param in config.params:
                param_defaults[param.name] = param.default or param.value
            out["params"] = param_defaults
        if config.mapper.output_writer:
            out["mapper_output_writer"] = config.mapper.output_writer
        all_configs.append(out)

    return all_configs
Converts a MapReduceYaml file into a JSON-encodable dictionary.

For use in user-visible UI and internal methods for interfacing with user code (like param validation).

Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.

Returns:
    A list of configuration dictionaries.
9,807
def xml_to_metrics(xmlstr, object_type):
    # Element and attribute names are inferred from the XML samples in the
    # docstring; the original string literals were lost in extraction.
    xmldoc = minidom.parseString(xmlstr)
    return_obj = object_type()
    members = dict(vars(return_obj))

    for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc,
                                                                'entry'):
        for node in _MinidomXmlToObject.get_children_from_path(
                xml_entry, 'content', 'properties'):
            for name in members:
                xml_name = _get_serialization_name(name)
                children = _MinidomXmlToObject.get_child_nodes(node, xml_name)
                if not children:
                    continue
                child = children[0]
                node_type = child.getAttributeNS(
                    "http://schemas.microsoft.com/ado/2007/08/dataservices/metadata",
                    'type')
                node_value = _ServiceBusManagementXmlSerializer.odata_converter(
                    child.firstChild.nodeValue, node_type)
                setattr(return_obj, name, node_value)
        for name, value in _MinidomXmlToObject.get_entry_properties_from_node(
                xml_entry, include_id=True, use_title_as_id=False).items():
            if name in members:
                continue
            setattr(return_obj, name, value)

    return return_obj
Converts xml response to service bus metrics objects

The xml format for MetricProperties:

    <entry>
        <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
        <title/>
        <updated>2014-10-09T11:56:50Z</updated>
        <author>
            <name/>
        </author>
        <content type="application/xml">
            <m:properties>
                <d:Name>listeners.active</d:Name>
                <d:PrimaryAggregation>Average</d:PrimaryAggregation>
                <d:Unit>Count</d:Unit>
                <d:DisplayName>Active listeners</d:DisplayName>
            </m:properties>
        </content>
    </entry>

The xml format for MetricValues:

    <entry>
        <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
        <title/>
        <updated>2014-10-09T18:38:28Z</updated>
        <author>
            <name/>
        </author>
        <content type="application/xml">
            <m:properties>
                <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
                <d:Min m:type="Edm.Int64">-118</d:Min>
                <d:Max m:type="Edm.Int64">15</d:Max>
                <d:Average m:type="Edm.Single">-78.44444</d:Average>
                <d:Total m:type="Edm.Int64">0</d:Total>
            </m:properties>
        </content>
    </entry>
9,808
def create(self, alert_config, occurrence_frequency_count=None,
           occurrence_frequency_unit=None, alert_frequency_count=None,
           alert_frequency_unit=None):
    # Dict keys are plausible reconstructions; the original string literals
    # were lost in extraction. The 'hour' defaults follow the docstring.
    data = {
        'occurrence_frequency_count': occurrence_frequency_count or 1,
        'occurrence_frequency_unit': occurrence_frequency_unit or 'hour',
        'alert_frequency_count': alert_frequency_count or 1,
        'alert_frequency_unit': alert_frequency_unit or 'hour',
        'actions': [],
        'enabled': True,
    }
    data.update(alert_config.args())

    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.ACTIONS.value,
        params=data
    )
Create a new alert

:param alert_config: A list of AlertConfig classes (Ex: ``[EmailAlertConfig('[email protected]')]``)
:type alert_config: list of :class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`, :class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`, :class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`, :class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or :class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>`

:param occurrence_frequency_count: How many times per ``alert_frequency_unit`` for a match before issuing an alert. Defaults to 1
:type occurrence_frequency_count: int

:param occurrence_frequency_unit: The time period to monitor for sending an alert. Must be 'day', or 'hour'. Defaults to 'hour'
:type occurrence_frequency_unit: str

:param alert_frequency_count: How many times per ``alert_frequency_unit`` to issue an alert. Defaults to 1
:type alert_frequency_count: int

:param alert_frequency_unit: How often to regulate sending alerts. Must be 'day', or 'hour'. Defaults to 'hour'
:type alert_frequency_unit: str

:returns: The response of your post
:rtype: dict

:raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries
9,809
def save():
    from .models import ModuleInfo
    logger = logging.getLogger(__name__)
    logger.info("Saving changes")
    for module in modules():
        if module.enabled:
            if module.changed:
                module.save()
                module.restart()
                module.commit()
            else:
                # Debug messages are plausible reconstructions; the
                # original literals were lost in extraction.
                logger.debug('Module %s has no changes' % module.verbose_name)
        else:
            logger.debug('Module %s is disabled' % module.verbose_name)
    ModuleInfo.commit()
    logger.info("Changes saved")
Apply configuration changes on all the modules
9,810
def work(self):
    import time
    for val in range(200):
        time.sleep(0.1)
        self.sigUpdate.emit(val + 1)
Use a blocking <sleep> call to periodically trigger a signal.
9,811
def transform(self, data):
    out = []
    keys = sorted(data.keys())
    for k in keys:
        out.append("%s=%s" % (k, data[k]))
    return "\n".join(out)
:param data: Mapping to serialize.
:type data: dict
:return: One "key=value" line per entry, sorted by key and joined with newlines.
:rtype: str
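Since the method sorts keys and joins with newlines, a minimal sketch of the expected behavior (the instance name t is hypothetical):

    t.transform({'b': 2, 'a': 1})   # -> 'a=1\nb=2'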
9,812
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
    # Tag and attribute literals ('a', 'href', 'rel', 'class', 'page') are
    # reconstructed from context; the originals were lost in extraction.
    def get_next():
        candidates = self.find('a', containing=next_symbol)

        for candidate in candidates:
            if candidate.attrs.get('href'):
                # Support 'next' rel (e.g. reddit).
                if 'next' in candidate.attrs.get('rel', []):
                    return candidate.attrs['href']

                # Support 'next' in classnames.
                for _class in candidate.attrs.get('class', []):
                    if 'next' in _class:
                        return candidate.attrs['href']

                if 'page' in candidate.attrs['href']:
                    return candidate.attrs['href']

        try:
            # Resort to the last candidate.
            return candidates[-1].attrs['href']
        except IndexError:
            return None

    __next = get_next()
    if __next:
        url = self._make_absolute(__next)
    else:
        return None

    if fetch:
        return self.session.get(url)
    else:
        return url
Attempts to find the next page, if there is one. If ``fetch`` is ``True``, returns :class:`HTML <HTML>` object of the next page. If ``fetch`` is ``False`` (default), simply returns the next URL.
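A hedged usage sketch in the requests-html style suggested by the code above; the URL is a placeholder:

    from requests_html import HTMLSession

    session = HTMLSession()
    r = session.get('https://example.com/articles')
    url = r.html.next(fetch=False)   # next-page URL, or None if no candidate is found
    if url is not None:
        next_page = session.get(url)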
9,813
def run(self, *args):
    params = self.parser.parse_args(args)

    with params.outfile as outfile:
        if params.identities:
            code = self.export_identities(outfile, params.source)
        elif params.orgs:
            code = self.export_organizations(outfile)
        else:
            raise RuntimeError("Unexpected export option")

    return code
Export data from the registry. By default, it writes the data to the standard output. If a positional argument is given, it will write the data on that file.
9,814
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    if not local_port:
        local_port = remote_port
    tunnels = []

    def callback(channel, src_addr_tup, dst_addr_tup):
        sock = socket.socket()
        sock.connect((local_host, local_port))

    for tunnel in tunnels:
        tunnel.finished.set()
        tunnel.join()
    self.transport.cancel_port_forward(
        address=remote_host, port=remote_port
    )
Open a tunnel connecting ``remote_port`` to the local environment.

For example, say you're running a daemon in development mode on your workstation at port 8080, and want to funnel traffic to it from a production or staging environment. In most situations this isn't possible as your office/home network probably blocks inbound traffic. But you have SSH access to this server, so you can temporarily make port 8080 on that server act like port 8080 on your workstation::

    from fabric import Connection

    c = Connection('my-remote-server')
    with c.forward_remote(8080):
        c.run("remote-data-writer --port 8080")
        # Assuming remote-data-writer runs until interrupted, this will
        # stay open until you Ctrl-C...

This method is analogous to using the ``-R`` option of OpenSSH's ``ssh`` program.

:param int remote_port: The remote port number on which to listen.
:param int local_port: The local port number. Defaults to the same value as ``remote_port``.
:param str local_host: The local hostname/interface the forwarded connection talks to. Default: ``localhost``.
:param str remote_host: The remote interface address to listen on when forwarding connections. Default: ``127.0.0.1`` (i.e. only listen on the remote localhost).

:returns: Nothing; this method is only useful as a context manager affecting local operating system state.

.. versionadded:: 2.0
9,815
def _version_string():
    # The returned format string is a plausible reconstruction; the original
    # literal was lost in extraction.
    platform_system = platform.system()
    if platform_system == 'Linux':
        os_name, os_version, _ = platform.dist()
    else:
        os_name = platform_system
        os_version = platform.version()
    python_version = platform.python_version()
    return 'trytravis %s (%s %s) python/%s' % (
        __version__, os_name.lower(), os_version, python_version)
Gets the output for `trytravis --version`.
9,816
def select_eep(self, rorg_func, rorg_type, direction=None, command=None):
    self.rorg_func = rorg_func
    self.rorg_type = rorg_type
    self._profile = self.eep.find_profile(self._bit_data, self.rorg,
                                          rorg_func, rorg_type,
                                          direction, command)
    return self._profile is not None
Set EEP based on FUNC and TYPE
9,817
def project_invite(object_id, input_params={}, always_retry=False, **kwargs):
    return DXHTTPRequest('/%s/invite' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /project-xxxx/invite API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2Finvite
9,818
def update_many(self, **kwargs):
    db_objects = self.get_dbcollection_with_es(**kwargs)
    return self.Model._update_many(db_objects, self._json_params, self.request)
Update multiple objects from collection.

First ES is queried, then the results are used to query the DB. This is done to make sure the updated objects are those filtered by ES in the 'index' method (so users update what they saw).
9,819
def from_triple(cls, triple):
    with ffi.OutputString() as outerr:
        target = ffi.lib.LLVMPY_GetTargetFromTriple(triple.encode(), outerr)
        if not target:
            raise RuntimeError(str(outerr))
    target = cls(target)
    target._triple = triple
    return target
Create a Target instance for the given triple (a string).
9,820
def table_exists(self, table):
    if not self.dataset_exists(table.dataset):
        return False

    try:
        self.client.tables().get(projectId=table.project_id,
                                 datasetId=table.dataset_id,
                                 tableId=table.table_id).execute()
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return False
        raise

    return True
Returns whether the given table exists.

:param table:
:type table: BQTable
9,821
def read_probes(self, key=None):
    # The print label is a plausible reconstruction; the original literal
    # was lost in extraction.
    print(('read_probes', key, self._PROBES()))

    if key is None:
        d = {}
        for k in list(self._PROBES.keys()):
            d[k] = self.read_probes(k)
        return d
    else:
        assert key in list(self._PROBES.keys())
        value = None
        return value
Function is overloaded:
    - read_probes()
    - read_probes(key)

Args:
    key: name of requested value

Returns:
    - if called without argument: returns the values of all probes in dictionary form
    - if called with argument: returns the value of the requested key
9,822
def _check_restart_params(self, restart_strategy, min_beta, s_greedy,
                          xi_restart):
    # Mode/strategy names and error messages are plausible reconstructions;
    # the original string literals were lost in extraction.
    if restart_strategy is None:
        return True

    if self.mode != 'regular':
        raise ValueError('Restarting strategies can only be used with '
                         'regular mode.')

    greedy_params_check = (min_beta is None or s_greedy is None or
                           s_greedy <= 1)

    if restart_strategy == 'greedy' and greedy_params_check:
        raise ValueError('You need a min_beta and an s_greedy > 1 for '
                         'greedy restart.')

    if xi_restart is None or xi_restart >= 1:
        raise ValueError('You need a xi_restart < 1 for restarting.')

    return True
r""" Check restarting parameters This method checks that the restarting parameters are set and satisfy the correct assumptions. It also checks that the current mode is regular (as opposed to CD for now). Parameters ---------- restart_strategy: str or None name of the restarting strategy. If None, there is no restarting. Defaults to None. min_beta: float or None the minimum beta when using the greedy restarting strategy. Defaults to None. s_greedy: float or None. parameter for the safeguard comparison in the greedy restarting strategy. It has to be > 1. Defaults to None. xi_restart: float or None. mutlitplicative parameter for the update of beta in the greedy restarting strategy and for the update of r_lazy in the adaptive restarting strategies. It has to be > 1. Defaults to None. Returns ------- bool: True Raises ------ ValueError When a parameter that should be set isn't or doesn't verify the correct assumptions.
9,823
def _noneload(l: Loader, value, type_) -> None:
    if value is None:
        return None
    raise TypedloadValueError('Not None', value=value, type_=type_)
Loads a value that can only be None, so it fails if it isn't
9,824
async def close_async(self, context, reason):
    logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
        reason,
        context.partition_id,
        context.offset,
        context.sequence_number))
Called by processor host to indicate that the event processor is being stopped. :param context: Information about the partition :type context: ~azure.eventprocessorhost.PartitionContext
9,825
def attributes(self) -> Sequence[bytes]:
    # Attribute name literals follow RFC 3348 and the docstring; the
    # original byte-string literals were lost in extraction.
    ret: List[bytes] = []
    if not self.exists:
        ret.append(b'Noselect')
    if self.has_children:
        ret.append(b'HasChildren')
    else:
        ret.append(b'HasNoChildren')
    if self.marked is True:
        ret.append(b'Marked')
    elif self.marked is False:
        ret.append(b'Unmarked')
    return ret
The mailbox attributes that should be returned with the mailbox in a ``LIST`` response, e.g. ``\\Noselect``. See Also: `RFC 3348 <https://tools.ietf.org/html/rfc3348>`_
9,826
def activate(self, *, filter_func=None):
    if self.active:
        raise RuntimeError("Type safety check already active")
    self.__module_finder = ModuleFinder(Validator.decorate)
    if filter_func is not None:
        self.__module_finder.set_filter(filter_func)
    self.__module_finder.install()
Activate the type safety checker. After this call, all functions that need to be checked will be.
9,827
def __init(self):
    params = {"f": "json"}
    json_dict = self._get(url=self._url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    attributes = [attr for attr in dir(self)
                  if not attr.startswith('__') and
                  not attr.startswith('_')]
    for k, v in json_dict.items():
        if k in attributes:
            setattr(self, "_" + k, json_dict[k])
        else:
            print(k, " - attribute not implemented in RouteNetworkLayer.")
    del k, v
initializes all the properties
9,828
def is_reached(self, uid=None):
    if self.reached_limit:
        return True

    if uid:
        if uid in self.seen:
            return False
        self.count += 1
        self.seen.add(uid)
    else:
        self.count += 1

    if self.count > self.limit:
        if self.warning:
            self.warning(
                "Check {} exceeded limit of {} {}, ignoring next ones".format(
                    self.check_name, self.limit, self.name
                )
            )
        self.reached_limit = True
        return True
    return False
is_reached is to be called for every object that counts towards the limit.

- When called with no uid, the Limiter assumes this is a new object and unconditionally increments the counter (less CPU and memory usage).
- When a given object can be passed multiple times, a uid must be provided to deduplicate calls. Only the first occurrence of a uid will increment the counter.

:param uid: (optional) unique identifier of the object, to deduplicate calls
:returns: boolean, true if limit exceeded
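A minimal usage sketch, assuming a Limiter(check_name, name, limit) constructor consistent with the attributes referenced above; metrics and submit() are hypothetical:

    limiter = Limiter('disk_check', 'metrics', 350)
    for metric in metrics:
        if limiter.is_reached(uid=metric.name):
            break   # limit hit; remaining metrics are ignored
        submit(metric)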
9,829
def PhenomModel(self, r):
    if r <= 0:
        raise ValueError
    field = self.B0 + self.B1 * G4.m / r \
        + self.B2 * math.exp(-1 * self.H * r / G4.m)
    return field
Fit to field map

A phenomenological fit by Ryan Bayes (Glasgow) to a field map generated by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated January 30th, 2012.

Not defined for r <= 0
9,830
def plot(x, fmt='-', marker=None, markers=None, linestyle=None,
         linestyles=None, color=None, colors=None, palette='hls', group=None,
         hue=None, labels=None, legend=None, title=None, size=None, elev=10,
         azim=-60, ndims=3, model=None, model_params=None,
         reduce='IncrementalPCA', cluster=None, align=None, normalize=None,
         n_clusters=None, save_path=None, animate=False, duration=30,
         tail_duration=2, rotations=2, zoom=1, chemtrails=False, precog=False,
         bullettime=False, frame_rate=50, explore=False, show=True,
         transform=None, vectorizer='CountVectorizer',
         semantic='LatentDirichletAllocation', corpus='wiki', ax=None):
    # NOTE: default values, dict keys, and warning/error messages are
    # plausible reconstructions; the original string literals were lost
    # in extraction.
    if (model is not None) or (model_params is not None):
        warnings.warn('Model and model_params will be deprecated; please use the reduce keyword.')
        reduce = {}
        reduce['model'] = model
        reduce['params'] = model_params

    if group is not None:
        warnings.warn('Group will be deprecated; please use the hue keyword.')
        hue = group

    if ax is not None:
        if ndims > 2:
            if ax.name != '3d':
                raise ValueError('If passing ax for a 3D plot, ax must have a 3d projection.')

    text_args = {
        'vectorizer': vectorizer,
        'semantic': semantic,
        'corpus': corpus
    }

    if transform is None:
        raw = format_data(x, **text_args)
        xform = analyze(raw, ndims=ndims, normalize=normalize, reduce=reduce,
                        align=align, internal=True)
    else:
        xform = transform
    xform_data = copy.copy(xform)

    mpl_kwargs = {}
    if color is not None:
        mpl_kwargs['color'] = color
    if colors is not None:
        mpl_kwargs['color'] = colors
        warnings.warn('Colors will be deprecated; please use the color keyword.')
    if linestyle is not None:
        mpl_kwargs['linestyle'] = linestyle
    if linestyles is not None:
        mpl_kwargs['linestyle'] = linestyles
        warnings.warn('Linestyles will be deprecated; please use the linestyle keyword.')
    if marker is not None:
        mpl_kwargs['marker'] = marker
    if markers is not None:
        mpl_kwargs['marker'] = markers
        warnings.warn('Markers will be deprecated; please use the marker keyword.')

    if (ndims and ndims < 3):
        xform = reducer(xform, ndims=ndims, reduce=reduce, internal=True)
    else:
        xform = reducer(xform, ndims=3, reduce=reduce, internal=True)

    if cluster is not None:
        if hue is not None:
            warnings.warn('Cluster overrides the hue keyword; ignoring hue.')
        if isinstance(cluster, (six.string_types, six.binary_type)):
            model = cluster
            params = default_params(model)
        elif isinstance(cluster, dict):
            model = cluster['model']
            params = default_params(model, cluster['params'])
        else:
            raise ValueError('Invalid cluster model; must be a string or a dictionary.')
        if n_clusters is not None:
            if cluster in ('HDBSCAN',):
                warnings.warn('n_clusters is not a valid parameter for HDBSCAN clustering and will be ignored.')
            else:
                params['n_clusters'] = n_clusters
        cluster_labels = clusterer(xform, cluster={'model': model, 'params': params})
        xform, labels = reshape_data(xform, cluster_labels, labels)
        hue = cluster_labels
    elif n_clusters is not None:
        cluster_labels = clusterer(xform, cluster='KMeans', n_clusters=n_clusters)
        xform, labels = reshape_data(xform, cluster_labels, labels)
        if hue is not None:
            warnings.warn('n_clusters overrides the hue keyword; ignoring hue.')
    elif hue is not None:
        if color is not None:
            warnings.warn("Using group, color keyword will be ignored.")
        if any(isinstance(el, list) for el in hue):
            hue = list(itertools.chain(*hue))
        if all(isinstance(el, int) or isinstance(el, float) for el in hue):
            hue = vals2bins(hue)
        elif all(isinstance(el, str) for el in hue):
            hue = group_by_category(hue)
        if n_clusters is None:
            xform, labels = reshape_data(xform, hue, labels)
        if is_line(fmt):
            xform = patch_lines(xform)

    if legend is not None:
        if legend is False:
            legend = None
        elif legend is True and hue is not None:
            legend = [item for item in sorted(set(hue), key=list(hue).index)]
        elif legend is True and hue is None:
            legend = [i + 1 for i in range(len(xform))]
        mpl_kwargs['label'] = legend

    if fmt is None or isinstance(fmt, six.string_types):
        if is_line(fmt):
            if xform[0].shape[0] > 1:
                xform = interp_array_list(
                    xform,
                    interp_val=frame_rate * duration / (xform[0].shape[0] - 1))
    elif type(fmt) is list:
        for idx, xi in enumerate(xform):
            if is_line(fmt[idx]):
                if xi.shape[0] > 1:
                    xform[idx] = interp_array_list(
                        xi,
                        interp_val=frame_rate * duration / (xi.shape[0] - 1))

    if explore:
        assert xform[0].shape[1] is 3, "Explore mode is currently only supported for 3D plots."
        mpl_kwargs['picker'] = True

    xform = center(xform)
    xform = scale(xform)

    if isinstance(palette, np.bytes_):
        palette = palette.decode("utf-8")
    sns.set_palette(palette=palette, n_colors=len(xform))
    sns.set_style(style='whitegrid')

    kwargs_list = parse_kwargs(xform, mpl_kwargs)

    if fmt is not None:
        if type(fmt) is not list:
            draw_fmt = [fmt for i in xform]
        else:
            draw_fmt = fmt
    else:
        draw_fmt = ['-'] * len(x)

    for i, xi in enumerate(xform):
        xform[i] = np.nan_to_num(xi)

    fig, ax, data, line_ani = _draw(xform, fmt=draw_fmt,
                                    kwargs_list=kwargs_list,
                                    labels=labels,
                                    legend=legend,
                                    title=title,
                                    animate=animate,
                                    duration=duration,
                                    tail_duration=tail_duration,
                                    rotations=rotations,
                                    zoom=zoom,
                                    chemtrails=chemtrails,
                                    precog=precog,
                                    bullettime=bullettime,
                                    frame_rate=frame_rate,
                                    elev=elev,
                                    azim=azim,
                                    explore=explore,
                                    show=show,
                                    size=size,
                                    ax=ax)

    plt.tight_layout()

    if save_path is not None:
        if animate:
            Writer = animation.writers['ffmpeg']
            writer = Writer(fps=frame_rate, bitrate=1800)
            line_ani.save(save_path, writer=writer)
        else:
            plt.savefig(save_path)

    if show:
        plt.show()
    else:
        for kwarg in kwargs:
            if isinstance(kwargs[kwarg], list):
                try:
                    kwargs[kwarg] = np.array(kwargs[kwarg])
                except:
                    warnings.warn('Could not convert all list arguments to numpy arrays.')

    return DataGeometry(fig=fig, ax=ax, data=x, xform_data=xform_data,
                        line_ani=line_ani, reduce=reduce_dict,
                        align=align_dict, normalize=normalize,
                        semantic=semantic, vectorizer=vectorizer,
                        corpus=corpus, kwargs=kwargs)
Plots dimensionality reduced data and parses plot arguments

Parameters
----------
x : Numpy array, DataFrame, String, Geo or mixed list
    Data for the plot. The form should be samples (rows) by features (cols).
fmt : str or list of strings
    A list of format strings. All matplotlib format strings are supported.
linestyle(s) : str or list of str
    A list of line styles
marker(s) : str or list of str
    A list of marker types
color(s) : str or list of str
    A list of marker types
palette : str
    A matplotlib or seaborn color palette
group : str/int/float or list
    A list of group labels. Length must match the number of rows in your dataset. If the data type is numerical, the values will be mapped to rgb values in the specified palette. If the data type is strings, the points will be labeled categorically. To label a subset of points, use None (i.e. ['a', None, 'b', 'a']).
labels : list
    A list of labels for each point. Must be dimensionality of data (x). If no label is wanted for a particular point, input None.
legend : list or bool
    If set to True, legend is implicitly computed from data. Passing a list will add string labels to the legend (one for each list item).
title : str
    A title for the plot
size : list
    A list of [width, height] in inches to resize the figure
normalize : str or False
    If set to 'across', the columns of the input data will be z-scored across lists (default). If set to 'within', the columns will be z-scored within each list that is passed. If set to 'row', each row of the input data will be z-scored. If set to False, the input data will be returned (default is False).
reduce : str or dict
    Decomposition/manifold learning model to use. Models supported: PCA, IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}. See scikit-learn specific model docs for details on parameters supported for each model.
ndims : int
    An `int` representing the number of dims to reduce the data x to. If ndims > 3, will plot in 3 dimensions but return the higher dimensional data. Default is None, which will plot data in 3 dimensions and return the data with the same number of dimensions possibly normalized and/or aligned according to normalize/align kwargs.
align : str or dict or False/None
    If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be hyperalignment. If 'SRM', alignment algorithm will be shared response model. You can also pass a dictionary for finer control, where the 'model' key is a string that specifies the model and the params key is a dictionary of parameter values (default : 'hyper').
cluster : str or dict or False/None
    If cluster is passed, HyperTools will perform clustering using the specified clustering model. Supported algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: None). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. If no parameters are specified in the string a default set of parameters will be used.
n_clusters : int
    If n_clusters is passed, HyperTools will perform k-means clustering with the k parameter set to n_clusters. The resulting clusters will be plotted in different colors according to the color palette.
save_path : str
    Path to save the image/movie. Must include the file extension in the save path (i.e. save_path='/path/to/file/image.png'). NOTE: If saving an animation, FFMPEG must be installed (this is a matplotlib req). FFMPEG can be easily installed on a mac via homebrew brew install ffmpeg or linux via apt-get apt-get install ffmpeg. If you don't have homebrew (mac only), you can install it like this: /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)".
animate : bool, 'parallel' or 'spin'
    If True or 'parallel', plots the data as an animated trajectory, with each dataset plotted simultaneously. If 'spin', all the data is plotted at once but the camera spins around the plot (default: False).
duration (animation only) : float
    Length of the animation in seconds (default: 30 seconds)
tail_duration (animation only) : float
    Sets the length of the tail of the data (default: 2 seconds)
rotations (animation only) : float
    Number of rotations around the box (default: 2)
zoom (animation only) : float
    How far to zoom into the plot, positive numbers will zoom in (default: 0)
chemtrails (animation only) : bool
    A low-opacity trail is left behind the trajectory (default: False).
precog (animation only) : bool
    A low-opacity trail is plotted ahead of the trajectory (default: False).
bullettime (animation only) : bool
    A low-opacity trail is plotted ahead and behind the trajectory (default: False).
frame_rate (animation only) : int or float
    Frame rate for animation (default: 50)
explore : bool
    User defined labels will appear on hover. If no labels are passed, the point index and coordinate will be plotted. To use, set explore=True. Note: Explore mode is currently only supported for 3D static plots, and is an experimental feature (i.e. it may not yet work properly).
show : bool
    If set to False, the figure will not be displayed, but the figure, axis and data objects will still be returned (default: True).
transform : list of numpy arrays or None
    The transformed data, bypasses transformations if this is set (default : None).
vectorizer : str, dict, class or class instance
    The vectorizer to use. Built-in options are 'CountVectorizer' or 'TfidfVectorizer'. To change default parameters, set to a dictionary e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text for details. You can also specify your own vectorizer model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to vectorizer_params. If a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
    Text model to use to transform text data. Built-in options are 'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' : {'n_components' : 10}}. See http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition for details on the two model options. You can also specify your own text model as a class, or class instance. With either option, the class must have a fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html). If a class, pass any parameters as a dictionary to text_params. If a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'
    Text to use to fit the semantic model (optional). If set to 'wiki', 'nips' or 'sotus' and the default semantic and vectorizer models are used, a pretrained model will be loaded which can save a lot of time.
ax : matplotlib.Axes
    Axis handle to plot the figure

Returns
----------
geo : hypertools.DataGeometry
    A new data geometry object
9,831
def delete_group(self, group_id):
    url = self.TEAM_GROUPS_ID_URL % group_id

    connection = Connection(self.token)
    connection.set_url(self.production, url)

    return connection.delete_request()
Remove a group from your team

:param group_id: Id of group
9,832
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col,
                                                   wide_data,
                                                   availability_vars):
    if not wide_data[choice_col].isin(availability_vars.keys()).all():
        msg = "One or more values in wide_data[choice_col] is not in the user "
        msg_2 = "provided alternative ids in availability_vars.keys()"
        raise ValueError(msg + msg_2)
    return None
Ensures that all chosen alternatives in `wide_df` are present in the `availability_vars` dict. Raises a helpful ValueError if not.

Parameters
----------
choice_col : str.
    Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise.
wide_data : pandas dataframe.
    Contains one row for each observation. Should contain the specified `choice_col` column.
availability_vars : dict.
    There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers.

Returns
-------
None.
9,833
def labels(self):
    # The tag and attribute names are assumed; the original string literals
    # were lost in extraction.
    return tuple(_Label(label.get('id'), label.get('name'), label.text)
                 for label in self.root.iter('label'))
Tuple of labels.
9,834
def show(self):
    # Format strings are plausible reconstructions; the original f-string
    # literals were lost in extraction.
    textWidth = max(60, shutil.get_terminal_size((80, 20)).columns)
    text = f'{self.name + ":":24}' + self.comment
    print('\n'.join(
        textwrap.wrap(
            text,
            width=textWidth,
            initial_indent='',
            subsequent_indent=' ' * 24)))

    local_parameters = {
        x: y
        for x, y in self.parameters.items()
        if x not in self.global_parameters
    }
    if local_parameters:
        print()
    for name, (value, comment) in local_parameters.items():
        par_str = f'  {name + ":":22}{value}'
        print(par_str)
        if comment:
            print('\n'.join(
                textwrap.wrap(
                    comment,
                    width=textWidth,
                    initial_indent=' ' * 24,
                    subsequent_indent=' ' * 24)))
Output for command sos show
9,835
def get(key, default=-1):
    if isinstance(key, int):
        return NotifyMessage(key)
    if key not in NotifyMessage._member_map_:
        extend_enum(NotifyMessage, key, default)
    return NotifyMessage[key]
Backport support for original codes.
9,836
def evaluate(self, node: InstanceNode) -> XPathValue:
    return self._eval(XPathContext(node, node, 1, 1))
Evaluate the receiver and return the result.

Args:
    node: Context node for XPath evaluation.

Raises:
    XPathTypeError: If a subexpression of the receiver is of a wrong type.
9,837
def xor(key, data):
    if type(key) is int:
        key = six.int2byte(key)
    key_len = len(key)

    return b''.join(
        six.int2byte(c ^ six.indexbytes(key, i % key_len))
        for i, c in enumerate(six.iterbytes(data))
    )
Perform cyclical exclusive or operations on ``data``.

The ``key`` can be an integer *(0 <= key < 256)* or a byte sequence. If the key is smaller than the provided ``data``, the ``key`` will be repeated.

Args:
    key(int or bytes): The key to xor ``data`` with.
    data(bytes): The data to perform the xor operation on.

Returns:
    bytes: The result of the exclusive or operation.

Examples:
    >>> from pwny import *
    >>> xor(5, b'ABCD')
    b'DGFA'
    >>> xor(5, b'DGFA')
    b'ABCD'
    >>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    b'15-=51)19=%5=9!)!%=-%!9!)-'
    >>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
    b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
9,838
def _setattr_url_map(self):
    # Option name and blacklist entries are plausible reconstructions; the
    # original string literals were lost in extraction.
    if self.apiopts.get('enable_sessions', True) is False:
        url_blacklist = ['login', 'logout', 'minions', 'jobs']
    else:
        url_blacklist = []

    urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
            if url not in url_blacklist)

    for url, cls in urls:
        setattr(self, url, cls())
Set an attribute on the local instance for each key/val in url_map.

CherryPy uses class attributes to resolve URLs.
9,839
def macro_network():
    tpm = np.array([[0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 1.0, 1.0],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 1.0, 1.0],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 0.3, 0.3],
                    [0.3, 0.3, 1.0, 1.0],
                    [1.0, 1.0, 0.3, 0.3],
                    [1.0, 1.0, 0.3, 0.3],
                    [1.0, 1.0, 0.3, 0.3],
                    [1.0, 1.0, 1.0, 1.0]])
    return Network(tpm, node_labels=LABELS[:tpm.shape[1]])
A network of micro elements which has greater integrated information after coarse graining to a macro scale.
9,840
def screen_resolution():
    w = 0
    h = 0
    try:
        import ctypes
        user32 = ctypes.windll.user32
        w, h = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
    except AttributeError:
        try:
            import AppKit
            size = AppKit.NSScreen.screens()[0].frame().size
            w, h = int(size.width), int(size.height)
        except ImportError:
            try:
                import Xlib
                import Xlib.display
                display = Xlib.display.Display()
                root = display.screen().root
                size = root.get_geometry()
                w, h = size.width, size.height
            except ImportError:
                w = 1920
                h = 1080
    return w, h
Returns the current screen's resolution. Should be multi-platform. :return: A tuple containing the width and height of the screen.
9,841
def add(self, source, email=None, name=None, username=None, uuid=None,
        matching=None, interactive=False):
    matcher = None

    if matching:
        try:
            blacklist = api.blacklist(self.db)
            matcher = create_identity_matcher(matching, blacklist)
        except MatcherNotSupportedError as e:
            self.error(str(e))
            return e.code

    try:
        new_uuid = api.add_identity(self.db, source, email, name,
                                    username, uuid)
        uuid = uuid or new_uuid
        # Template name is a plausible reconstruction; the original literal
        # was lost in extraction.
        self.display('add.tmpl', id=new_uuid, uuid=uuid)

        if matcher:
            self.__merge_on_matching(uuid, matcher, interactive)
    except AlreadyExistsError as e:
        msg = "unique identity %s already exists in the registry" % e.eid
        self.error(msg)
        return e.code
    except (NotFoundError, InvalidValueError) as e:
        self.error(str(e))
        return e.code

    return CMD_SUCCESS
Add an identity to the registry.

This method adds a new identity to the registry. By default, a new unique identity will also be added and associated to the new identity.

When <uuid> parameter is set, it only creates a new identity that will be associated to a unique identity defined by <uuid>.

The method will print the uuids associated to the new registered identity.

Optionally, this method can look for possible identities that match with the new one to insert. If a match is found, that means both identities are likely the same. Therefore, both identities would be merged into one. The algorithm used to search for matches will be defined by <matching> parameter. Please take into account that both unique identities will be always merged into the one from the registry, not into the new one.

When <interactive> parameter is set to True, the user will have to confirm whether these two identities should be merged into one. By default, the method is set to False.

:param source: data source
:param email: email of the identity
:param name: full name of the identity
:param username: user name used by the identity
:param uuid: associates the new identity to the unique identity identified by this id
:param matching: type of matching used to merge existing identities
:param interactive: interactive mode for merging identities, only available when <matching> parameter is set
9,842
def q(self, x, q0):
    y1_0 = q0
    y0_0 = 0
    y0 = [y0_0, y1_0]
    y = _sp.integrate.odeint(self._func, y0, x, Dfun=self._gradient,
                             rtol=self.rtol, atol=self.atol)
    return y[:, 1]
Numerically solved trajectory function for initial conditions :math:`q(0) = q_0` and :math:`q'(0) = 0`.
9,843
def has_perm(self, user, perm, obj=None, *args, **kwargs):
    try:
        if not self._obj_ok(obj):
            if hasattr(obj, 'get_permissions_object'):
                obj = obj.get_permissions_object(perm)
            else:
                raise InvalidPermissionObjectException
        return user.permset_tree.allow(Action(perm), obj)
    except ObjectDoesNotExist:
        return False
Test user permissions for a single action and object.

:param user: The user to test.
:type user: ``User``
:param perm: The action to test.
:type perm: ``str``
:param obj: The object path to test.
:type obj: ``tutelary.engine.Object``
:returns: ``bool`` -- is the action permitted?
9,844
def authenticate(self, name, password, mechanism="DEFAULT"):
    if not isinstance(name, (bytes, unicode)):
        raise TypeError("TxMongo: name must be an instance of basestring.")
    if not isinstance(password, (bytes, unicode)):
        raise TypeError("TxMongo: password must be an instance of basestring.")
    return self.connection.authenticate(self, name, password, mechanism)
Send an authentication command for this database. Mostly stolen from pymongo.
9,845
def p_new_expr(self, p):
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = ast.NewExpr(p[2])
new_expr : member_expr
         | NEW new_expr
9,846
def parse(query_string, unquote=True, normalized=False,
          encoding=DEFAULT_ENCODING):
    mydict = {}
    plist = []
    if query_string == "":
        return mydict

    if type(query_string) == bytes:
        query_string = query_string.decode()

    for element in query_string.split("&"):
        try:
            if unquote:
                (var, val) = element.split("=")
                if sys.version_info[0] == 2:
                    var = var.encode()
                    val = val.encode()
                var = urllib.unquote_plus(var)
                val = urllib.unquote_plus(val)
            else:
                (var, val) = element.split("=")
        except ValueError:
            raise MalformedQueryStringError
        if encoding:
            var = var.decode(encoding)
            val = val.decode(encoding)
        plist.append(parser_helper(var, val))

    for di in plist:
        (k, v) = di.popitem()
        tempdict = mydict
        while k in tempdict and type(v) is dict:
            tempdict = tempdict[k]
            (k, v) = v.popitem()
        if k in tempdict and type(tempdict[k]).__name__ == 'list':
            tempdict[k].append(v)
        elif k in tempdict:
            tempdict[k] = [tempdict[k], v]
        else:
            tempdict[k] = v

    if normalized == True:
        return _normalize(mydict)
    return mydict
Main parse function

@param query_string:
@param unquote: unquote html query string ?
@param encoding: An optional encoding used to decode the keys and values. Defaults to utf-8, which the W3C declares as a default in the W3C algorithm for encoding. @see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm
@param normalized: parse number key in dict to proper list ?
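For a flat query string, the expected behavior is roughly as follows (Python 2 semantics, given the encode/decode handling above):

    parse('a=1&b=hello+world')   # -> {'a': '1', 'b': 'hello world'}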
9,847
def tuple_search(t, i, v):
    for e in t:
        if e[i] == v:
            return e
    return None
Search tuple array by index and value

:param t: tuple array
:param i: index of the value in each tuple
:param v: value
:return: the first tuple in the array with the specified index / value
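A short example of the lookup:

    rows = [('alice', 30), ('bob', 25)]
    tuple_search(rows, 0, 'bob')   # -> ('bob', 25)
    tuple_search(rows, 1, 40)      # -> None, since no tuple has 40 at index 1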
9,848
def sites(c):
    docs_c["run"].hide = True
    www_c["run"].hide = True
    docs["build"](docs_c)
    www["build"](www_c)
    docs_c["run"].hide = False
    www_c["run"].hide = False
    docs["build"](docs_c, nitpick=True)
    www["build"](www_c, nitpick=True)
Build both doc sites w/ maxed nitpicking.
9,849
def reads(err_log):
    num_reads = 0
    paired_reads = 0
    # The search tokens are plausible reconstructions; the original string
    # literals were lost in extraction.
    with open(err_log, 'r') as error_log:
        for line in error_log:
            if 'Reads:' in line:
                num_reads = line.split()[-1].rstrip()
            elif 'Pairs:' in line:
                paired_reads = line.split()[-2].rstrip()
    return num_reads, paired_reads
Parse the outputs from bbmerge to extract the total number of reads, as well as the number of reads that could be paired.

:param err_log: bbmerge outputs the stats in the error file
:return: num_reads, the total number of reads; paired_reads, the number of paired reads
9,850
def extract_to(self, *, stream=None, fileprefix=''):
    if bool(stream) == bool(fileprefix):
        raise ValueError("Cannot set both stream and fileprefix")
    if stream:
        return self._extract_to_stream(stream=stream)

    bio = BytesIO()
    extension = self._extract_to_stream(stream=bio)
    bio.seek(0)
    filepath = Path(Path(fileprefix).name + extension)
    with filepath.open('wb') as target:
        copyfileobj(bio, target)
    return str(filepath)
Attempt to extract the image directly to a usable image file

If possible, the compressed data is extracted and inserted into a compressed image file format without transcoding the compressed content. If this is not possible, the data will be decompressed and extracted to an appropriate format.

Because it is not known until attempted what image format will be extracted, users should not assume what format they are getting back. When saving the image to a file, use a temporary filename, and then rename the file to its final name based on the returned file extension.

Examples:

    >>> im.extract_to(stream=bytes_io)
    '.png'

    >>> im.extract_to(fileprefix='/tmp/image00')
    '/tmp/image00.jpg'

Args:
    stream: Writable stream to write data to.
    fileprefix (str or Path): The path to write the extracted image to, without the file extension.

Returns:
    str: If *fileprefix* was provided, then the fileprefix with the appropriate extension. If no *fileprefix*, then an extension indicating the file type.
9,851
def DoubleClick(cls):
    element = cls._element()
    action = ActionChains(Web.driver)
    action.double_click(element)
    action.perform()
Double-click with the left mouse button.
9,852
def add_pr_curve(self, tag, labels, predictions, num_thresholds,
                 global_step=None, weights=None):
    if num_thresholds < 2:
        raise ValueError('num_thresholds must be >= 2')
    labels = _make_numpy_array(labels)
    predictions = _make_numpy_array(predictions)
    self._file_writer.add_summary(
        pr_curve_summary(tag, labels, predictions, num_thresholds, weights),
        global_step)
Adds precision-recall curve.

Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine.

Parameters
----------
tag : str
    A tag attached to the summary. Used by TensorBoard for organization.
labels : MXNet `NDArray` or `numpy.ndarray`
    The ground truth values. A tensor of 0/1 values with arbitrary shape.
predictions : MXNet `NDArray` or `numpy.ndarray`
    A float32 tensor whose values are in the range `[0, 1]`. Dimensions must match those of `labels`.
num_thresholds : int
    Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a tensor that stores an integer. The thresholds for computing the pr curves are calculated in the following way: `width = 1.0 / (num_thresholds - 1), thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`.
global_step : int
    Global step value to record.
weights : MXNet `NDArray` or `numpy.ndarray`
    Optional float32 tensor. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor.
9,853
def delete(self):
    if self.exists():
        try:
            self._api.buckets_delete(self._name)
        except Exception as e:
            raise e
Deletes the bucket.

Raises:
    Exception if there was an error deleting the bucket.
9,854
def connections_from_object(self, from_obj):
    self._validate_ctypes(from_obj, None)
    return self.connections.filter(from_pk=from_obj.pk)
Returns a ``Connection`` query set matching all connections with the given object as a source.
9,855
def throw(self, method, args={}, nowait=False, **kwargs):
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
                          nowait=nowait, **kwargs)
    if not nowait:
        return r
Call method on one of the agents in round robin. See :meth:`call_or_cast` for a full list of supported arguments. If the keyword argument `nowait` is false (default) it will block and return the reply.
9,856
def clear_mask(self):
    if self.masktag:
        try:
            self.canvas.delete_object_by_tag(self.masktag, redraw=False)
        except Exception:
            pass
    if self.maskhltag:
        try:
            self.canvas.delete_object_by_tag(self.maskhltag, redraw=False)
        except Exception:
            pass
    self.treeview.clear()
    self.fitsimage.redraw()
Clear mask from image. This does not clear loaded masks from memory.
9,857
def results_tc(self, key, value):
    # File name, open modes, and "key = value" separators are plausible
    # reconstructions; the original string literals were lost in extraction.
    if os.access(self.default_args.tc_out_path, os.W_OK):
        results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
    else:
        results_file = 'results.tc'

    new = True
    open(results_file, 'a').close()  # ensure the file exists
    with open(results_file, 'r+') as fh:
        results = ''
        for line in fh.read().strip().split('\n'):
            if not line:
                continue
            try:
                k, v = line.split(' = ')
            except ValueError:
                k, v = line.split(' =')  # handle an empty value
            if k == key:
                v = value
                new = False
            if v is not None:
                results += '{} = {}\n'.format(k, v)
        if new and value is not None:
            results += '{} = {}\n'.format(key, value)
        fh.seek(0)
        fh.write(results)
        fh.truncate()
Write data to results_tc file in TcEX specified directory.

The TcEx platform supports persistent values between executions of the App. This method will store the values for TC to read and put into the Database.

Args:
    key (string): The data key to be stored.
    value (string): The data value to be stored.
9,858
def __get_average_intra_cluster_distance(self, entry):
    linear_part_first = list_math_addition(self.linear_sum, entry.linear_sum)
    linear_part_second = linear_part_first
    linear_part_distance = sum(list_math_multiplication(linear_part_first,
                                                        linear_part_second))
    general_part_distance = (
        2.0 * (self.number_points + entry.number_points) *
        (self.square_sum + entry.square_sum) -
        2.0 * linear_part_distance)
    return (general_part_distance /
            ((self.number_points + entry.number_points) *
             (self.number_points + entry.number_points - 1.0))) ** 0.5
! @brief Calculates average intra cluster distance between current and specified clusters.

@param[in] entry (cfentry): Clustering feature to which distance should be obtained.

@return (double) Average intra cluster distance.
9,859
def read_from_file(self, filename):
    if not exists(filename):
        return -1
    with open(filename) as fd:
        needs_json = fd.read()
        try:
            minimum_needs = json.loads(needs_json)
        except (TypeError, ValueError):
            minimum_needs = None

    if not minimum_needs:
        return -1

    return self.update_minimum_needs(minimum_needs)
Read from an existing json file.

:param filename: The file to be read from.
:type filename: basestring, str

:returns: Success status. -1 for unsuccessful, 0 for success.
:rtype: int
9,860
def Parse(self):
    # Language literals are inferred from the method names; the originals
    # were lost in extraction.
    (start_line, lang) = self.ParseDesc()
    if start_line < 0:
        return
    if lang == 'python':
        self.ParsePythonFlags(start_line)
    elif lang == 'c':
        self.ParseCFlags(start_line)
    elif lang == 'java':
        self.ParseJavaFlags(start_line)
Parse program output.
9,861
def addFeatureToGraph(self, add_region=True, region_id=None,
                      feature_as_class=False):
    if feature_as_class:
        self.model.addClassToGraph(
            self.fid, self.label, self.ftype, self.description)
    else:
        self.model.addIndividualToGraph(
            self.fid, self.label, self.ftype, self.description)

    if self.start is None and self.stop is None:
        add_region = False

    if add_region:
        # The regex and key are plausible reconstructions; the original
        # literals were lost in extraction.
        regionchr = re.sub(r'\w+\:', '', self.start['reference'])
        if region_id is None:
            self.addRegionPositionToGraph(region_id, beginp, endp)
    return
We make the assumption here that all features are instances. The features are located on a region, which begins and ends with faldo:Position. The feature locations leverage the Faldo model, which has a general structure like:

Triples:
    feature_id a feature_type (individual)
        faldo:location region_id
    region_id a faldo:region
        faldo:begin start_position
        faldo:end end_position
    start_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
        faldo:position Integer(numeric position)
        faldo:reference reference_id
    end_position a (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
        faldo:position Integer(numeric position)
        faldo:reference reference_id

:param graph:
:return:
9,862
def getMibSymbol(self):
    if self._state & self.ST_CLEAN:
        return self._modName, self._symName, self._indices
    else:
        raise SmiError('%s object not fully initialized' %
                       self.__class__.__name__)
Returns MIB variable symbolic identification.

Returns
-------
str
    MIB module name
str
    MIB variable symbolic name
:py:class:`~pysnmp.proto.rfc1902.ObjectName`
    class instance representing MIB variable instance index.

Raises
------
SmiError
    If MIB variable conversion has not been performed.

Examples
--------
>>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0')
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getMibSymbol()
('SNMPv2-MIB', 'sysDescr', (0,))
>>>
9,863
def verify_system_status(self):
    # Message strings are plausible reconstructions; the original literals
    # were lost in extraction.
    if not sys.platform.startswith('linux'):
        raise InstallError('Supported platform is Linux only.')

    if self.python.is_system_python():
        if self.python.is_python_binding_installed():
            message = ('RPM Python binding is already installed on '
                       'system Python. Nothing to do.')
            Log.info(message)
            raise InstallSkipError(message)
        elif self.sys_installed:
            pass
        else:
            message = ('RPM Python binding can not be installed on '
                       'system Python.')
            raise InstallError(message)

    if self.rpm.is_system_rpm():
        self.verify_package_status()
Verify system status.
9,864
def subnet_range(ip_net, cidr):
    # Dict keys and format strings are plausible reconstructions; the
    # original string literals were lost in extraction.
    subnets_dict = dict()
    subnet = whole_subnet_maker(ip_net, cidr)
    subnets_dict['IP'] = ip_net
    subnets_dict['NET'] = subnet
    subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)
    if int(cidr) >= 24:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[3]) + 1
        last_ip = (int(subnet_split[3]) + 1) + (253 - int(__mask_conversion[int(cidr)]['OCT4']))
        bcast_ip = (int(subnet_split[3]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT4']))
        temp = '%s.%s.%s.' % (subnet_split[0], subnet_split[1], subnet_split[2])
        subnets_dict['RANGE'] = '%s%s to %s%s' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%s' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDRVAL']
    elif int(cidr) >= 16:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[2])
        last_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        bcast_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        temp = '%s.%s.' % (subnet_split[0], subnet_split[1])
        subnets_dict['RANGE'] = '%s%s to %s%s' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%s' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDRVAL']
    elif int(cidr) >= 8:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[1])
        last_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        bcast_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        temp = '%s.' % (subnet_split[0],)
        subnets_dict['RANGE'] = '%s%s to %s%s' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%s' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDRVAL']
    elif int(cidr) >= 1:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[0])
        last_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        bcast_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        subnets_dict['RANGE'] = '%s to %s' % (first_ip, last_ip)
        subnets_dict['BCAST'] = '%s' % (bcast_ip,)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDRVAL']
    return subnets_dict
Function to return a subnet range value from an IP address and CIDR pair

Args:
    ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
    cidr: CIDR value of 1 to 32

Returns:
    returns a dictionary of info
9,865
def parse_name(cls, name: str, default: T = None) -> T:
    if not name:
        return default
    name = name.lower()
    return next((item for item in cls if name == item.name.lower()), default)
Parse specified name for IntEnum; return default if not found.
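A sketch with a hypothetical IntEnum subclass that mixes in the classmethod above:

    from enum import IntEnum

    class Color(IntEnum):   # assumes parse_name is available on this enum
        RED = 1
        GREEN = 2

    Color.parse_name('red')           # -> Color.RED
    Color.parse_name('BLUE', None)    # -> None; case-insensitive lookup fails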
9,866
def m2i(self, pkt, s):
    diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                  implicit_tag=self.implicit_tag,
                                  explicit_tag=self.explicit_tag,
                                  safe=self.flexible_tag)
    if diff_tag is not None:
        if self.implicit_tag is not None:
            self.implicit_tag = diff_tag
        elif self.explicit_tag is not None:
            self.explicit_tag = diff_tag
    codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
    if self.flexible_tag:
        return codec.safedec(s, context=self.context)
    else:
        return codec.dec(s, context=self.context)
The good thing about safedec is that it may still decode ASN1 even if there is a mismatch between the expected tag (self.ASN1_tag) and the actual tag; the decoded ASN1 object will simply be put into an ASN1_BADTAG object. However, safedec prevents the raising of exceptions needed for ASN1F_optional processing. Thus we use 'flexible_tag', which should be False with ASN1F_optional. Regarding other fields, we might need to know whether encoding went as expected or not. Noticeably, input methods from cert.py expect certain exceptions to be raised. Hence default flexible_tag is False.
9,867
def ProfileRunValidationOutputFromOptions(feed, options):
    import cProfile
    import pstats
    locals_for_exec = locals()
    # The statement, dump-file name, key literals, and sort keys below are
    # plausible reconstructions; the originals were lost in extraction.
    cProfile.runctx('exit_code = RunValidationOutputFromOptions(feed, options)',
                    globals(), locals_for_exec, 'validate-stats')

    import resource
    print("Time: %d seconds" % (
        resource.getrusage(resource.RUSAGE_SELF).ru_utime +
        resource.getrusage(resource.RUSAGE_SELF).ru_stime))

    def _VmB(VmKey):
        _proc_status = '/proc/%d/status' % os.getpid()
        _scale = {'kB': 1024.0, 'KB': 1024.0,
                  'mB': 1024.0 * 1024.0, 'MB': 1024.0 * 1024.0}
        try:
            t = open(_proc_status)
            v = t.read()
            t.close()
        except:
            raise Exception("no proc file %s" % _proc_status)
            return 0
        try:
            i = v.index(VmKey)
            v = v[i:].split(None, 3)
        except:
            return 0
        if len(v) < 3:
            raise Exception("%s" % v)
            return 0
        return int(float(v[1]) * _scale[v[2]])

    print("Virtual Memory Size: %d bytes" % _VmB('VmSize:'))

    p = pstats.Stats('validate-stats')
    p.strip_dirs()
    p.sort_stats('cumulative').print_stats(30)
    p.sort_stats('cumulative').print_callers(30)
    return locals_for_exec['exit_code']
Run RunValidationOutputFromOptions, print profile and return exit code.
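The function above leans on the runctx/pstats round trip; here is a minimal standalone sketch of that pattern (the stats file name and sort key are illustrative choices):

import cProfile
import pstats

def work():
    return sum(i * i for i in range(100000))

local_vars = {'work': work}
# Profile a statement, keeping its result reachable via the locals dict.
cProfile.runctx('result = work()', globals(), local_vars, 'profile-stats')

p = pstats.Stats('profile-stats')
p.strip_dirs()
p.sort_stats('cumulative').print_stats(10)
print(local_vars['result'])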
9,868
def set_bind(self):
    # The Tk event strings were lost in extraction; '<Next>' (PageDown) and
    # '<Prior>' (PageUp) are plausible reconstructions, not confirmed.
    RangedInt.set_bind(self)
    self.unbind('<Next>')
    self.unbind('<Prior>')
    self.bind('<Next>', lambda e: self.set(self._min()))
    self.bind('<Prior>', lambda e: self.set(self._max()))
Sets key bindings -- we need this more than once
9,869
def GET(self, courseid, taskid, path):
    try:
        course = self.course_factory.get_course(courseid)
        if not self.user_manager.course_is_open_to_user(course):
            return self.template_helper.get_renderer().course_unavailable()
        path_norm = posixpath.normpath(urllib.parse.unquote(path))
        if taskid == "$common":
            public_folder = course.get_fs().from_subfolder("$common").from_subfolder("public")
        else:
            task = course.get_task(taskid)
            if not self.user_manager.task_is_visible_by_user(task):
                return self.template_helper.get_renderer().task_unavailable()
            public_folder = task.get_fs().from_subfolder("public")
        (method, mimetype_or_none, file_or_url) = public_folder.distribute(path_norm, False)
        if method == "local":
            # Header name restored; it was stripped in extraction.
            web.header('Content-Type', mimetype_or_none)
            return file_or_url
        elif method == "url":
            raise web.redirect(file_or_url)
        else:
            raise web.notfound()
    except web.HTTPError as error_or_redirect:
        raise error_or_redirect
    except:
        if web.config.debug:
            raise
        else:
            raise web.notfound()
GET request
9,870
def parser(self): module = self.module subcommands = self.subcommands if subcommands: module_desc = inspect.getdoc(module) parser = Parser(description=module_desc, module=module) subparsers = parser.add_subparsers() for sc_name, callback in subcommands.items(): sc_name = sc_name.replace("_", "-") cb_desc = inspect.getdoc(callback) sc_parser = subparsers.add_parser( sc_name, callback=callback, help=cb_desc ) else: parser = Parser(callback=self.callbacks[self.function_name], module=module) return parser
return the parser for the current name
9,871
@contextlib.contextmanager  # implied by the yield/finally shape; the decorator was likely lost in extraction
def flock(path):
    with open(path, "w+") as lf:
        try:
            fcntl.flock(lf, fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
            yield acquired
        except OSError:
            acquired = False
            yield acquired
        finally:
            if acquired:
                fcntl.flock(lf, fcntl.LOCK_UN)
Attempt to acquire a POSIX file lock.
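Given the context-manager shape noted in the code comment, usage would look like the following (the lock path is made up):

# Hypothetical usage; assumes flock is decorated with @contextlib.contextmanager.
with flock('/tmp/myapp.lock') as acquired:
    if acquired:
        pass  # do the exclusive work here
    else:
        print('lock is held by another process')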
9,872
def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
    # The '/assets' endpoint is confirmed by the docstring; the 'asc' default
    # for `order` was stripped in extraction and is inferred.
    endpoint = '/assets'
    params = self.__query_params(asset_code=asset_code, asset_issuer=asset_issuer,
                                 cursor=cursor, order=order, limit=limit)
    return self.query(endpoint, params)
This endpoint represents all assets. It will give you all the assets in the system along with various statistics about each. See the documentation below for details on query parameters that are available. `GET /assets{?asset_code,asset_issuer,cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_ :param str asset_code: Code of the Asset to filter by. :param str asset_issuer: Issuer of the Asset to filter by. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc", ordered by asset_code then by asset_issuer. :param int limit: Maximum number of records to return. :return: A list of all valid payment operations :rtype: dict
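A usage sketch for the endpoint wrapper above, assuming a Horizon-style client object exposing this method; the constructor and the _embedded/records envelope follow general Horizon conventions and are not confirmed by the source:

# Hypothetical usage; the client construction is an assumption.
horizon = Horizon('https://horizon.stellar.org')
page = horizon.assets(asset_code='USD', order='desc', limit=5)
for record in page['_embedded']['records']:
    print(record['asset_code'], record['asset_issuer'])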
9,873
def update(self): if not self.showing: return d = self.declaration self.set_show(d.show)
Update the PopupWindow if it is currently showing. This avoids calling update during initialization.
9,874
def read_excel(file_name, offset=1, sheet_index=0): try: workbook = xlrd.open_workbook(file_name) except Exception as e: return None if len(workbook.sheets()) <= 0: return [] sh = workbook.sheets()[sheet_index] raw_data = [] n_rows = sh.nrows row = sh.row_values(0) header = [] for t in row: t = t.strip().lower() header.append(t) for i in range(offset, n_rows): try: row = sh.row_values(i) d = {} for j, t in enumerate(header): d[t] = row[j] raw_data.append(d) except Exception as e: pass return raw_data
Read an Excel file. :param sheet_index: index of the worksheet to read :param file_name: path to the workbook :param offset: row offset; usually the first row is the header and should not be read as data :return: a list of dicts, one per row, keyed by the lowercased header cells
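A usage sketch, assuming a workbook whose first row holds column headers (the file name and header names are made up):

# Hypothetical usage; requires xlrd and a real workbook on disk.
rows = read_excel('contacts.xls', offset=1, sheet_index=0)
for row in rows:
    # Keys are the stripped, lowercased header cells from row 0.
    print(row.get('name'), row.get('email'))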
9,875
def main():
    # NOTE: the argparse option strings and help texts below were lost in
    # extraction.  The long option names are inferred from the surviving
    # `settings[...]` keys and `args.http_host` / `args.http_auth` attribute
    # accesses; short flags and help texts are reconstructed guesses.
    rc_settings = read_rcfile()
    parser = ArgumentParser(description='Millipede generator')
    parser.add_argument('-s', '--size', type=int, nargs="?",
                        help='size of the millipede')
    parser.add_argument('-c', '--comment', type=str,
                        help='comment to display next to the millipede')
    parser.add_argument('-v', '--version', action='version', version=__version__)
    parser.add_argument('-r', '--reverse', action='store_true',
                        help='reverse the millipede')
    parser.add_argument('-t', '--template',
                        help='template of the millipede body')
    parser.add_argument('-p', '--position', type=int,
                        help='starting position of the millipede')
    parser.add_argument('-o', '--opposite', action='store_true',
                        help='go the opposite direction')
    parser.add_argument(
        '--http-host',
        metavar="The http server to send the data",
        help='POST the millipede to this HTTP server'  # help text reconstructed
    )
    parser.add_argument(
        '--http-auth',
        metavar='user:pass',
        help='HTTP basic-auth credentials',   # help text reconstructed
        default=os.environ.get('HTTP_AUTH')   # env var name lost; placeholder
    )
    # Two further arguments followed here whose names, metavars and help texts
    # were lost in extraction; left as placeholders rather than guessed:
    # parser.add_argument(..., metavar=..., nargs=..., help=...)
    # parser.add_argument(..., metavar=..., help=...)
    args = parser.parse_args()
    settings = compute_settings(vars(args), rc_settings)
    out = millipede(
        settings['size'],
        comment=settings['comment'],
        reverse=settings['reverse'],
        template=settings['template'],
        position=settings['position'],
        opposite=settings['opposite']
    )
    if args.http_host:
        if args.http_auth:
            try:
                login, passwd = args.http_auth.split(':')
            except ValueError:
                parser.error(
                    "Credentials should be a string like "
                    "`user:pass'"
                )
        # The remainder of the function (posting the millipede / printing it)
        # was truncated in the source and is not reconstructed here.
Entry point
9,876
def get_requests(self): safe = self.get_safe_struct() self.download_list = [] self.structure_recursion(safe, self.parent_folder) self.sort_download_list() return self.download_list, self.folder_list
Creates product structure and returns list of files for download :return: list of download requests :rtype: list(download.DownloadRequest)
9,877
def text(self):
    # Separator and format strings reconstructed; per the docstring this
    # renders the C declaration for the command.
    params = ', '.join(x.text for x in self.params)
    return '{}({});'.format(self.proto_text, params)
Formatted Command declaration. This is the C declaration for the command.
9,878
def _combine_out_files(chr_files, work_dir, data): out_file = "%s.bed" % sshared.outname_from_inputs(chr_files) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for chr_file in chr_files: with open(chr_file) as in_handle: is_empty = in_handle.readline().startswith("track name=empty") if not is_empty: with open(chr_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file
Concatenate all CNV calls into a single file.
9,879
def status_unreblog(self, id):
    id = self.__unpack_id(id)
    url = '/api/v1/statuses/{0}/unreblog'.format(str(id))
    return self.__api_request('POST', url)
Un-reblog a status. Returns a `toot dict`_ with the status that used to be reblogged.
9,880
def create(sub_array_id):
    config = request.data
    # The config key and ID format string were lost in extraction; the values
    # below are reconstructed guesses.
    config['sub_array_id'] = 'subarray-{:02d}'.format(sub_array_id)
    return add_scheduling_block(config)
Create / register a Scheduling Block instance with SDP.
9,881
def depends_on_helper(obj): if isinstance(obj, AWSObject): return obj.title elif isinstance(obj, list): return list(map(depends_on_helper, obj)) return obj
Handles using .title if the given object is a troposphere resource. If the given object is a troposphere resource, use the `.title` attribute of that resource. If it's a string, just use the string. This should allow more pythonic use of DependsOn.
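A short troposphere sketch of the pass-through behavior described above (the resource titles are made up):

from troposphere import Template
from troposphere.s3 import Bucket

t = Template()
bucket = t.add_resource(Bucket('LogBucket'))

print(depends_on_helper(bucket))              # 'LogBucket'
print(depends_on_helper('ExistingResource'))  # strings pass through unchanged
print(depends_on_helper([bucket, 'Other']))   # ['LogBucket', 'Other']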
9,882
def p_contextualize_item(self, t): if len(t) == 5: t[0] = contextualize_item(t[2], t[4], line=t.lineno(1)) elif t[5] == "with": t[0] = contextualize_item(t[2], t[4], ctxt_tool=t[6], line=t.lineno(1)) else: t[0] = contextualize_item(t[2], t[4], num=t[6], line=t.lineno(1))
contextualize_item : SYSTEM VAR CONFIGURE VAR | SYSTEM VAR CONFIGURE VAR STEP NUMBER | SYSTEM VAR CONFIGURE VAR WITH VAR
9,883
def main(http_port, peer_name, node_name, app_id):
    # NOTE: the original tuple listed seventeen bundle names that were lost in
    # extraction.  The names below are a plausible reconstruction of a
    # Pelix/Herald HTTP setup implied by the ipopo.add() calls further down;
    # treat the exact list as inferred.
    framework = pelix.framework.create_framework(
        ('pelix.ipopo.core',
         'pelix.ipopo.waiting',
         'pelix.shell.core',
         'pelix.shell.console',
         'pelix.http.basic',
         'herald.core',
         'herald.directory',
         'herald.shell',
         'herald.transports.http.servlet',
         'herald.transports.http.directory',
         'herald.transports.http.discovery_multicast',),
        {herald.FWPROP_NODE_UID: node_name,
         herald.FWPROP_NODE_NAME: node_name,
         herald.FWPROP_PEER_NAME: peer_name,
         herald.FWPROP_APPLICATION_ID: app_id})
    framework.start()
    context = framework.get_bundle_context()
    with use_waiting_list(context) as ipopo:
        ipopo.add(pelix.http.FACTORY_HTTP_BASIC, "http-server",
                  {pelix.http.HTTP_SERVICE_PORT: http_port})
        ipopo.add(herald.transports.http.FACTORY_SERVLET,
                  "herald-http-servlet")
        ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST,
                  "herald-http-discovery-multicast")
    framework.wait_for_stop()
Runs the framework :param http_port: HTTP port to listen to :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID
9,884
def gen(self, text, start=0):
    # The string literals (join separators and the paragraph delimiter) were
    # lost in extraction; '' / '\n' / '\n\n' below are reconstructions.
    for cc in self.chunkComment(text, start):
        c = self.extractChunkContent(cc)
        cc = ''.join(cc)
        m = self.matchComment(c)
        idx = text.index(cc, start)
        e = idx + len(cc)
        if m:
            assert text[idx:e] == cc
            try:
                # Skip past the previously generated block, up to the next
                # blank line.
                end = text.index('\n\n', e - 1) + 1
            except ValueError:
                end = len(text)
            text = text[:e] + text[end:]
            new = self.genOutputs(self.code(text), m)
            new = '\n'.join(new)
            text = text[:e] + new + text[e:]
            return self.gen(text, e + len(new))
    return text
Return the source code in text, filled with autogenerated code starting at start.
9,885
def patch_ast(node, source, sorted_children=False):
    # 'region' is confirmed by the docstring; the literal was stripped in
    # extraction.
    if hasattr(node, 'region'):
        return node
    walker = _PatchingASTWalker(source, children=sorted_children)
    ast.call_for_nodes(node, walker)
    return node
Patches the given node After calling, each node in `node` will have a new field named `region` that is a tuple containing the start and end offsets of the code that generated it. If `sorted_children` is true, a `sorted_children` field will be created for each node, too. It is a list containing child nodes as well as whitespaces and comments that occur between them.
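A sketch of the resulting region field, assuming the surrounding module's ast wrapper accepts a freshly parsed tree (rope's own ast module is the assumption here; the offsets are what the docstring promises):

# Hypothetical usage; assumes patch_ast and its module's ast wrapper.
source = 'x = 1\n'
node = ast.parse(source)
patched = patch_ast(node, source, sorted_children=True)
assign = patched.body[0]
start, end = assign.region   # start/end offsets into `source`
print(source[start:end])     # 'x = 1'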
9,886
def set(self, x): for name, value in iter(x.items()): if hasattr(value, "ndim"): if self[name].value.ndim < value.ndim: self[name].value.itemset(value.squeeze()) else: self[name].value = value else: self[name].value.itemset(value)
Set variable values via a dictionary mapping name to value.
9,887
def set_decade_lims(axis=None, direction=None):
    # The 'x'/'y' literals (confirmed by the docstring) were stripped in
    # extraction, leaving a stray 'rxy' token that is removed here.
    if axis is None:
        axis = plt.gca()
    if direction is None or direction == 'x':
        MIN, MAX = axis.get_xlim()
        MIN = 10 ** (np.floor(np.log10(MIN)))
        MAX = 10 ** (np.ceil(np.log10(MAX)))
        axis.set_xlim([MIN, MAX])
    if direction is None or direction == 'y':
        MIN, MAX = axis.get_ylim()
        MIN = 10 ** (np.floor(np.log10(MIN)))
        MAX = 10 ** (np.ceil(np.log10(MAX)))
        axis.set_ylim([MIN, MAX])
r''' Set limits to the floor/ceil values in terms of decades. :options: **axis** ([``plt.gca()``] | ...) Specify the axis to which to apply the limits. **direction** ([``None``] | ``'x'`` | ``'y'``) Limit the application to a certain direction (default: both).
9,888
def best_match(desired_language: {str, Language}, supported_languages: list,
               min_score: int=75) -> (str, int):
    # Return quickly on an exact tag match.
    if desired_language in supported_languages:
        return desired_language, 100
    desired_language = standardize_tag(desired_language)
    if desired_language in supported_languages:
        return desired_language, 100
    match_scores = [
        (supported, tag_match_score(desired_language, supported))
        for supported in supported_languages
    ]
    # Drop matches below the threshold; 'und' with score 0 (confirmed by the
    # docstring) is the fallback when nothing qualifies.
    match_scores = [
        (supported, score) for (supported, score) in match_scores
        if score >= min_score
    ] + [('und', 0)]
    match_scores.sort(key=lambda item: -item[1])
    return match_scores[0]
You have software that supports any of the `supported_languages`. You want to use `desired_language`. This function lets you choose the right language, even if there isn't an exact match. Returns: - The best-matching language code, which will be one of the `supported_languages` or 'und' - The score of the match, from 0 to 100 `min_score` sets the minimum match score. If all languages match with a lower score than that, the result will be 'und' with a score of 0. When there is a tie for the best matching language, the first one in the tie will be used. Setting `min_score` lower will enable more things to match, at the cost of possibly mis-handling data or upsetting users. Read the documentation for :func:`tag_match_score` to understand what the numbers mean. >>> best_match('fr', ['de', 'en', 'fr']) ('fr', 100) >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl']) ('sr-Latn', 100) >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan']) ('zh-Hans', 100) >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan']) ('cmn-Hans', 100) >>> best_match('pt', ['pt-BR', 'pt-PT']) ('pt-BR', 100) >>> best_match('en-AU', ['en-GB', 'en-US']) ('en-GB', 96) >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US']) ('es-419', 96) >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY']) ('es-PU', 95) >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY']) ('es-AR', 95) >>> best_match('zsm', ['id', 'mhp']) ('id', 86) >>> best_match('eu', ['el', 'en', 'es']) ('es', 90) >>> best_match('eu', ['el', 'en', 'es'], min_score=92) ('und', 0)
9,889
def loads(cls, json_data): try: return cls(**cls.MARSHMALLOW_SCHEMA.loads(json_data)) except marshmallow.exceptions.ValidationError as exc: raise ValidationError("Failed to load message", extra=exc.args[0])
Load an instance of this class from a JSON string. The payload is validated with MARSHMALLOW_SCHEMA; a marshmallow validation failure is re-raised as this package's ValidationError.
9,890
def join(*args, **kwargs): import os.path if _is_list(args[0]): return os.path.join(*args[0]) return os.path.join(*args, **kwargs)
Join parts of a path together
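Both calling conventions accepted by the wrapper, side by side:

print(join('usr', 'local', 'bin'))    # 'usr/local/bin' on POSIX
print(join(['usr', 'local', 'bin']))  # same result from a single list argument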
9,891
def send(data, channels=None, push_time=None, expiration_time=None,
         expiration_interval=None, where=None, cql=None):
    # NOTE: the body of this function was mangled in extraction into one run of
    # fragments ("t be set", "data", "prod", "dev", "channels", "push_time",
    # "utc", "YYYY-MM-DDTHH:mm:ss.SSSZ", "where", "cql", "/push", "objectId").
    # The code below is reconstructed from those fragments and the docstring;
    # treat it as an inferred sketch, not authoritative SDK code.
    if expiration_interval and expiration_time:
        raise TypeError("expiration_interval and expiration_time can't be set at the same time")
    params = {'data': data}
    if channels:
        params['channels'] = channels
    if push_time:
        # The fragments suggest an arrow-style UTC timestamp format.
        params['push_time'] = arrow.get(push_time).to('utc').format('YYYY-MM-DDTHH:mm:ss.SSSZ')
    if expiration_time:
        params['expiration_time'] = arrow.get(expiration_time).to('utc').format('YYYY-MM-DDTHH:mm:ss.SSSZ')
    if expiration_interval:
        params['expiration_interval'] = expiration_interval
    if where:
        params['where'] = where.dump().get('where', {})
    if cql:
        params['cql'] = cql
    result = leancloud.client.post('/push', params).json()
    notification = Notification.create_without_data(result['objectId'])
    return notification
Send a push notification. The return value is the object in the _Notification table corresponding to this push; to use its data, call fetch() to sync it locally. :param channels: channels to push to :type channels: list or tuple :param push_time: time at which to deliver the push :type push_time: datetime :param expiration_time: absolute datetime at which the message expires :type expiration_time: datetime :param expiration_interval: relative expiry in seconds, counted from the time of the API call :type expiration_interval: int :param where: a leancloud.Query object querying the _Installation table :type where: leancloud.Query :param cql: a CQL statement querying the _Installation table :type cql: string_types :param data: the payload pushed to devices; see https://leancloud.cn/docs/push_guide.html#消息内容_Data :rtype: Notification
9,892
def markup_description(description):
    if apply_markdown:
        description = apply_markdown(description)
    else:
        # Plain-text fallback: escape, convert newlines, wrap in a paragraph.
        description = escape(description).replace('\n', '<br />')
        description = '<p>' + description + '</p>'
    return mark_safe(description)
Apply HTML markup to the given description.
9,893
def _augment_file(self, f):
    def get_url(target):
        # Build the download URL; the exact path template was lost in
        # extraction, so 'file/{id}/{name}' is a reconstruction.
        if target.file_size is None:
            return None
        if target.file_name is not None:
            return self.base_url + 'file/{0}/{1}'.format(target.file_id.hex, target.file_name)
        else:
            return self.base_url + 'file/{0}/{1}'.format(target.file_id.hex, 'file')
    f.get_url = types.MethodType(get_url, f)

    def download_to(target, file_name):
        url = target.get_url()
        r = requests.get(url, stream=True)
        with open(file_name, 'wb') as file_to_write:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    file_to_write.write(chunk)
                    file_to_write.flush()
        return file_name
    f.download_to = types.MethodType(download_to, f)
    return f
Augment a FileRecord with methods to get the data URL and to download, returning the updated file for use in generator functions :internal:
9,894
def add_state_errors(self, errors):
    # The '__state__' key is confirmed by the docstring; the error message
    # string is reconstructed.
    if not self.errors:
        self.errors = dict()
    if '__state__' not in self.errors:
        self.errors['__state__'] = []
    if type(errors) is not list:
        errors = [errors]
    for error in errors:
        if not isinstance(error, Error):
            err = 'Error must be of type {}'
            raise x.InvalidErrorType(err.format(Error))
        self.errors['__state__'].append(error)
    return self
Add state errors Accepts a list of errors (or a single Error) coming from validators applied to entity as whole that are used for entity state validation The errors will exist on a __state__ property of the errors object. :param errors: list or Error, list of entity state validation errors :return: shiftschema.result.Result
9,895
def to_serializable_dict(self, attrs_to_serialize=None, rels_to_expand=None, rels_to_serialize=None, key_modifications=None): return self.todict( attrs_to_serialize=attrs_to_serialize, rels_to_expand=rels_to_expand, rels_to_serialize=rels_to_serialize, key_modifications=key_modifications)
An alias for `todict`
9,896
def render_mail_template(subject_template, body_template, context):
    subject = strip_spaces(render_to_string(subject_template, context))
    body = render_to_string(body_template, context)
    return subject, body
Renders both the subject and body templates in the given context. Returns a tuple (subject, body) of the result.
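A usage sketch in a Django project (the template paths and context are made up):

# Hypothetical usage; assumes the two templates exist in the project.
subject, body = render_mail_template(
    'emails/welcome_subject.txt',
    'emails/welcome_body.txt',
    {'username': 'ada'},
)
# strip_spaces keeps the subject on one line, as mail headers require.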
9,897
def tplot_save(names, filename=None):
    if isinstance(names, int):
        names = list(data_quants.keys())[names-1]
    if not isinstance(names, list):
        names = [names]
    # Pull in any dependent variables referenced by the requested ones.
    for name in names:
        if isinstance(data_quants[name].data, list):
            for data_name in data_quants[name].data:
                if data_name not in names:
                    names.append(data_name)
    to_pickle = []
    for name in names:
        if name not in data_quants.keys():
            print("That name is currently not in pytplot")
            return
        to_pickle.append(data_quants[name])
    num_quants = len(to_pickle)
    to_pickle = [num_quants] + to_pickle
    temp_tplot_opt_glob = tplot_opt_glob
    to_pickle.append(temp_tplot_opt_glob)
    if filename is None:
        # Default file name reconstructed; the literal pieces were lost in
        # extraction.
        filename = 'var_' + '-'.join(names) + '.pytplot'
    pickle.dump(to_pickle, open(filename, "wb"))
    return
This function will save tplot variables into a single file by using the python "pickle" function. This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session, but save all of your data/options. All variables and plot options can be read back into tplot with the "tplot_restore" command. Parameters: names : str/list A string or a list of strings of the tplot variables you would like saved. filename : str, optional The filename where you want to save the file. Returns: None Examples: >>> # Save a single tplot variable >>> import pytplot >>> x_data = [1,2,3,4,5] >>> y_data = [1,2,3,4,5] >>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data}) >>> pytplot.ylim('Variable1', 2, 4) >>> pytplot.save('Variable1', filename='C:/temp/variable1.pytplot')
9,898
def send_select_and_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(), config=opendnp3.TaskConfig().Default()): self.master.SelectAndOperate(command_set, callback, config)
Select and operate a set of commands :param command_set: set of command headers :param callback: callback that will be invoked upon completion or failure :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
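A hedged pydnp3-style sketch of building a command set for this call; the class and enum names follow pydnp3's bindings, but treat the exact spelling and the `app` object as assumptions:

# Hypothetical usage; assumes pydnp3's opendnp3 bindings and a master wrapper `app`.
from pydnp3 import opendnp3

command_set = opendnp3.CommandSet([
    opendnp3.WithIndex(
        opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),
        0),  # point index 0
])
app.send_select_and_operate_command_set(command_set)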
9,899
def get_ref_dict(self, schema): schema_key = make_schema_key(schema) ref_schema = build_reference( "schema", self.openapi_version.major, self.refs[schema_key] ) if getattr(schema, "many", False): return {"type": "array", "items": ref_schema} return ref_schema
Method to create a dictionary containing a JSON reference to the schema in the spec