[Dataset viewer column header] Columns: "Unnamed: 0" (int64, row index, values 0 to 389k); "code" (string, lengths 26 to 79.6k); "docstring" (string, lengths 1 to 46.9k).
379,800
def modifiedaminoacids(df, kind=): colors = [,,] total_aas, quants = analysis.modifiedaminoacids(df) df = pd.DataFrame() for a, n in quants.items(): df[a] = [n] df.sort_index(axis=1, inplace=True) if kind == or kind == : ax1 = df.plot(kind=, figsize=(7,7), color=colors) ax1.set_ylabel() ax1.set_xlabel() ax1.set_xticks([]) ylim = np.max(df.values)+1000 ax1.set_ylim(0, ylim ) _bartoplabel(ax1, 100*df.values[0], total_aas, ylim ) ax1.set_xlim((-0.3, 0.3)) return ax if kind == or kind == : dfp =df.T residues = dfp.index.values dfp.index = ["%.2f%% (%d)" % (100*df[i].values[0]/total_aas, df[i].values[0]) for i in dfp.index.values ] ax2 = dfp.plot(kind=, y=0, colors=colors) ax2.legend(residues, loc=, bbox_to_anchor=(1.0, 1.0)) ax2.set_ylabel() ax2.set_xlabel() ax2.figure.set_size_inches(6,6) for t in ax2.texts: t.set_fontsize(15) return ax2 return ax1, ax2
Generate a plot of relative numbers of modified amino acids in source DataFrame. Plot a pie or bar chart showing the number and percentage of modified amino acids in the supplied data frame. The amino acids displayed will be determined from the supplied data/modification type. :param df: processed DataFrame :param kind: `str` type of plot; either "pie" or "bar" :return: matplotlib ax
379,801
def assert_angles_allclose(x, y, **kwargs):
    c2 = (np.sin(x) - np.sin(y))**2 + (np.cos(x) - np.cos(y))**2
    diff = np.arccos((2.0 - c2) / 2.0)
    assert np.allclose(diff, 0.0, **kwargs)
Like numpy's assert_allclose, but for angles (in radians).
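A minimal usage sketch, assuming numpy is imported as np and the helper above is in scope; it shows that angles differing by a full turn compare as equal:

    import numpy as np

    # 0 and 2*pi describe the same direction, so the angular difference is ~0
    assert_angles_allclose(np.array([0.0, np.pi / 2]),
                           np.array([2 * np.pi, np.pi / 2 + 2 * np.pi]),
                           atol=1e-7)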
379,802
def twoDimensionalScatter(title, title_x, title_y, x, y, lim_x = None, lim_y = None, color = , size = 20, alpha=None): plt.figure() plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors=) plt.xlabel(title_x) plt.ylabel(title_y) plt.title(title) if type(color) is not str: plt.colorbar() if lim_x: plt.xlim(lim_x[0], lim_x[1]) if lim_y: plt.ylim(lim_y[0], lim_y[1])
Create a two-dimensional scatter plot. INPUTS
379,803
def _compute_projection_filters(G, sf, estimated_source): eps = np.finfo(np.float).eps (nsampl, nchan) = estimated_source.shape if len(G.shape) == 4: G = G[None, None, ...] sf = sf[None, ...] nsrc = G.shape[0] filters_len = G.shape[-1] estimated_source = _zeropad(estimated_source.T, filters_len - 1, axis=1) n_fft = int(2**np.ceil(np.log2(nsampl + filters_len - 1.))) sef = scipy.fftpack.fft(estimated_source, n=n_fft) D = np.zeros((nsrc, nchan, filters_len, nchan)) for (j, cj, c) in itertools.product( list(range(nsrc)), list(range(nchan)), list(range(nchan)) ): ssef = sf[j, cj] * np.conj(sef[c]) ssef = np.real(scipy.fftpack.ifft(ssef)) D[j, cj, :, c] = np.hstack((ssef[0], ssef[-1:-filters_len:-1])) D = D.reshape(nsrc * nchan * filters_len, nchan) G = _reshape_G(G) try: C = np.linalg.solve(G + eps*np.eye(G.shape[0]), D).reshape( nsrc, nchan, filters_len, nchan ) except np.linalg.linalg.LinAlgError: C = np.linalg.lstsq(G, D)[0].reshape( nsrc, nchan, filters_len, nchan ) if nsrc == 1: C = C[0] return C
Least-squares projection of estimated source on the subspace spanned by delayed versions of reference sources, with delays between 0 and filters_len-1
379,804
def field(ctx, text, index, delimiter=): splits = text.split(delimiter) splits = [f for f in splits if f != delimiter and len(f.strip()) > 0] index = conversions.to_integer(index, ctx) if index < 1: raise ValueError() if index <= len(splits): return splits[index-1] else: return
Reference a field in string separated by a delimiter
379,805
def work_get(self, wallet, account): wallet = self._process_value(wallet, ) account = self._process_value(account, ) payload = {"wallet": wallet, "account": account} resp = self.call(, payload) return resp[]
Retrieves work for **account** in **wallet** .. enable_control required .. version 8.0 required :param wallet: Wallet to get account work for :type wallet: str :param account: Account to get work for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_get( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp" ... ) "432e5cf728c90f4f"
379,806
def chunks(arr, size):
    for i in _range(0, len(arr), size):
        yield arr[i:i+size]
Splits a list into chunks :param arr: list to split :type arr: :class:`list` :param size: number of elements in each chunk :type size: :class:`int` :return: generator object :rtype: :class:`generator`
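A quick usage sketch (assuming `_range` is the module's alias for the builtin `range`):

    list(chunks([1, 2, 3, 4, 5], 2))
    # -> [[1, 2], [3, 4], [5]]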
379,807
def post(self, request, *args, **kwargs): now = timezone.now() data = { : request.POST.get(), : slugify(request.POST.get()), : DRAFT if in request.POST else PUBLISHED, : [Site.objects.get_current().pk], : [request.user.pk], : , : , : now, : now, : now, : request.POST.get(), : request.POST.get()} form = QuickEntryForm(data) if form.is_valid(): form.instance.content = self.htmlize(form.cleaned_data[]) entry = form.save() return redirect(entry) data = {: smart_str(request.POST.get(, )), : smart_str(self.htmlize( request.POST.get(, ))), : smart_str(request.POST.get(, )), : slugify(request.POST.get(, )), : request.user.pk, : Site.objects.get_current().pk} return redirect( % (reverse(), urlencode(data)))
Handle the data for posting a quick entry, and redirect to the admin in case of error or to the entry's page in case of success.
379,808
def getCmd(snmpEngine, authData, transportTarget, contextData, *varBinds, **options): def cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx): cbCtx[] = errorIndication cbCtx[] = errorStatus cbCtx[] = errorIndex cbCtx[] = varBinds cbCtx = {} while True: if varBinds: cmdgen.getCmd(snmpEngine, authData, transportTarget, contextData, *varBinds, cbFun=cbFun, cbCtx=cbCtx, lookupMib=options.get(, True)) snmpEngine.transportDispatcher.runDispatcher() errorIndication = cbCtx[] errorStatus = cbCtx[] errorIndex = cbCtx[] varBinds = cbCtx[] else: errorIndication = errorStatus = errorIndex = None varBinds = [] varBinds = (yield errorIndication, errorStatus, errorIndex, varBinds) if not varBinds: break
Creates a generator to perform one or more SNMP GET queries. On each iteration, new SNMP GET request is send (:RFC:`1905#section-4.2.1`). The iterator blocks waiting for response to arrive or error to occur. Parameters ---------- snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :py:class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType` One or more class instances representing MIB variables to place into SNMP request. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. Yields ------ errorIndication : str True value indicates SNMP engine error. errorStatus : str True value indicates SNMP PDU error. errorIndex : int Non-zero value refers to `varBinds[errorIndex-1]` varBinds : tuple A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing MIB variables returned in SNMP response. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Notes ----- The `getCmd` generator will be exhausted immediately unless a new sequence of `varBinds` are send back into running generator (supported since Python 2.6). Examples -------- >>> from pysnmp.hlapi import * >>> g = getCmd(SnmpEngine(), ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 161)), ... ContextData(), ... ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0))) >>> next(g) (None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]) >>>
379,809
def wrap_requests(requests_func): def call(url, *args, **kwargs): blacklist_hostnames = execution_context.get_opencensus_attr( ) parsed_url = urlparse(url) if parsed_url.port is None: dest_url = parsed_url.hostname else: dest_url = .format(parsed_url.hostname, parsed_url.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return requests_func(url, *args, **kwargs) _tracer = execution_context.get_opencensus_tracer() _span = _tracer.start_span() _span.name = .format(requests_func.__name__) _span.span_kind = span_module.SpanKind.CLIENT _tracer.add_attribute_to_current_span(HTTP_URL, url) result = requests_func(url, *args, **kwargs) _tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(result.status_code)) _tracer.end_span() return result return call
Wrap the requests function to trace it.
379,810
def getcols(sheetMatch=None,colMatch="Decay"): book=BOOK() if sheetMatch is None: matchingSheets=book.sheetNames print(%(len(matchingSheets))) else: matchingSheets=[x for x in book.sheetNames if sheetMatch in x] print(%(len(matchingSheets),len(book.sheetNames),sheetMatch)) matchingSheetsWithCol=[] for sheetName in matchingSheets: i = book.sheetNames.index(sheetName) for j,colName in enumerate(book.sheets[i].colDesc): if colMatch in colName: matchingSheetsWithCol.append((sheetName,j)) break else: print(" no match in [%s]%s"%(book.bookName,sheetName)) print("%d of %d of those have your column"%(len(matchingSheetsWithCol),len(matchingSheets))) for item in matchingSheetsWithCol: print(item,item[0],item[1])
find every column in every sheet and put it in a new sheet or book.
379,811
def asxc(cls, obj):
    if isinstance(obj, cls):
        return obj
    if is_string(obj):
        return cls.from_name(obj)
    raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj)))
Convert object into Xcfunc.
379,812
def assertTimeZoneIsNotNone(self, dt, msg=None):
    if not isinstance(dt, datetime):
        raise TypeError()
    self.assertIsNotNone(dt.tzinfo, msg=msg)
Fail unless ``dt`` has a non-null ``tzinfo`` attribute. Parameters ---------- dt : datetime msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``dt`` is not a datetime object.
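A hedged usage sketch inside a test case; the mixin class name `TimeZoneAssertions` is illustrative, not the library's actual name:

    import unittest
    from datetime import datetime, timezone

    class ExampleTestCase(TimeZoneAssertions, unittest.TestCase):  # hypothetical mixin
        def test_timestamps_are_aware(self):
            aware = datetime(2020, 1, 1, tzinfo=timezone.utc)
            naive = datetime(2020, 1, 1)
            self.assertTimeZoneIsNotNone(aware)      # passes
            with self.assertRaises(AssertionError):
                self.assertTimeZoneIsNotNone(naive)  # naive datetime fails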
379,813
def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):
    bits = path.split(sep)
    bits = (x for x in bits if x != pardir)
    return sep.join(bits)
Remove relative path symbols such as '..' Args: path (str): A target path string sep (str): A string used as the path delimiter (Default: `os.sep`) pardir (str): A string referring to the parent directory (Default: `os.pardir`) Returns: str
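A quick usage sketch (using '/' as the separator for clarity):

    remove_pardir_symbols("a/../b/c/..", sep="/")
    # -> "a/b/c"  ('..' components are simply dropped, not resolved)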
379,814
def shell(self, name=, site=None, use_root=0, **kwargs): r = self.database_renderer(name=name, site=site) if int(use_root): kwargs = dict( db_user=r.env.db_root_username, db_password=r.env.db_root_password, db_host=r.env.db_host, db_name=r.env.db_name, ) r.env.update(kwargs) if not name: r.env.db_name = r.run({db_password}\)
Opens a SQL shell to the given database, assuming the configured database and user supports this feature.
379,815
def explode(self, obj):
    if obj in self._done:
        return False
    result = False
    for item in self._explode:
        # attribute name restored by inference from the comparison just below
        if hasattr(item, '_moId'):
            if obj._moId == item._moId:
                result = True
        else:
            if obj.__class__.__name__ == item.__name__:
                result = True
    if result:
        self._done.add(obj)
    return result
Determine if the object should be exploded.
379,816
def extract_command(outputdir, domain_methods, text_domain, keywords, comment_tags, base_dir, project, version, msgid_bugs_address): monkeypatch_i18n()
Extracts strings into .pot files :arg domain: domains to generate strings for or 'all' for all domains :arg outputdir: output dir for .pot files; usually locale/templates/LC_MESSAGES/ :arg domain_methods: DOMAIN_METHODS setting :arg text_domain: TEXT_DOMAIN settings :arg keywords: KEYWORDS setting :arg comment_tags: COMMENT_TAGS setting :arg base_dir: BASE_DIR setting :arg project: PROJECT setting :arg version: VERSION setting :arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
379,817
def create_concept_scheme(rdf, ns, lname=): ont = None if not ns: if p.startswith(prot): rdf.remove((ont, p, o)) replace_uri(rdf, ont, cs) return cs
Create a skos:ConceptScheme in the model and return it.
379,818
def restore(self, bAsync = True):
    if bAsync:
        win32.ShowWindowAsync( self.get_handle(), win32.SW_RESTORE )
    else:
        win32.ShowWindow( self.get_handle(), win32.SW_RESTORE )
Unmaximize and unminimize the window. @see: L{maximize}, L{minimize} @type bAsync: bool @param bAsync: Perform the request asynchronously. @raise WindowsError: An error occurred while processing this request.
379,819
def data(self, data, part=False, dataset=''):
    # default for `dataset` restored from the docstring (default: '')
    links = self.parser(self.scanner(data, part), part, dataset)
    self.storage.add_links(links)
Parse data and update links. Parameters ---------- data Data to parse. part : `bool`, optional True if data is partial (default: `False`). dataset : `str`, optional Dataset key prefix (default: '').
379,820
def get_handler(self, *args, **options): handler = super(Command, self).get_handler(*args, **options) insecure_serving = options.get(, False) if self.should_use_static_handler(options): return StaticFilesHandler(handler) return handler
Returns the static files serving handler wrapping the default handler, if static files should be served. Otherwise just returns the default handler.
379,821
def delete(self):
    if self.dynamic_version_of is None:
        self._delete_dynamic_versions()
    else:
        super(DynamicFieldMixin, self).delete()
        self._inventory.srem(self.dynamic_part)
If a dynamic version, delete it the standard way and remove it from the inventory, else delete all dynamic versions.
379,822
def normalize_feature_objects(feature_objs):
    # string literals restored from GeoJSON conventions ('type' / 'Feature')
    for obj in feature_objs:
        if hasattr(obj, "__geo_interface__") and \
                'type' in obj.__geo_interface__.keys() and \
                obj.__geo_interface__['type'] == 'Feature':
            yield obj.__geo_interface__
        elif isinstance(obj, dict) and 'type' in obj and \
                obj['type'] == 'Feature':
            yield obj
        else:
            raise ValueError("Did not recognize object {0}"
                             "as GeoJSON Feature".format(obj))
Takes an iterable of GeoJSON-like Feature mappings or an iterable of objects with a geo interface and normalizes it to the former.
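A minimal sketch of the mapping case; the Feature dict below is illustrative:

    feature = {"type": "Feature",
               "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
               "properties": {}}

    # A plain GeoJSON-like mapping is passed through unchanged; an object that
    # exposes __geo_interface__ would be replaced by that mapping instead.
    normalized = list(normalize_feature_objects([feature]))
    assert normalized[0]["type"] == "Feature"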
379,823
def calendar(type=, direction=, last=1, startDate=None, token=, version=): if startDate: startDate = _strOrDate(startDate) return _getJson(.format(type=type, direction=direction, last=last, date=startDate), token, version) return _getJson( + type + + direction + + str(last), token, version)
This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1. https://iexcloud.io/docs/api/#u-s-exchanges 8am, 9am, 12pm, 1pm UTC daily Args: type (string); "holiday" or "trade" direction (string); "next" or "last" last (int); number to move in direction startDate (date); start date for next or last, YYYYMMDD token (string); Access token version (string); API version Returns: dict: result
379,824
def install_binary_dist(self, members, virtualenv_compatible=True, prefix=None, python=None, track_installed_files=False): module_search_path = set(map(os.path.normpath, sys.path)) prefix = os.path.normpath(prefix or self.config.install_prefix) python = os.path.normpath(python or self.config.python_executable) installed_files = [] for member, from_handle in members: pathname = member.name if virtualenv_compatible: pathname = re.sub(, , pathname) if self.config.on_debian and in pathname: installed_files.append(pathname) directory = os.path.dirname(pathname) if not os.path.isdir(directory): logger.debug("Creating directory: %s ..", directory) makedirs(directory) logger.debug("Creating file: %s ..", pathname) with open(pathname, ) as to_handle: contents = from_handle.read() if contents.startswith(b): contents = self.fix_hashbang(contents, python) to_handle.write(contents) os.chmod(pathname, member.mode) if track_installed_files: self.update_installed_files(installed_files)
Install a binary distribution into the given prefix. :param members: An iterable of tuples with two values each: 1. A :class:`tarfile.TarInfo` object. 2. A file-like object. :param prefix: The "prefix" under which the requirements should be installed. This will be a pathname like ``/usr``, ``/usr/local`` or the pathname of a virtual environment. Defaults to :attr:`.Config.install_prefix`. :param python: The pathname of the Python executable to use in the shebang line of all executable Python scripts inside the binary distribution. Defaults to :attr:`.Config.python_executable`. :param virtualenv_compatible: Whether to enable workarounds to make the resulting filenames compatible with virtual environments (defaults to :data:`True`). :param track_installed_files: If this is :data:`True` (not the default for this method because of backwards compatibility) pip-accel will create ``installed-files.txt`` as required by pip to properly uninstall packages. This method installs a binary distribution created by :class:`build_binary_dist()` into the given prefix (a directory like ``/usr``, ``/usr/local`` or a virtual environment).
379,825
def as_dict(self):
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "molecule": self.molecule.as_dict(),
         "graphs": json_graph.adjacency_data(self.graph)}
    return d
As in :Class: `pymatgen.core.Molecule` except with using `to_dict_of_dicts` from NetworkX to store graph information.
379,826
def _set_defined_policy(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=defined_policy.defined_policy, is_container=, presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__defined_policy = t if hasattr(self, ): self._set()
Setter method for defined_policy, mapped from YANG variable /rbridge_id/secpolicy/defined_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_defined_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_defined_policy() directly. YANG Description: Set the defined policy
379,827
def load_datafile(name, search_path, codecs=get_codecs(), **kwargs): return munge.load_datafile(name, search_path, codecs, **kwargs)
Find a datafile and load it with a codec. TODO: only loads the first match. kwargs: default = if passed, it will be returned on failure instead of raising an exception.
379,828
def stats(self, request, *args, **kwargs): queryset = self.filter_queryset(self.get_queryset()) alerts_severities_count = queryset.values().annotate(count=Count()) severity_names = dict(models.Alert.SeverityChoices.CHOICES) alerts_severities_count = { severity_names[asc[]].lower(): asc[] for asc in alerts_severities_count} for severity_name in severity_names.values(): if severity_name.lower() not in alerts_severities_count: alerts_severities_count[severity_name.lower()] = 0 return response.Response(alerts_severities_count, status=status.HTTP_200_OK)
To get count of alerts per severities - run **GET** request against */api/alerts/stats/*. This endpoint supports all filters that are available for alerts list (*/api/alerts/*). Response example: .. code-block:: javascript { "debug": 2, "error": 1, "info": 1, "warning": 1 }
379,829
def _get_snmpv3(self, oid):
    snmp_target = (self.hostname, self.snmp_port)
    cmd_gen = cmdgen.CommandGenerator()
    (error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
        cmdgen.UsmUserData(
            self.user,
            self.auth_key,
            self.encrypt_key,
            authProtocol=self.auth_proto,
            privProtocol=self.encryp_proto,
        ),
        cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
        oid,
        lookupNames=True,
        lookupValues=True,
    )
    if not error_detected and snmp_data[0][1]:
        return text_type(snmp_data[0][1])
    return ""
Try to send an SNMP GET operation using SNMPv3 for the specified OID. Parameters ---------- oid : str The SNMP OID that you want to get. Returns ------- string : str The string as part of the value from the OID you are trying to retrieve.
379,830
def to_frame(self, data, state):
    # Delimiter literals restored by inference: frames are newline-terminated,
    # optionally preceded by a carriage return (see self.carriage_return).
    data_len = data.find(b'\n')
    if data_len < 0:
        raise exc.NoFrames()
    frame_len = data_len + 1
    if (self.carriage_return and data_len and
            data[data_len - 1] == ord(b'\r')):
        data_len -= 1
    frame = six.binary_type(data[:data_len])
    del data[:frame_len]
    return frame
Extract a single frame from the data buffer. The consumed data should be removed from the buffer. If no complete frame can be read, must raise a ``NoFrames`` exception. :param data: A ``bytearray`` instance containing the data so far read. :param state: An instance of ``FramerState``. If the buffer contains a partial frame, this object can be used to store state information to allow the remainder of the frame to be read. :returns: A frame. The frame may be any object. The stock framers always return bytes.
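A hedged usage sketch, assuming `framer` is an instance of this line-oriented framer with `carriage_return` enabled and `state` is its FramerState object:

    buf = bytearray(b"PING 1\r\nPARTIAL")
    frame = framer.to_frame(buf, state)   # -> b"PING 1"
    # The consumed bytes (including the terminator) were removed from the buffer
    assert buf == bytearray(b"PARTIAL")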
379,831
def arch(self): if self.method in (, , ): return self.params[2] if self.method in (, ): return self.params[1] if self.method == : return self.params[3] if self.method == : return self.params[0][]
Return an architecture for this task. :returns: an arch string (eg "noarch", or "ppc64le"), or None if this task has no architecture associated with it.
379,832
def filter_params(self, value): if value is None: return {} val_min = value.get(, None) val_max = value.get(, None) params = {} if val_min == val_max: return { self.target: val_min } key = self.target + "__" if val_min is not None: params[key+self.lookup_types[0]] = val_min if val_max is not None: params[key+self.lookup_types[1]] = val_max return params
return filtering params
379,833
def grant_usage_install_privileges(cls, cur, schema_name, roles): cur.execute( .format(schema_name, roles))
Sets search path
379,834
def parent(self, parent): if parent is None: self._parent = None else: from rafcon.core.states.state import State assert isinstance(parent, State) old_parent = self.parent self._parent = ref(parent) valid, message = self._check_validity() if not valid: if not old_parent: self._parent = None else: self._parent = ref(old_parent) class_name = self.__class__.__name__ if global_config.get_config_value("LIBRARY_RECOVERY_MODE") is True: do_delete_item = True if "not have matching data types" in message: do_delete_item = False self._parent = ref(parent) raise RecoveryModeException("{0} invalid within state \"{1}\" (id {2}): {3}".format( class_name, parent.name, parent.state_id, message), do_delete_item=do_delete_item) else: raise ValueError("{0} invalid within state \"{1}\" (id {2}): {3} {4}".format( class_name, parent.name, parent.state_id, message, self))
Setter for the parent state of the state element :param rafcon.core.states.state.State parent: Parent state or None
379,835
def create_columns(self): reader = self._get_csv_reader() headings = six.next(reader) try: examples = six.next(reader) except StopIteration: examples = [] found_fields = set() for i, value in enumerate(headings): if i >= 20: break infer_field = self.has_headings and value not in found_fields to_field = ( { "date": "date", "amount": "amount", "description": "description", "memo": "description", "notes": "description", }.get(value.lower(), "") if infer_field else "" ) if to_field: found_fields.add(to_field) TransactionCsvImportColumn.objects.update_or_create( transaction_import=self, column_number=i + 1, column_heading=value if self.has_headings else "", to_field=to_field, example=examples[i].strip() if examples else "", )
For each column in file create a TransactionCsvImportColumn
379,836
def cmdline(argv=sys.argv[1:]): parser = ArgumentParser( description=) parser.add_argument(, help=) parser.add_argument(, help=) options = parser.parse_args(argv) factory = StopWordFactory() language = options.language stop_words = factory.get_stop_words(language, fail_safe=True) content = open(options.source, ).read().decode() print(stop_words.rebase(content))
Script for rebasing a text file
379,837
def extract_datetime_hour(cls, datetime_str): if not datetime_str: raise DateTimeFormatterException() try: return cls._extract_timestamp(datetime_str, cls.DATETIME_HOUR_FORMAT) except (TypeError, ValueError): raise DateTimeFormatterException(.format(datetime_str))
Tries to extract a `datetime` object from the given string, including only hours. Raises `DateTimeFormatterException` if the extraction fails.
379,838
def fromML(vec):
    if isinstance(vec, newlinalg.DenseVector):
        return DenseVector(vec.array)
    elif isinstance(vec, newlinalg.SparseVector):
        return SparseVector(vec.size, vec.indices, vec.values)
    else:
        raise TypeError("Unsupported vector type %s" % type(vec))
Convert a vector from the new mllib-local representation. This does NOT copy the data; it copies references. :param vec: a :py:class:`pyspark.ml.linalg.Vector` :return: a :py:class:`pyspark.mllib.linalg.Vector` .. versionadded:: 2.0.0
379,839
def get_resource_by_agent(self, agent_id): collection = JSONClientValidated(, collection=, runtime=self._runtime) result = collection.find_one( dict({: {: [str(agent_id)]}}, **self._view_filter())) return objects.Resource( osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
Gets the ``Resource`` associated with the given agent. arg: agent_id (osid.id.Id): ``Id`` of the ``Agent`` return: (osid.resource.Resource) - associated resource raise: NotFound - ``agent_id`` is not found raise: NullArgument - ``agent_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
379,840
def clear_max_string_length(self): if (self.get_max_string_length_metadata().is_read_only() or self.get_max_string_length_metadata().is_required()): raise NoAccess() self.my_osid_object_form._my_map[] = \ self.get_max_string_length_metadata().get_default_cardinal_values()[0]
stub
379,841
def update(context, id, export_control, active): component_info = component.get(context, id=id) etag = component_info.json()[][] result = component.update(context, id=id, etag=etag, export_control=export_control, state=utils.active_string(active)) utils.format_output(result, context.format)
update(context, id, export_control, active) Update a component >>> dcictl component-update [OPTIONS] :param string id: ID of the component [required] :param boolean export-control: Set the component visible for users :param boolean active: Set the component in the active state
379,842
def all_state_variables_read(self):
    if self._all_state_variables_read is None:
        self._all_state_variables_read = self._explore_functions(
            lambda x: x.state_variables_read)
    return self._all_state_variables_read
recursive version of variables_read
379,843
def sendConnect(self, data): if self.backend == : import zmq self.context = zmq.Context() self.socket = self.context.socket(zmq.DEALER) if sys.version_info < (3,): self.socket.setsockopt_string(zmq.IDENTITY, unicode()) else: self.socket.setsockopt_string(zmq.IDENTITY, ) self.socket.connect( "tcp://{hostname}:{port}".format( port=self.brokerPort, hostname = self.hostname ) ) self.socket.send_multipart([b"CONNECT", pickle.dumps(data, pickle.HIGHEST_PROTOCOL)]) else: pass
Send a CONNECT command to the broker :param data: List of other broker main socket URL
379,844
def update_peer(self, current_name, new_name, new_url, username, password,
                peer_type="REPLICATION"):
    if self._get_resource_root().version < 11:
        peer_type = None
    peer = ApiCmPeer(self._get_resource_root(),
                     name=new_name,
                     url=new_url,
                     username=username,
                     password=password,
                     type=peer_type)
    return self._put("peers/" + current_name, ApiCmPeer, data=peer, api_version=3)
Update a replication peer. @param current_name: The name of the peer to updated. @param new_name: The new name for the peer. @param new_url: The new url for the peer. @param username: The admin username to use to setup the remote side of the peer connection. @param password: The password of the admin user. @param peer_type: Added in v11. The type of the peer. Defaults to 'REPLICATION'. @return: The updated peer. @since: API v3
379,845
def bake_content(request): ident_hash = request.matchdict[] try: id, version = split_ident_hash(ident_hash) except IdentHashError: raise httpexceptions.HTTPNotFound() if not version: raise httpexceptions.HTTPBadRequest() with db_connect() as db_conn: with db_conn.cursor() as cursor: cursor.execute(, (ident_hash,)) try: is_binder, stateid, module_ident = cursor.fetchone() except TypeError: raise httpexceptions.HTTPNotFound() if not is_binder: raise httpexceptions.HTTPBadRequest( .format(ident_hash)) if stateid == 5: cursor.execute(, (module_ident, ident_hash)) else: cursor.execute(, (ident_hash,))
Invoke the baking process - trigger post-publication
379,846
def add_tcp_callback(port, callback, threaded_callback=False): _rpio.add_tcp_callback(port, callback, threaded_callback)
Adds a unix socket server callback, which will be invoked when values arrive from a connected socket client. The callback must accept two parameters, eg. ``def callback(socket, msg)``.
379,847
def put(self, locator = None, component = None):
    if component == None:
        raise Exception("Component cannot be null")
    self._lock.acquire()
    try:
        self._references.append(Reference(locator, component))
    finally:
        self._lock.release()
Puts a new reference into this reference map. :param locator: a component reference to be added. :param component: a locator to find the reference by.
379,848
def shift_and_scale(matrix, shift, scale):
    zeroed = matrix - matrix.min()
    scaled = (scale - shift) * (zeroed / zeroed.max())
    return scaled + shift
Shift and scale matrix so its minimum value is placed at `shift` and its maximum value is scaled to `scale`
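A quick numeric check of the mapping, assuming NumPy input:

    import numpy as np

    m = np.array([[2.0, 4.0], [6.0, 10.0]])
    shift_and_scale(m, shift=0.0, scale=1.0)
    # -> [[0.  , 0.25], [0.5 , 1.  ]]  (min maps to `shift`, max to `scale`)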
379,849
def strip_rate(self, idx):
    # struct format restored by inference: the datarate field is one unsigned byte
    val, = struct.unpack_from('B', self._rtap, idx)
    rate_unit = float(1) / 2  # radiotap reports the rate in units of 0.5 Mbps
    return idx + 1, rate_unit * val
strip(1 byte) radiotap.datarate note that, unit of this field is originally 0.5 Mbps :idx: int :return: int idx :return: double rate in terms of Mbps
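A worked example of the unit conversion described above:

    # A raw radiotap datarate byte of 0x16 (22 decimal) means 22 * 0.5 Mbps:
    raw = 0x16
    rate_mbps = raw * 0.5   # -> 11.0 Mbps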
379,850
def nullspace(A, atol=1e-13, rtol=0):
    A = np.atleast_2d(A)
    u, s, vh = np.linalg.svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns
Compute an approximate basis for the nullspace of A. The algorithm used by this function is based on the singular value decomposition of `A`. Parameters ---------- A : numpy.ndarray A should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than `atol` are considered to be zero. rtol : float The relative tolerance. Singular values less than rtol*smax are considered to be zero, where smax is the largest singular value. If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than `tol` are considered to be zero. Returns ------- numpy.ndarray If `A` is an array with shape (m, k), then `ns` will be an array with shape (k, n), where n is the estimated dimension of the nullspace of `A`. The columns of `ns` are a basis for the nullspace; each element in numpy.dot(A, ns) will be approximately zero. Notes ----- Taken from the numpy cookbook.
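A small check on a rank-deficient matrix, with numpy imported as np:

    import numpy as np

    A = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])      # rank 1, so the nullspace has dimension 2
    ns = nullspace(A)
    ns.shape                             # (3, 2)
    np.allclose(A.dot(ns), 0.0)          # True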
379,851
def get_wulff_shape(self, material_id):
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
    from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str

    structure = self.get_structure_by_material_id(material_id)
    surfaces = self.get_surface_data(material_id)["surfaces"]
    lattice = (SpacegroupAnalyzer(structure)
               .get_conventional_standard_structure().lattice)
    miller_energy_map = {}
    for surf in surfaces:
        miller = tuple(surf["miller_index"])
        if (miller not in miller_energy_map) or surf["is_reconstructed"]:
            miller_energy_map[miller] = surf["surface_energy"]
    millers, energies = zip(*miller_energy_map.items())
    return WulffShape(lattice, millers, energies)
Constructs a Wulff shape for a material. Args: material_id (str): Materials Project material_id, e.g. 'mp-123'. Returns: pymatgen.analysis.wulff.WulffShape
379,852
def mimetype_icon(path, fallback=None): mime = mimetypes.guess_type(path)[0] if mime: icon = mime.replace(, ) if QtGui.QIcon.hasThemeIcon(icon): icon = QtGui.QIcon.fromTheme(icon) if not icon.isNull(): return icon if fallback: return QtGui.QIcon(fallback) return QtGui.QIcon.fromTheme()
Tries to create an icon from theme using the file mimetype. E.g.:: return self.mimetype_icon( path, fallback=':/icons/text-x-python.png') :param path: file path for which the icon must be created :param fallback: fallback icon path (qrc or file system) :returns: QIcon or None if the file mimetype icon could not be found.
379,853
def unique_filename(**kwargs): if not in kwargs: path = temp_dir() kwargs[] = path else: path = temp_dir(kwargs[]) kwargs[] = path if not os.path.exists(kwargs[]): os.umask(umask) handle, filename = mkstemp(**kwargs) os.close(handle) try: os.remove(filename) except OSError: pass return filename
Create new filename guaranteed not to exist previously Use mkstemp to create the file, then remove it and return the name If dir is specified, the tempfile will be created in the path specified otherwise the file will be created in a directory following this scheme: :file:'/tmp/inasafe/<dd-mm-yyyy>/<user>/impacts' See http://docs.python.org/library/tempfile.html for details. Example usage: tempdir = temp_dir(sub_dir='test') filename = unique_filename(suffix='.foo', dir=tempdir) print filename /tmp/inasafe/23-08-2012/timlinux/test/tmpyeO5VR.foo Or with no preferred subdir, a default subdir of 'impacts' is used: filename = unique_filename(suffix='.shp') print filename /tmp/inasafe/23-08-2012/timlinux/impacts/tmpoOAmOi.shp
379,854
def naiveWordAlignment(tg, utteranceTierName, wordTierName, isleDict, phoneHelperTierName=None, removeOverlappingSegments=False): utterance utteranceTier = tg.tierDict[utteranceTierName] wordTier = None if wordTierName in tg.tierNameList: wordTier = tg.tierDict[wordTierName] wordEntryList = [] phoneEntryList = [] if wordTier is not None: if removeOverlappingSegments: for startT, stopT, _ in utteranceTier.entryList: wordTier = wordTier.eraseRegion(startT, stopT, , False) wordEntryList = wordTier.entryList for startT, stopT, label in utteranceTier.entryList: wordList = label.split() superPhoneList = [] numPhones = 0 i = 0 while i < len(wordList): word = wordList[i] try: firstSyllableList = isleDict.lookup(word)[0][0][0] except isletool.WordNotInISLE: wordList.pop(i) continue phoneList = [phone for syllable in firstSyllableList for phone in syllable] superPhoneList.append(phoneList) numPhones += len(phoneList) i += 1 subWordEntryList = [] subPhoneEntryList = [] if wordTier is not None: subWordEntryList = wordTier.crop(startT, stopT, "truncated", False).entryList if len(subWordEntryList) == 0: wordStartT = startT phoneDur = (stopT - startT) / float(numPhones) for i, word in enumerate(wordList): phoneListTxt = " ".join(superPhoneList[i]) wordStartT = wordStartT wordEndT = wordStartT + (phoneDur * len(superPhoneList[i])) subWordEntryList.append((wordStartT, wordEndT, word)) subPhoneEntryList.append((wordStartT, wordEndT, phoneListTxt)) wordStartT = wordEndT wordEntryList.extend(subWordEntryList) phoneEntryList.extend(subPhoneEntryList) newWordTier = tgio.IntervalTier(wordTierName, wordEntryList, tg.minTimestamp, tg.maxTimestamp) if wordTier is not None: tg.replaceTier(wordTierName, newWordTier) else: tg.addTier(newWordTier) if phoneHelperTierName is not None and len(phoneEntryList) > 0: newPhoneTier = tgio.IntervalTier(phoneHelperTierName, phoneEntryList, tg.minTimestamp, tg.minTimestamp) if phoneHelperTierName in tg.tierNameList: tg.replaceTier(phoneHelperTierName, newPhoneTier) else: tg.addTier(newPhoneTier) return tg
Performs naive alignment for utterances in a textgrid Naive alignment gives each segment equal duration. Word duration is determined by the duration of an utterance and the number of phones in the word. By 'utterance' I mean a string of words separated by a space bounded in time eg (0.5, 1.5, "he said he likes ketchup"). phoneHelperTierName - creates a tier that is parallel to the word tier. However, the labels are the phones for the word, rather than the word removeOverlappingSegments - remove any labeled words or phones that fall under labeled utterances
379,855
def configure_logging(logger_name, filename=None): if filename is None: if logger_name is None: probing_paths = [path.join(, ), path.join(, )] else: probing_paths = [ path.join(, logger_name + ), path.join(, logger_name + ), path.join(, ), path.join(, )] for relative_path in probing_paths: configuration_file = path.join(app_root, relative_path) if path.exists(configuration_file): filename = configuration_file break elif not path.isabs(filename): found = False for conf in , : configuration_file = path.join(app_root, conf, filename) if path.exists(configuration_file): filename = configuration_file found = True break if not found: raise ValueError(.format(filename)) elif not path.exists(filename): raise ValueError(.format(filename)) if filename is not None: global _current_logging_configuration_file filename = path.realpath(filename) if filename != _current_logging_configuration_file: working_directory = getcwd() chdir(app_root) try: fileConfig(filename, {: splunk_home}) finally: chdir(working_directory) _current_logging_configuration_file = filename if len(root.handlers) == 0: root.addHandler(StreamHandler()) return None if logger_name is None else getLogger(logger_name), filename
Configure logging and return the named logger and the location of the logging configuration file loaded. This function expects a Splunk app directory structure:: <app-root> bin ... default ... local ... This function looks for a logging configuration file at each of these locations, loading the first, if any, logging configuration file that it finds:: local/{name}.logging.conf default/{name}.logging.conf local/logging.conf default/logging.conf The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths in the logging configuration file are relative to *<app-root>*. The current directory is reset before return. You may short circuit the search for a logging configuration file by providing an alternative file location in `path`. Logging configuration files must be in `ConfigParser format`_. #Arguments: :param logger_name: Logger name :type logger_name: bytes, unicode :param filename: Location of an alternative logging configuration file or `None`. :type filename: bytes, unicode or NoneType :returns: The named logger and the location of the logging configuration file loaded. :rtype: tuple .. _ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format
379,856
def reporter(self, analysistype=): logging.info(.format(analysistype)) genusgenes = dict() targetpath = str() for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != : targetpath = sample[analysistype].targetpath for organismfile in glob(os.path.join(targetpath, )): organism = os.path.splitext(os.path.basename(organismfile))[0] for record in SeqIO.parse(open(organismfile), ): try: genusgenes[organism].add(record.id.split()[0]) except (KeyError, IndexError): genusgenes[organism] = set() genusgenes[organism].add(record.id.split()[0]) for sample in self.runmetadata.samples: sample[analysistype].targetgenera = list() if sample.general.bestassemblyfile != : for organism in genusgenes: for gene in sample[analysistype].results: if gene.split()[0] in genusgenes[organism]: if organism not in sample[analysistype].targetgenera: sample[analysistype].targetgenera.append(organism) make_path(self.reportpath) header = .format(.join(self.genelist)) data = str() with open(os.path.join(self.reportpath, analysistype + ), ) as report: for sample in self.runmetadata.samples: sample[analysistype].report_output = list() if sample.general.bestassemblyfile != : data += .format(sample.name, .join(sample[analysistype].targetgenera)) best_dict = dict() if sample[analysistype].results: gene_check = list() for target, pid in sample[analysistype].results.items(): gene_name = target.split()[0] for gene in self.genelist: if gene == gene_name: try: if float(pid) > best_dict[gene]: best_dict[gene] = float(pid) except KeyError: best_dict[gene] = float(pid) for gene in self.genelist: try: best_dict[gene] except KeyError: data += for name, identity in sample[analysistype].results.items(): if name.split()[0] == gene and gene not in gene_check: data += .format(pid=best_dict[gene]) try: if not sample.general.trimmedcorrectedfastqfiles[0].endswith(): data += \ .format(avgd=sample[analysistype].avgdepth[name], std=sample[analysistype].standarddev[name]) else: data += except IndexError: data += gene_check.append(gene) sample[analysistype].report_output.append(gene) data += else: data += report.write(header) report.write(data)
Creates a report of the genesippr results :param analysistype: The variable to use when accessing attributes in the metadata object
379,857
def parse_pv(header):
    order_fit = parse_order_fit(header)

    def parse_with_base(i):
        key_base = "PV%d_" % i
        pvi_x = [header[key_base + "0"]]

        def parse_range(lower, upper):
            for j in range(lower, upper + 1):
                pvi_x.append(header[key_base + str(j)])

        if order_fit >= 1:
            parse_range(1, 3)
        if order_fit >= 2:
            parse_range(4, 6)
        if order_fit >= 3:
            parse_range(7, 10)
        return pvi_x

    return [parse_with_base(1), parse_with_base(2)]
Parses the PV array from an astropy FITS header. Args: header: astropy.io.fits.header.Header The header containing the PV values. Returns: cd: 2d array (list(list(float)) [[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]] Note that N depends on the order of the fit. For example, an order 3 fit goes up to PV?_10.
379,858
def download(url):
    session = requests.Session()
    # Mount prefix restored by inference: FileAdapter serves local file:// URLs
    session.mount('file://', FileAdapter())
    try:
        res = session.get(url)
    except requests.exceptions.ConnectionError as e:
        raise e
    res.raise_for_status()
    return res
Uses requests to download an URL, maybe from a file
379,859
def get_nets_radb(self, response, is_http=False): nets = [] if is_http: regex = r else: regex = r for match in re.finditer( regex, response, re.MULTILINE ): try: net = copy.deepcopy(BASE_NET) net[] = match.group(1).strip() net[] = match.start() net[] = match.end() nets.append(net) except ValueError: pass return nets
The function for parsing network blocks from ASN origin data. Args: response (:obj:`str`): The response from the RADB whois/http server. is_http (:obj:`bool`): If the query is RADB HTTP instead of whois, set to True. Defaults to False. Returns: list: A list of network block dictionaries :: [{ 'cidr' (str) - The assigned CIDR 'start' (int) - The index for the start of the parsed network block 'end' (int) - The index for the end of the parsed network block }]
379,860
def _ReadTab(Year): dtype_in = [(,),(,),(,),(,), (,),(,),(,), (,),(,),(,), (,),(,),(,),(,),(,), (,),(,),(,),(,),(,), (,),(,),(,)] fname = Globals.DataPath+.format(Year) data = pf.ReadASCIIData(fname,Header=False,dtype=dtype_in) dtype_out = [(,),(,),(,),(,),(,),(,), (,),(,),(,),(,),(,),(,), (,),(,),(,),(,),(,), (,),(,),(,),(,),(,), (,),(,),(,),(,),(,),(,)] out = np.recarray(data.size,dtype=dtype_out) names = data.dtype.names for n in names: if n in out.dtype.names: out[n] = data[n] out.G1 = 0.0 out.G2 = 0.0 out.Kp = 0.0 out.ut = out.Hr + out.Mn/60.0 for i in range(0,out.size): out.Date[i] = TT.DayNotoDate(out.Year[i],out.DayNo[i]) return out
Reads OMNI data tab with Tsyganenko parameters. Input: Year: Integer year to read
379,861
def set_firewall_settings(profile, inbound=None, outbound=None, store='local'):
    # Allowed values and the default store are restored from the docstring below;
    # the ValueError message texts are placeholders (the original literals were lost).
    if profile.lower() not in ('domain', 'public', 'private'):
        raise ValueError('Invalid profile: {0}'.format(profile))
    if inbound and inbound.lower() not in ('blockinbound', 'blockinboundalways',
                                           'allowinbound', 'notconfigured'):
        raise ValueError('Invalid inbound setting: {0}'.format(inbound))
    if outbound and outbound.lower() not in ('allowoutbound', 'blockoutbound',
                                             'notconfigured'):
        raise ValueError('Invalid outbound setting: {0}'.format(outbound))
    if not inbound and not outbound:
        raise ValueError('Must set inbound or outbound')
    return True
Set the firewall inbound/outbound settings for the specified profile and store Args: profile (str): The firewall profile to configure. Valid options are: - domain - public - private inbound (str): The inbound setting. If ``None`` is passed, the setting will remain unchanged. Valid values are: - blockinbound - blockinboundalways - allowinbound - notconfigured Default is ``None`` outbound (str): The outbound setting. If ``None`` is passed, the setting will remain unchanged. Valid values are: - allowoutbound - blockoutbound - notconfigured Default is ``None`` store (str): The store to use. This is either the local firewall policy or the policy defined by local group policy. Valid options are: - lgpo - local Default is ``local`` Returns: bool: ``True`` if successful Raises: CommandExecutionError: If an error occurs ValueError: If the parameters are incorrect
379,862
def init_app(self, app): super(InvenioIIIFAPI, self).init_app(app) api = Api(app=app) self.iiif_ext.init_restful(api, prefix=app.config[])
Flask application initialization.
379,863
def trace_memory_start(self):
    self.trace_memory_clean_caches()
    objgraph.show_growth(limit=30)
    gc.collect()
    self._memory_start = self.worker.get_memory()["total"]
Starts measuring memory consumption
379,864
def on_fork():
    reset_logging_framework()
    fixup_prngs()
    mitogen.core.Latch._on_fork()
    mitogen.core.Side._on_fork()
    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()

    # module name restored by inference from the variable it is bound to
    mitogen__service = sys.modules.get('mitogen.service')
    if mitogen__service:
        mitogen__service._pool_lock = threading.Lock()
Should be called by any program integrating Mitogen each time the process is forked, in the context of the new child.
379,865
def _JzStaeckelIntegrandSquared(v, E, Lz, I3V, delta, u0, cosh2u0, sinh2u0,
                                potu0pi2, pot):
    sin2v = nu.sin(v)**2.
    dV = cosh2u0*potu0pi2 \
        - (sinh2u0+sin2v)*potentialStaeckel(u0, v, pot, delta)
    return E*sin2v + I3V + dV - Lz**2./2./delta**2./sin2v
The J_z integrand: p_v(v)/2/delta^2
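Written out (a direct transcription of the code above, with Phi the Staeckel potential; the function name suggests the returned quantity is the squared momentum over 2*delta^2):

    \frac{p_v^2(v)}{2\delta^2} = E\,\sin^2 v + I_3(v)
        + \cosh^2 u_0\,\Phi\!\left(u_0, \tfrac{\pi}{2}\right)
        - \left(\sinh^2 u_0 + \sin^2 v\right)\Phi(u_0, v)
        - \frac{L_z^2}{2\delta^2 \sin^2 v}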
379,866
def swipe_right(self, steps=10, *args, **selectors): self.device(**selectors).swipe.right(steps=steps)
Swipe the UI object with *selectors* from center to right See `Swipe Left` for more details.
379,867
def add_table(self, table, row=None, col=0, row_spaces=1):
    name = table.name
    assert name is not None, "Tables must have a name"
    assert name not in self.__tables, "Table %s already exists in this worksheet" % name
    if row is None:
        row = self.__next_row
    self.__next_row = max(row + table.height + row_spaces, self.__next_row)
    self.__tables[name] = (table, (row, col))
    return row, col
Adds a table to the worksheet at (row, col). Return the (row, col) where the table has been put. :param xltable.Table table: Table to add to the worksheet. :param int row: Row to start the table at (defaults to the next free row). :param int col: Column to start the table at. :param int row_spaces: Number of rows to leave between this table and the next.
379,868
def query(self): logger.debug("Grafana query... %s", cherrypy.request.method) if cherrypy.request.method == : cherrypy.response.headers[] = cherrypy.response.headers[] = cherrypy.response.headers[] = cherrypy.request.handler = None return {} if getattr(cherrypy.request, , None): posted_data = cherrypy.request.json logger.debug("Posted data: %s", cherrypy.request.json) targets = None target = None try: targets = posted_data.get("targets") assert targets assert len(targets) == 1 target = targets[0].get("target") except Exception as exp: cherrypy.response.status = 409 return {: u, : u % exp} resp = [] if target in []: resp = [{ "type": "table", "columns": [ { "text": "Time", "type": "time", "sort": True, "desc": True }, { "text": "Severity", "type": "integer" }, { "text": "Message", "type": "string" } ], "rows": [] }] severity = { "info": 0, : 1, : 2, : 3 } for log in reversed(self.app.recent_events): resp[0][].append([log[] * 1000, severity.get(log[].lower(), 3), log[]]) if target in []: resp = [{ "type": "table", "columns": [ { "text": "Raised", "type": "time", "sort": True, "desc": True }, { "text": "Severity", "type": "integer" }, { "text": "Host", "type": "string" }, { "text": "Service", "type": "string" }, { "text": "State", "type": "integer" }, { "text": "Output", "type": "string" } ], "rows": [] }] severity = { "up": 0, : 2, : 0, : 1, : 2 } problems = {} for scheduler_link in self.app.conf.schedulers: sched_res = scheduler_link.con.get(, wait=True) if in sched_res: problems.update(sched_res[]) for problem_uuid in problems: log = problems[problem_uuid] resp[0][].append([log[] * 1000, severity.get(log[].lower(), 3), log[], log[], log[], log[]]) return resp
Request object passed to datasource.query function: { 'timezone': 'browser', 'panelId': 38, 'range': { 'from': '2018-08-29T02:38:09.633Z', 'to': '2018-08-29T03:38:09.633Z', 'raw': {'from': 'now-1h', 'to': 'now'} }, 'rangeRaw': {'from': 'now-1h', 'to': 'now'}, 'interval': '10s', 'intervalMs': 10000, 'targets': [ { 'target': 'problems', 'refId': 'A', 'type': 'table'} ], 'format': 'json', 'maxDataPoints': 314, 'scopedVars': { '__interval': {'text': '10s', 'value': '10s'}, '__interval_ms': {'text': 10000, 'value': 10000} } } Only the first target is considered. If several targets are required, an error is raised. The target is a string that is searched in the target_queries dictionary. If found the corresponding query is executed and the result is returned. Table response from datasource.query. An array of: [ { "type": "table", "columns": [ { "text": "Time", "type": "time", "sort": true, "desc": true, }, { "text": "mean", }, { "text": "sum", } ], "rows": [ [ 1457425380000, null, null ], [ 1457425370000, 1002.76215352, 1002.76215352 ], ] } ] :return: See upper comment :rtype: list
379,869
def start(self, host, nornir):
    self.host = host
    self.nornir = nornir

    try:
        logger.debug("Host %r: running task %r", self.host.name, self.name)
        r = self.task(self, **self.params)
        if not isinstance(r, Result):
            r = Result(host=host, result=r)
    except NornirSubTaskError as e:
        tb = traceback.format_exc()
        logger.error(
            "Host %r: task %r failed with traceback:\n%s",
            self.host.name, self.name, tb,
        )
        r = Result(host, exception=e, result=str(e), failed=True)
    except Exception as e:
        tb = traceback.format_exc()
        logger.error(
            "Host %r: task %r failed with traceback:\n%s",
            self.host.name, self.name, tb,
        )
        r = Result(host, exception=e, result=tb, failed=True)

    r.name = self.name
    r.severity_level = logging.ERROR if r.failed else self.severity_level
    self.results.insert(0, r)
    return self.results
Run the task for the given host. Arguments: host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right before calling the ``task`` nornir(:obj:`nornir.core.Nornir`): Populated right before calling the ``task`` Returns: host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
379,870
def _format_arg_list(args, variadic=False): def sugar(s): s = s.replace("{", "{{").replace("}", "}}") if len(s) > 50: return s[:20] + " ... " + s[-20:] else: return s def arg_to_str(arg): if isinstance(arg, str): return sugar(repr(arg)) elif arg is Parameter.empty: return else: return sugar(str(arg)) if not args: if variadic: return "(\u2026)" else: return "()" return "(" + ", ".join(map(arg_to_str, args)) + ")"
Format a list of arguments for pretty printing. :param a: list of arguments. :type a: list :param v: tell if the function accepts variadic arguments :type v: bool
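A short illustration of the quoting, truncation, and variadic behavior (assuming the helper above is in scope and no argument is `Parameter.empty`):

    _format_arg_list(("abc", 42))        # -> "('abc', 42)"
    _format_arg_list((), variadic=True)  # -> "(…)"
    _format_arg_list(("x" * 80,))        # long reprs are shortened around " ... "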
379,871
def info(self):
    url = "{}/v7/finance/quote?symbols={}".format(
        self._base_url, self.ticker)
    r = _requests.get(url=url).json()["quoteResponse"]["result"]
    if len(r) > 0:
        return r[0]
    return {}
Retrieve metadata and current price data
379,872
def fonts(self):
    for width in (w for w in FontWidth if w in self):
        for slant in (s for s in FontSlant if s in self[width]):
            for weight in (w for w in FontWeight if w in self[width][slant]):
                yield self[width][slant][weight]
Generator yielding all fonts of this typeface Yields: Font: the next font in this typeface
379,873
def parts_to_url(parts=None, scheme=None, netloc=None, path=None, query=None,
                 fragment=None):
    # dict keys and separator literals restored by inference from the parameter names
    if isinstance(parts, _urllib_parse.SplitResult):
        scheme, netloc, path, query, fragment = parts
    elif parts and isinstance(parts, dict):
        scheme = parts.get('scheme', '')
        netloc = parts.get('netloc', '')
        path = parts.get('path', [])
        query = parts.get('query', {})
        fragment = parts.get('fragment', '')
    if isinstance(path, (list, tuple)):
        path = '/' + '/'.join(path).strip()
    if isinstance(query, (dict, tuple)):
        query = _unquote(_urlencode(query, doseq=True))
    return _urlunsplit((scheme, netloc, path, query, fragment)) or None
Build url urlunsplit style, but optionally handle path as a list and/or query as a dict
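A hedged usage sketch consistent with the docstring (list path, dict query; output shown for illustration):

    parts_to_url(scheme="https", netloc="example.com",
                 path=["api", "v1", "items"], query={"page": 2, "sort": "name"})
    # -> 'https://example.com/api/v1/items?page=2&sort=name'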
379,874
def _processDocstring(self, node, tail=, **kwargs): typeName = type(node).__name__ curLineNum = startLineNum = 0 if typeName != : startLineNum = curLineNum = node.lineno - 1 line = while curLineNum < len(self.lines): line = self.lines[curLineNum] match = AstWalker.__docstrMarkerRE.match(line) if match: break curLineNum += 1 docstringStart = curLineNum if not AstWalker.__docstrOneLineRE.match(line): curLineNum += 1 while curLineNum < len(self.lines): line = self.lines[curLineNum] if line.find(match.group(2)) >= 0: break curLineNum += 1 endLineNum = curLineNum + 1 for firstVarLineNum, firstVarLine in enumerate(self.docLines): if in firstVarLine: break lastVarLineNum = len(self.docLines) if lastVarLineNum > 0 and in firstVarLine: while lastVarLineNum > firstVarLineNum: lastVarLineNum -= 1 if in self.docLines[lastVarLineNum]: break lastVarLineNum += 1 if firstVarLineNum < len(self.docLines): indentLineNum = endLineNum indentStr = while not indentStr and indentLineNum < len(self.lines): match = AstWalker.__indentRE.match(self.lines[indentLineNum]) indentStr = match and match.group(1) or indentLineNum += 1 varLines = [.format(linesep, docLine).replace( linesep, linesep + indentStr) for docLine in self.docLines[ firstVarLineNum: lastVarLineNum]] defLines.extend(varLines) self.docLines[firstVarLineNum: lastVarLineNum] = [] namespaceLoc = defLines[-1].find() if namespaceLoc >= 0: self.docLines[-1] += defLines[-1][namespaceLoc:] defLines[-1] = defLines[-1][:namespaceLoc] if typeName != : self.lines[startLineNum: endLineNum] = self.docLines + defLines else: self.lines[startLineNum: endLineNum] = defLines + self.docLines
Handles a docstring for functions, classes, and modules. Basically just figures out the bounds of the docstring and sends it off to the parser to do the actual work.
379,875
def backup_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
    # URL template restored by inference from the docstring route
    # (GET /{mount_point}/backup/{name}) plus hvac's usual /v1/ prefix.
    api_path = '/v1/{mount_point}/backup/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    response = self._adapter.get(
        url=api_path,
    )
    return response.json()
Return a plaintext backup of a named key. The backup contains all the configuration data and keys of all the versions along with the HMAC key. The response from this endpoint can be used with the /restore endpoint to restore the key. Supported methods: GET: /{mount_point}/backup/{name}. Produces: 200 application/json :param name: Name of the key. :type name: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
379,876
def _set_overlay_policy_map(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("pmap_name",overlay_policy_map.overlay_policy_map, yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: u, u: u}}), is_container=, yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__overlay_policy_map = t if hasattr(self, ): self._set()
Setter method for overlay_policy_map, mapped from YANG variable /overlay_policy_map (list) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_policy_map is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_policy_map() directly. YANG Description: Define a policy-map[Actions on the classified packet].
379,877
def remove_listener(self, callback): listeners = filter(lambda x: x[] == callback, self.listeners) for l in listeners: self.listeners.remove(l)
Remove a listener.
379,878
def _operation_status_message(self): msg = None action = None if not google_v2_operations.is_done(self._op): last_event = google_v2_operations.get_last_event(self._op) if last_event: msg = last_event[] action_id = last_event.get(, {}).get() if action_id: action = google_v2_operations.get_action_by_id(self._op, action_id) else: msg = else: failed_events = google_v2_operations.get_failed_events(self._op) if failed_events: failed_event = failed_events[-1] msg = failed_event.get(, {}).get() action_id = failed_event.get(, {}).get() if action_id: action = google_v2_operations.get_action_by_id(self._op, action_id) if not msg: error = google_v2_operations.get_error(self._op) if error: msg = error[] else: msg = return msg, action
Returns the most relevant status string and failed action. This string is meant for display only. Returns: A printable status string and name of failed action (if any).
379,879
def get_scoped_package_version_metadata_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version): route_values = {} if feed_id is not None: route_values[] = self._serialize.url(, feed_id, ) if package_scope is not None: route_values[] = self._serialize.url(, package_scope, ) if unscoped_package_name is not None: route_values[] = self._serialize.url(, unscoped_package_name, ) if package_version is not None: route_values[] = self._serialize.url(, package_version, ) response = self._send(http_method=, location_id=, version=, route_values=route_values) return self._deserialize(, response)
GetScopedPackageVersionMetadataFromRecycleBin. [Preview API] Get information about a scoped package version in the recycle bin. :param str feed_id: Name or ID of the feed. :param str package_scope: Scope of the package (the 'scope' part of @scope/name) :param str unscoped_package_name: Name of the package (the 'name' part of @scope/name). :param str package_version: Version of the package. :rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v5_0.npm.models.NpmPackageVersionDeletionState>`
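A hedged usage sketch, assuming `npm_client` is an already-constructed instance of the Npm client class this method belongs to; the feed and package coordinates are placeholders.
# Hypothetical call for the scoped package '@myscope/widget' version 1.2.3.
deletion_state = npm_client.get_scoped_package_version_metadata_from_recycle_bin(
    feed_id='my-feed',
    package_scope='myscope',
    unscoped_package_name='widget',
    package_version='1.2.3',
)
print(deletion_state)  # NpmPackageVersionDeletionState describing the recycled version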
379,880
def removeUserGroups(self, users=None): admin = None userCommunity = None portal = None groupAdmin = None user = None userCommData = None group = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) if users is None: print ("You have selected to remove all users groups, you must modify the code to do this") usersObj = [] commUsers = admin.portals.portalSelf.users(start=1, num=100) usersObj = commUsers[] return else: usersObj = [] userStr = users.split() for user in userStr: try: user = admin.community.users.user(str(user).strip()) usersObj.append(user) except: print ("%s does not exist" % str(user).strip()) if usersObj: for userCommData in usersObj: print ("Loading groups for user: %s" % userCommData.username) if userCommData.groups: for group in userCommData.groups: groupObj = admin.community.groups.group(groupId=group[]) if groupObj.owner == userCommData.username: print (groupObj.delete()) else: print ("No Groups Found") except: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "removeUserGroups", "line": line, "filename": filename, "synerror": synerror, } ) finally: admin = None userCommunity = None portal = None groupAdmin = None user = None userCommData = None group = None del admin del userCommunity del portal del groupAdmin del user del userCommData del group gc.collect()
Removes users' groups. Args: users (str): A comma delimited list of user names. Defaults to ``None``. Warning: When ``users`` is not provided (``None``), all users in the organization will have their groups deleted!
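A hedged usage sketch; `org_tools` stands for whatever helper object this method is defined on, and the user names are placeholders. Only groups owned by the listed users are deleted, and passing None is guarded against in the code above.
# Hypothetical call; only groups owned by these two users are deleted.
org_tools.removeUserGroups(users='jdoe, asmith')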
379,881
def initialize(self, timeouts):
    if self.bind is True:
        self.socket.bind(self.address)
    else:
        self.socket.connect(self.address)
    self._set_timeouts(timeouts)
Bind or connect the nanomsg socket to some address
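A minimal sketch of how bind vs. connect maps onto server- and client-side endpoints, assuming a hypothetical `Endpoint` wrapper that sets the `socket`, `address` and `bind` attributes used above; the timeout tuple layout is also an assumption.
# Hypothetical wrapper objects; only the initialize() call is taken from the code above.
server = Endpoint(address='tcp://127.0.0.1:5555', bind=True)   # will socket.bind(...)
client = Endpoint(address='tcp://127.0.0.1:5555', bind=False)  # will socket.connect(...)

server.initialize(timeouts=(1000, 1000))  # e.g. (send_ms, recv_ms), passed to _set_timeouts
client.initialize(timeouts=(1000, 1000))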
379,882
def fromjson(source, *args, **kwargs):
    source = read_source_from_arg(source)
    return JsonView(source, *args, **kwargs)
Extract data from a JSON file. The file must contain a JSON array as the top level object, and each member of the array will be treated as a row of data. E.g.:: >>> import petl as etl >>> data = ''' ... [{"foo": "a", "bar": 1}, ... {"foo": "b", "bar": 2}, ... {"foo": "c", "bar": 2}] ... ''' >>> with open('example.json', 'w') as f: ... f.write(data) ... 74 >>> table1 = etl.fromjson('example.json', header=['foo', 'bar']) >>> table1 +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ | 'c' | 2 | +-----+-----+ If your JSON file does not fit this structure, you will need to parse it via :func:`json.load` and select the array to treat as the data, see also :func:`petl.io.json.fromdicts`. .. versionchanged:: 1.1.0 If no `header` is specified, fields will be discovered by sampling keys from the first `sample` objects in `source`. The header will be constructed from keys in the order discovered. Note that this ordering may not be stable, and therefore it may be advisable to specify an explicit `header` or to use another function like :func:`petl.transform.headers.sortheader` on the resulting table to guarantee stability.
379,883
def _convert_to_indexer(self, obj, axis=None, is_setter=False, raise_missing=False): if axis is None: axis = self.axis or 0 labels = self.obj._get_axis(axis) if isinstance(obj, slice): return self._convert_slice_indexer(obj, axis) try: obj = self._convert_scalar_indexer(obj, axis) except TypeError: if is_setter: pass is_int_index = labels.is_integer() is_int_positional = is_integer(obj) and not is_int_index try: return labels.get_loc(obj) except LookupError: if isinstance(obj, tuple) and isinstance(labels, MultiIndex): if is_setter and len(obj) == labels.nlevels: return {: obj} raise except TypeError: pass except (ValueError): if not is_int_positional: raise if is_int_positional: if is_setter: if self.name == : return {: obj} if (obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex)): raise ValueError("cannot set by positional indexing with " "enlargement") return obj if is_nested_tuple(obj, labels): return labels.get_locs(obj) elif is_list_like_indexer(obj): if com.is_bool_indexer(obj): obj = check_bool_indexer(labels, obj) inds, = obj.nonzero() return inds else: kwargs = {: True if is_setter else raise_missing} return self._get_listlike_indexer(obj, axis, **kwargs)[1] else: try: return labels.get_loc(obj) except LookupError: if not is_list_like_indexer(obj) and is_setter: return {: obj} raise
Convert indexing key into something we can use to do actual fancy indexing on an ndarray Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? 'In the face of ambiguity, refuse the temptation to guess.' raise AmbiguousIndexError with integer labels? - No, prefer label-based indexing
379,884
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
    dynamic_configurations = {}
    # Make the directory containing the config file importable.
    sys.path.insert(0, path.dirname(path.abspath(config_file)))
    try:
        # NOTE: the module name passed to __import__ was stripped; deriving it from
        # the config file name here is an assumption.
        config_module = __import__(path.splitext(path.basename(config_file))[0])
        dynamic_configurations = config_module.CONFIG
    except ImportError:
        LOG.error('Failed to import dynamic config from %s', config_file)  # assumed message
    return dynamic_configurations
Load and parse dynamic config
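The loader only requires that the imported module expose a dict named CONFIG, so a minimal dynamic config module could look like the sketch below; the file name and keys are illustrative.
# config.py -- hypothetical dynamic configuration module
CONFIG = {
    'ENV': 'dev',
    'REGIONS': ['us-east-1', 'us-west-2'],
}

# elsewhere:
settings = load_dynamic_config('/path/to/config.py')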
379,885
def pull_commits(self, pr_number):
    # NOTE: 'per_page' is an assumption; the original payload key was stripped.
    payload = {
        'per_page': PER_PAGE,
    }
    commit_url = urijoin("pulls", str(pr_number), "commits")
    return self.fetch_items(commit_url, payload)
Get pull request commits
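A hedged usage sketch; `client` stands for an instance of the GitHub backend class defining pull_commits, and fetch_items (used internally) is assumed to yield one page of commit data per iteration.
# Hypothetical usage; PR number 42 and the consumer function are placeholders.
for page in client.pull_commits(pr_number=42):
    handle_commits_page(page)  # hypothetical consumer of each page of commits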
379,886
def get_free_diskbytes(dir_): r if WIN32: import ctypes free_bytes = ctypes.c_ulonglong(0) outvar = ctypes.pointer(free_bytes) dir_ptr = ctypes.c_wchar_p(dir_) ctypes.windll.kernel32.GetDiskFreeSpaceExW(dir_ptr, None, None, outvar) bytes_ = free_bytes.value return bytes_ else: st = os.statvfs(dir_) bytes_ = st.f_bavail * st.f_frsize return bytes_
r""" Args: dir_ (str): Returns: int: bytes_ folder/drive free space (in bytes) References:: http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python http://linux.die.net/man/2/statvfs CommandLine: python -m utool.util_cplat --exec-get_free_diskbytes python -m utool.util_cplat --exec-get_free_diskbytes --dir /media/raid python -m utool.util_cplat --exec-get_free_diskbytes --dir E: Example: >>> # ENABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> import utool as ut >>> dir_ = ut.get_argval('--dir', type_=str, default=ut.truepath('~')) >>> bytes_ = get_free_diskbytes(dir_) >>> result = ('bytes_ = %s' % (str(bytes_),)) >>> print(result) >>> print('Unused space in %r = %r' % (dir_, ut.byte_str2(bytes_))) >>> print('Total space in %r = %r' % (dir_, ut.byte_str2(get_total_diskbytes(dir_))))
379,887
def paginate_queryset(self, queryset, page_size):
    paginator = self.get_paginator(
        queryset,
        page_size,
        orphans=self.get_paginate_orphans(),
        allow_empty_first_page=self.get_allow_empty()
    )
    page_kwarg = self.page_kwarg
    page_num = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
    try:
        page = paginator.page(page_num)
    except PageNotAnInteger:
        page = paginator.page(1)
    except EmptyPage:
        page = paginator.page(paginator.num_pages)
    return (paginator, page, page.object_list, page.has_other_pages())
Returns tuple containing paginator instance, page instance, object list, and whether there are other pages. :param queryset: the queryset instance to paginate. :param page_size: the number of instances per page. :rtype: tuple.
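A sketch of how the returned 4-tuple is typically unpacked in a Django list view; the page size and context keys are illustrative.
# Hypothetical view method consuming the tuple returned by paginate_queryset().
def get_context_data(self, **kwargs):
    queryset = self.get_queryset()
    paginator, page, object_list, is_paginated = self.paginate_queryset(queryset, page_size=25)
    return {
        'paginator': paginator,
        'page_obj': page,
        'object_list': object_list,
        'is_paginated': is_paginated,
    }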
379,888
def get_daemon_stats(self, details=False):
    res = super(BaseSatellite, self).get_daemon_stats(details=details)
    # NOTE: the counter keys are assumptions derived from the variable names;
    # the original string literals were stripped.
    counters = res['counters']
    counters['external-commands'] = len(self.external_commands)
    counters['arbiters'] = len(self.arbiters)
    counters['schedulers'] = len(self.schedulers)
    return res
Increase the stats provided by the Daemon base class :return: stats dictionary :rtype: dict
379,889
def extend_instance(instance, *bases, **kwargs):
    # 'last' and '__class__' are reconstructed from the docstring and the surrounding
    # logic; the original string literals were stripped.
    last = kwargs.get('last', False)
    bases = tuple(bases)
    for base in bases:
        assert inspect.isclass(base), "bases must be classes"
    assert not inspect.isclass(instance)
    base_cls = instance.__class__
    base_cls_name = instance.__class__.__name__
    new_bases = (base_cls,) + bases if last else bases + (base_cls,)
    new_cls = type(base_cls_name, tuple(new_bases), {})
    setattr(instance, '__class__', new_cls)
Apply subclass (mixin) to a class object or its instance. By default, the mixin is placed at the start of bases to ensure it's called first as per MRO. If you wish to have it injected last, which is useful for monkeypatching, then you can specify 'last=True'. See here: http://stackoverflow.com/a/10018792/1267398 :attr instance: Target object :type instance: Class instance :attr bases: List of new bases to subclass with :attr last: Inject new bases after existing bases :type last: bool >>> class A(object): pass >>> class B(object): pass >>> a = A() >>> b = B() >>> isinstance(b, A) False >>> extend_instance(b, A) >>> isinstance(b, A) True
379,890
def copy_figure(self):
    if self.figviewer and self.figviewer.figcanvas.fig:
        self.figviewer.figcanvas.copy_figure()
Copy figure from figviewer to clipboard.
379,891
def publishing(self, service):
    to_update = False
    status = False
    count_new_data = 0  # placeholder; the counting logic referenced here was lost in extraction
    self.log_update(service, to_update, status, count_new_data)
    if to_update and status:
        self.update_trigger(service)
The purpose of this task is to get the data from the cache and then publish it. :param service: service object where we will publish :type service: object
379,892
def galactic_to_equatorial(gl, gb):
    # The original passed gl twice and lost the frame names; gb is the latitude and the
    # frames are reconstructed from the docstring (galactic -> ICRS).
    gal = SkyCoord(gl*u.degree, gb*u.degree, frame='galactic')
    transformed = gal.transform_to('icrs')
    return transformed.ra.degree, transformed.dec.degree
This converts from galactic coords to equatorial coordinates. Parameters ---------- gl : float or array-like Galactic longitude value(s) in decimal degrees. gb : float or array-like Galactic latitude value(s) in decimal degrees. Returns ------- tuple of (float, float) or tuple of (np.array, np.array) The equatorial coordinates (RA, DEC) for each element of the input (`gl`, `gb`) in decimal degrees. These are reported in the ICRS frame.
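A short usage example for the conversion above; the expected output values are approximate.
import numpy as np

# The Galactic centre (l = 0, b = 0) should come out near RA ~266.4 deg, Dec ~-28.9 deg (ICRS).
ra, dec = galactic_to_equatorial(0.0, 0.0)
print(ra, dec)

# Array-like inputs are broadcast by SkyCoord, so arrays work as well.
ra_arr, dec_arr = galactic_to_equatorial(np.array([0.0, 90.0]), np.array([0.0, 0.0]))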
379,893
def get_resources(self): collection = JSONClientValidated(, collection=, runtime=self._runtime) result = collection.find(self._view_filter()).sort(, DESCENDING) return objects.ResourceList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``Resources``. In plenary mode, the returned list contains all known resources or an error results. Otherwise, the returned list may contain only those resources that are accessible through this session. return: (osid.resource.ResourceList) - a list of ``Resources`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
379,894
def get_analyzable_segments(workflow, sci_segs, cat_files, out_dir, tags=None): if tags is None: tags = [] logging.info() make_analysis_dir(out_dir) sci_ok_seg_name = "SCIENCE_OK" sci_ok_seg_dict = segments.segmentlistdict() sci_ok_segs = {} cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags(, , tags)) if len(cat_sets) > 1: raise ValueError( ) cat_set = cat_sets[0] for ifo in workflow.ifos: curr_segs = copy.copy(sci_segs[ifo]) files = cat_files.find_output_with_ifo(ifo) for category in cat_set: veto_def_cat = cat_to_veto_def_cat(category) file_list = files.find_output_with_tag( %(veto_def_cat)) if len(file_list) > 1: err_msg = "Found more than one veto file for %s " %(ifo,) err_msg += "and category %s." %(category,) raise ValueError(err_msg) if len(file_list) == 0: err_msg = "Found no veto files for %s " %(ifo,) err_msg += "and category %s." %(category,) raise ValueError(err_msg) curr_veto_file = file_list[0] cat_segs = curr_veto_file.return_union_seglist() curr_segs -= cat_segs curr_segs.coalesce() sci_ok_seg_dict[ifo + + sci_ok_seg_name] = curr_segs sci_ok_seg_file = SegFile.from_segment_list_dict(sci_ok_seg_name, sci_ok_seg_dict, extension=, valid_segment=workflow.analysis_time, directory=out_dir, tags=tags) if workflow.cp.has_option_tags("workflow-segments", "segments-minimum-segment-length", tags): min_seg_length = int( workflow.cp.get_opt_tags("workflow-segments", "segments-minimum-segment-length", tags) ) sci_ok_seg_file.remove_short_sci_segs(min_seg_length) for ifo in workflow.ifos: sci_ok_segs[ifo] = \ sci_ok_seg_file.segment_dict[ifo + + sci_ok_seg_name] logging.info() return sci_ok_seg_file, sci_ok_segs, sci_ok_seg_name
Get the analyzable segments after applying ini specified vetoes and any other restrictions on the science segs, e.g. a minimum segment length, or demanding that only coincident segments are analysed. Parameters ----------- workflow : Workflow object Instance of the workflow object sci_segs : Ifo-keyed dictionary of glue.segmentlists The science segments for each ifo to which the vetoes, or any other restriction, will be applied. cat_files : FileList of SegFiles The category veto files generated by get_veto_segs out_dir : path Location to store output files tags : list of strings Used to retrieve subsections of the ini file for configuration options. Returns -------- sci_ok_seg_file : workflow.core.SegFile instance The segment file combined from all ifos containing the analyzable science segments. sci_ok_segs : Ifo keyed dict of ligo.segments.segmentlist instances The analyzable science segs for each ifo, keyed by ifo sci_ok_seg_name : str The name with which analyzable science segs are stored in the output XML file.
379,895
def _start_workflow_stages(pb: ProcessingBlock, pb_id: str, workflow_stage_dict: dict, workflow_stage: WorkflowStage, docker: DockerSwarmClient): stage_data = workflow_stage_dict[workflow_stage.id] stage_data[] = False if stage_data[] == : if not workflow_stage.dependencies: stage_data[] = True else: dependency_status = [] for dependency in workflow_stage.dependencies: dependency_status.append( workflow_stage_dict[dependency[]][ ] == ) stage_data[] = all(dependency_status) if stage_data[]: LOG.info(, workflow_stage.id) LOG.info() args_template = jinja2.Template(workflow_stage.args_template) stage_params = pb.workflow_parameters[workflow_stage.id] template_params = {**workflow_stage.config, **stage_params} args = args_template.render(stage=template_params) LOG.info() args = json.dumps(json.loads(args)) compose_template = jinja2.Template( workflow_stage.compose_template) compose_str = compose_template.render(stage=dict(args=args)) compose_dict = yaml.load(compose_str) service_names = compose_dict[].keys() new_service_names = [ .format(pb_id, pb.workflow_id, name) for name in service_names] for new, old in zip(new_service_names, service_names): compose_dict[][new] = \ compose_dict[].pop(old) compose_str = yaml.dump(compose_dict) service_ids = docker.create_services(compose_str) LOG.info() for service_id in service_ids: service_name = docker.get_service_name(service_id) LOG.info(" %s, %s ", service_name, service_id) stage_data[][service_id] = {} LOG.info(, service_ids) stage_data[][service_id] = dict( name=docker.get_service_name(service_id), status=, complete=False ) stage_data["status"] =
Start a workflow stage by starting a number of docker services. This function first assesses if the specified workflow stage can be started based on its dependencies. If this is found to be the case, the workflow stage is started by first resolving any template arguments in the workflow stage configuration, and then using the Docker Swarm Client API to start workflow stage services. As part of this, the workflow_stage_dict data structure is updated accordingly. TODO(BMo) This function will need refactoring at some point as part of an update to the way workflow state metadata is stored in the configuration database. Currently the stage_data dictionary is a bit of a hack for a badly specified Configuration Database backed WorkflowStage object. This function is used by `execute_processing_block`. Args: pb (ProcessingBlock): Configuration database Processing Block data object pb_id (str): Processing Block identifier workflow_stage_dict (dict): Workflow stage metadata structure workflow_stage (WorkflowStage): Workflow state configuration database data object. docker (DockerSwarmClient): Docker Swarm Client object.
379,896
def open_pickle(path: str):
    # 'rb' is required because pickle files must be read in binary mode;
    # the original mode literal was stripped.
    try:
        with open(path, 'rb') as opened_pickle:
            try:
                return pickle.load(opened_pickle)
            except Exception as pickle_error:
                logger.error(pickle_error)
                raise
    except FileNotFoundError as fnf_error:
        logger.error(fnf_error)
        raise
    except IOError as io_err:
        logger.error(io_err)
        raise
    except EOFError as eof_error:
        logger.error(eof_error)
        raise
    except pickle.UnpicklingError as unp_error:
        logger.error(unp_error)
        raise
Open a pickle and return the loaded pickle object. :type path: str :param path: File path to the pickle file to be opened. :rtype: object
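A small round-trip sketch; the writer side is not part of the function above, and the file name is illustrative.
import pickle

payload = {'alpha': 1, 'beta': [2, 3]}
with open('payload.pickle', 'wb') as handle:
    pickle.dump(payload, handle)

data = open_pickle('payload.pickle')
assert data == payload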
379,897
def get_members(self, role=github.GithubObject.NotSet):
    assert role is github.GithubObject.NotSet or isinstance(role, (str, unicode)), role
    url_parameters = dict()
    if role is not github.GithubObject.NotSet:
        # Accepted values are reconstructed from the GitHub API
        # ("member", "maintainer", "all"); the original literals were stripped.
        assert role in ['member', 'maintainer', 'all']
        url_parameters["role"] = role
    return github.PaginatedList.PaginatedList(
        github.NamedUser.NamedUser,
        self._requester,
        self.url + "/members",
        url_parameters
    )
:calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_ :param role: string :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
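A hedged PyGithub usage sketch; the token, organization name and team id are placeholders.
from github import Github

gh = Github('YOUR_TOKEN')                                 # placeholder token
team = gh.get_organization('my-org').get_team(1234567)    # placeholder org / team id

for user in team.get_members(role='maintainer'):
    print(user.login)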
379,898
def decrypt(s, base64=False):
    return _cipher().decrypt(base64 and b64decode(s) or s)
Symmetric decryption function.
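A minimal usage sketch of the two call shapes; the ciphertext variables are assumptions, since only the decrypt side and its internal _cipher() helper are shown.
# Hypothetical usage.
plaintext = decrypt(raw_ciphertext)               # ciphertext passed as raw bytes
plaintext = decrypt(b64_ciphertext, base64=True)  # ciphertext passed base64-encoded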
379,899
def _query(self): formated_property_names = ",".join(self.property_names) wql = "Select {property_names} from {class_name}{filters}".format( property_names=formated_property_names, class_name=self.class_name, filters=self.formatted_filters ) self.logger.debug(u"Querying WMI: {0}".format(wql)) try: flag_return_immediately = 0x10 flag_forward_only = 0x20 flag_use_amended_qualifiers = 0x20000 query_flags = flag_return_immediately | flag_forward_only includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None if includes_qualifiers: self._property_counter_types = CaseInsensitiveDict() query_flags |= flag_use_amended_qualifiers raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags) results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers) except pywintypes.com_error: self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True) results = [] return results
Query WMI using WMI Query Language (WQL) & parse the results. Returns: List of WMI objects or `TimeoutException`.
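Based on the format string in the method above, a sampler configured with two property names, a perf class and no filters would render WQL like the sketch below; the class and property names are illustrative.
# Rendered by "Select {property_names} from {class_name}{filters}" with empty filters:
wql = "Select Name,PercentProcessorTime from Win32_PerfRawData_PerfOS_Processor"
# A non-empty formatted_filters would append a WHERE clause to this statement.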