Columns: Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars)
376,600
def numpymat2df(mat): return pd.DataFrame( dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))
Sometimes (though not very often) it is useful to convert a numpy matrix that has no column names to a Pandas dataframe so that Pandas functions can be used. This method converts a 2D numpy matrix to a Pandas dataframe with default column headers. Parameters ---------- mat : The numpy matrix Returns ------- A pandas dataframe with the same data as the input matrix, with columns named x0, x1, ... x(n-1), where n is the number of columns.
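A minimal usage sketch of the helper above, assuming only that numpy and pandas are importable:

    import numpy as np
    import pandas as pd

    def numpymat2df(mat):
        # Columns are named x0, x1, ... x(n-1), matching the docstring.
        return pd.DataFrame(dict(('x%d' % i, mat[:, i]) for i in range(mat.shape[1])))

    mat = np.array([[1, 2], [3, 4], [5, 6]])
    df = numpymat2df(mat)
    print(df.columns.tolist())  # ['x0', 'x1']
    print(df['x1'].sum())       # 12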
376,601
def set_settings_env(executable_folder=None): executable_folder = executable_folder or get_executable_folder() if os.path.exists(os.path.join(executable_folder, "local/total_settings.py")): print("Using total settings") os.chdir(executable_folder) os.environ["DJANGO_SETTINGS_MODULE"] = "local.total_settings" os.environ["STATIC_ROOT"] = os.path.join(executable_folder, "static") os.environ["MEDIA_ROOT"] = os.path.join(executable_folder, "media") else: os.environ.setdefault(, get_folder(get_inspection_frame(2))) os.environ["DJANGO_SETTINGS_MODULE"] = "djangoautoconf.base_settings"
Add all application folders :param executable_folder: the folder that contains local and external_app_repos :return:
376,602
def build(self): signed = bool(self.options() & Builder.Options.Signed) buildpath = self.buildPath() if not buildpath: raise errors.InvalidBuildPath(buildpath) for key, value in self.environment().items(): log.info(.format(key, value)) os.environ[key] = value if os.path.exists(buildpath): shutil.rmtree(buildpath) os.makedirs(buildpath) outpath = self.outputPath() if not os.path.exists(outpath): os.makedirs(outpath) src = self.licenseFile() if src and os.path.exists(src): targ = os.path.join(buildpath, ) shutil.copyfile(src, targ) if self.options() & Builder.Options.GenerateRevision: self.generateRevision() if self.options() & Builder.Options.GenerateDocs: self.generateDocumentation(buildpath) if self.options() & Builder.Options.GenerateSetupFile: setuppath = os.path.join(self.sourcePath(), ) egg = (self.options() & Builder.Options.GenerateEgg) != 0 self.generateSetupFile(setuppath, egg=egg) if self.options() & Builder.Options.GenerateExecutable: if not self.generateExecutable(signed=signed): return if self.options() & Builder.Options.GenerateZipFile: self.generateZipFile(self.outputPath()) if self.options() & Builder.Options.GenerateInstaller: self.generateInstaller(buildpath, signed=signed)
Builds this object into the desired output information.
376,603
def generate_key_data_from_nonce(server_nonce, new_nonce): server_nonce = server_nonce.to_bytes(16, 'little', signed=True) new_nonce = new_nonce.to_bytes(32, 'little', signed=True) hash1 = sha1(new_nonce + server_nonce).digest() hash2 = sha1(server_nonce + new_nonce).digest() hash3 = sha1(new_nonce + new_nonce).digest() key = hash1 + hash2[:12] iv = hash2[12:20] + hash3 + new_nonce[:4] return key, iv
Generates the key data corresponding to the given nonce
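A self-contained sketch of the same SHA1-based derivation operating directly on bytes (the helper name is illustrative); it shows why the key and IV come out at 32 bytes each:

    from hashlib import sha1

    def key_iv_from_nonces(server_nonce: bytes, new_nonce: bytes):
        # server_nonce is 16 bytes, new_nonce is 32 bytes; the key is
        # 20 + 12 = 32 bytes and the IV is 8 + 20 + 4 = 32 bytes.
        hash1 = sha1(new_nonce + server_nonce).digest()
        hash2 = sha1(server_nonce + new_nonce).digest()
        hash3 = sha1(new_nonce + new_nonce).digest()
        key = hash1 + hash2[:12]
        iv = hash2[12:20] + hash3 + new_nonce[:4]
        return key, iv

    key, iv = key_iv_from_nonces(b'\x01' * 16, b'\x02' * 32)
    assert len(key) == 32 and len(iv) == 32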
376,604
def set_parent_on_new(self, parentrefobj): refobjinter = self.get_refobjinter() old = self.get_unwrapped(self.get_root(), refobjinter) yield new = self.get_unwrapped(self.get_root(), refobjinter) - old for refobj in new: if refobjinter.get_parent(refobj) is None: refobjinter.set_parent(refobj, parentrefobj)
Contextmanager that, on close, will get all new unwrapped refobjects and, for every refobject with no parent, set it to the given one. :returns: None :rtype: None :raises: None
376,605
def oggvorbis(s): try: s = np.arange(s) except TypeError: s = np.arange(s[0]) i = np.sin((s + .5) / len(s) * np.pi) ** 2 f = np.sin(.5 * np.pi * i) return f * (1. / f.max())
This is taken from the ogg vorbis spec (http://xiph.org/vorbis/doc/Vorbis_I_spec.html) :param s: the total length of the window, in samples
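A quick numerical check of the window's shape, assuming only numpy; the window is symmetric, rises from near zero at the edges, and is normalized to peak at 1:

    import numpy as np

    def oggvorbis(s):
        s = np.arange(s)
        i = np.sin((s + .5) / len(s) * np.pi) ** 2
        f = np.sin(.5 * np.pi * i)
        return f * (1. / f.max())

    w = oggvorbis(8)
    assert np.isclose(w.max(), 1.0) and np.allclose(w, w[::-1])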
376,606
def start(self, io_loop): for callback in self.before_run_callbacks: try: callback(self.tornado_application, io_loop) except Exception: self.logger.error(, callback, exc_info=1) self.stop(io_loop) raise for callback in self.on_start_callbacks: io_loop.spawn_callback(callback, self.tornado_application, io_loop)
Run the ``before_run`` callbacks and queue the ``on_start`` callbacks. :param tornado.ioloop.IOLoop io_loop: loop to start the app on.
376,607
def update(self, list_id, subscriber_hash, data): subscriber_hash = check_subscriber_hash(subscriber_hash) self.list_id = list_id self.subscriber_hash = subscriber_hash if 'tags' not in data: raise KeyError('The data must include a "tags" key') response = self._mc_client._post(url=self._build_path(list_id, 'members', subscriber_hash, 'tags'), data=data) return response
Update tags for a specific subscriber. The documentation lists only the ``tags`` request body parameter, so it is documented and error-checked here as if it were required, based on the description of the method. The data list needs to include a "status" key; this determines whether the tag should be added or removed from the user: data = { 'tags': [ {'name': 'foo', 'status': 'active'}, {'name': 'bar', 'status': 'inactive'} ] } :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. :type subscriber_hash: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { "tags": list* }
376,608
def get_host_port_names(self, host_name): port_names = list() host = self.get_hosts_by_name(host_name) fc_ports = host.fc_ports iscsi_ports = host.iscsi_ports port_names.extend(fc_ports.split() if fc_ports != '' else []) port_names.extend(iscsi_ports.split() if iscsi_ports != '' else []) return port_names
Return a list of the port names of an XIV host.
376,609
def parse(chord): if len(chord) > 1 and chord[1] in ("b", "#"): root = chord[:2] rest = chord[2:] else: root = chord[:1] rest = chord[1:] check_note(root, chord) on_chord_idx = rest.find("/") if on_chord_idx >= 0: on = rest[on_chord_idx + 1:] rest = rest[:on_chord_idx] check_note(on, chord) else: on = None if rest in QUALITY_DICT: quality = Quality(rest) else: raise ValueError("Invalid chord {}: Unknown quality {}".format(chord, rest)) appended = [] return root, quality, appended, on
Parse a string to get chord component :param str chord: str expression of a chord :rtype: (str, pychord.Quality, str, str) :return: (root, quality, appended, on)
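A standalone sketch of just the root-splitting step (the helper name is hypothetical, and no pychord dependency is needed), showing how the accidental check routes different inputs:

    def split_root(chord):
        # A root is one letter plus an optional accidental ('b' or '#').
        if len(chord) > 1 and chord[1] in ('b', '#'):
            return chord[:2], chord[2:]
        return chord[:1], chord[1:]

    print(split_root('C#m7'))  # ('C#', 'm7')
    print(split_root('Bb/D'))  # ('Bb', '/D')
    print(split_root('Am'))    # ('A', 'm')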
376,610
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None): if isinstance(arg1, tuple): arg_len = len(arg1) if arg_len < 2: raise ValueError("Unexpected length of input tuple: " + str(arg_len)) elif arg_len > 2: _check_shape(arg1, shape) return empty('row_sparse', arg1, ctx=ctx, dtype=dtype) else: if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types): _check_shape(arg1, shape) return empty('row_sparse', arg1, ctx=ctx, dtype=dtype) else: return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape, ctx=ctx, dtype=dtype) else: if isinstance(arg1, RowSparseNDArray): _check_shape(arg1.shape, shape) return array(arg1, ctx=ctx, dtype=dtype) elif isinstance(arg1, CSRNDArray): raise ValueError("Unexpected input type: CSRNDArray") else:
Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \ tensor slices at given indices. The RowSparseNDArray can be instantiated in several ways: - row_sparse_array(D): to construct a RowSparseNDArray with a dense ndarray ``D`` - **D** (*array_like*) - An object exposing the array interface, an object whose \ `__array__` method returns an array, or any (nested) sequence. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \ float32 otherwise. - row_sparse_array(S) to construct a RowSparseNDArray with a sparse ndarray ``S`` - **S** (*RowSparseNDArray*) - A sparse ndarray. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``S.dtype``. - row_sparse_array((D0, D1 .. Dn)) to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)`` - **D0, D1 .. Dn** (*int*) - The shape of the ndarray - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. - row_sparse_array((data, indices)) to construct a RowSparseNDArray based on the definition of row sparse format \ using two separate arrays, \ where the `indices` stores the indices of the row slices with non-zeros, while the values are stored in `data`. The corresponding NDArray ``dense`` represented by RowSparseNDArray ``rsp`` has \ ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]`` The row indices for are expected to be **sorted in ascending order.** \ - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero row slices of the array. - **indices** (*array_like*) - An object exposing the array interface, which \ stores the row index for each row slice with non-zero elements. - **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the indices and indptr arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. Parameters ---------- arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like The argument to help instantiate the row sparse ndarray. See above for further details. shape : tuple of int, optional The shape of the row sparse ndarray. (Default value = None) ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. (Default value = None) Returns ------- RowSparseNDArray An `RowSparseNDArray` with the `row_sparse` storage representation. Examples -------- >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2)) >>> a.asnumpy() array([[ 0., 0.], [ 1., 2.], [ 0., 0.], [ 0., 0.], [ 3., 4.], [ 0., 0.]], dtype=float32) See Also -------- RowSparseNDArray : MXNet NDArray in row sparse format.
376,611
def storage_at_hvmv_substation(mv_grid, parameters, mode=None): storage = set_up_storage(node=mv_grid.station, parameters=parameters, operational_mode=mode) line = connect_storage(storage, mv_grid.station) return storage, line
Place storage at HV/MV substation bus bar. Parameters ---------- mv_grid : :class:`~.grid.grids.MVGrid` MV grid instance parameters : :obj:`dict` Dictionary with storage parameters. Must at least contain 'nominal_power'. See :class:`~.grid.network.StorageControl` for more information. mode : :obj:`str`, optional Operational mode. See :class:`~.grid.network.StorageControl` for possible options and more information. Default: None. Returns ------- :class:`~.grid.components.Storage`, :class:`~.grid.components.Line` Created storage instance and newly added line to connect storage.
376,612
def dict(self): SKIP_KEYS = (, , , , , , , ) return OrderedDict([(k, getattr(self, k)) for k in self.properties if k not in SKIP_KEYS])
A dict that holds key/values for all of the properties in the object. :return:
376,613
def save(self, *args, **kwargs): self.uid = .format( self.office.uid, self.cycle.uid ) name_label = .format( self.cycle.name, self.office.label ) if self.special: self.uid = .format( self.uid ) name_label = .format( name_label ) self.label = name_label self.name = name_label if not self.slug: self.slug = uuslug( name_label, instance=self, max_length=100, separator=, start_no=2 ) super(Race, self).save(*args, **kwargs)
**uid**: :code:`{office.uid}_{cycle.uid}_race`
376,614
def apply_inverse(self, y): self.recompute(quiet=False) r = np.array(y, dtype=np.float64, order="F") r = self._check_dimensions(r, check_dim=False) m = [slice(None)] + [np.newaxis for _ in range(len(r.shape) - 1)] r -= self._call_mean(self._x)[m] if len(r.shape) == 1: b = self.solver.apply_inverse(r, in_place=True).flatten() else: b = self.solver.apply_inverse(r, in_place=True) return b
Self-consistently apply the inverse of the computed kernel matrix to some vector or matrix of samples. This method subtracts the mean, sorts the samples, then returns the samples in the correct (unsorted) order. :param y: ``(nsamples, )`` or ``(nsamples, K)`` The vector (or matrix) of sample values.
376,615
def draw_graph(matrix, clusters, **kwargs): graph = nx.Graph(matrix) cluster_map = {node: i for i, cluster in enumerate(clusters) for node in cluster} colors = [cluster_map[i] for i in range(len(graph.nodes()))] if not kwargs.get("cmap", False): kwargs["cmap"] = cm.tab20 nx.draw_networkx(graph, node_color=colors, **kwargs) axis("off") show(block=False)
Visualize the clustering :param matrix: The unprocessed adjacency matrix :param clusters: list of tuples containing clusters as returned by 'get_clusters' :param kwargs: Additional keyword arguments to be passed to networkx.draw_networkx
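A small driver sketch for the visualizer above, assuming networkx and matplotlib are installed; the adjacency matrix and clusters are toy values chosen to show the node-coloring logic:

    import numpy as np
    import networkx as nx
    import matplotlib.pyplot as plt

    # Two obvious 2-node communities joined by weak links.
    matrix = np.array([
        [0, 1, 0, 0],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [0, 0, 1, 0],
    ])
    clusters = [(0, 1), (2, 3)]  # as returned by a clustering step

    graph = nx.Graph(matrix)
    cluster_map = {node: i for i, cluster in enumerate(clusters) for node in cluster}
    colors = [cluster_map[n] for n in graph.nodes()]
    nx.draw_networkx(graph, node_color=colors, cmap=plt.cm.tab20)
    plt.axis("off")
    plt.show()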
376,616
def makedirs(path, mode=0o777, exist_ok=False): os.makedirs(path, mode, exist_ok)
A wrapper of os.makedirs().
376,617
def create_run(cls, *args, **kwargs): def f(seed_tuple): j = cls(seed_tuple, *args, **kwargs) return j.run return f
:return: a delegator function that calls the ``cls`` constructor whose arguments are a seed tuple followed by the supplied ``*args`` and ``**kwargs``, then returns the object's ``run`` method. By default, a thread wrapping that ``run`` method is spawned.
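A self-contained sketch of the delegator pattern (the handler class and greeting are illustrative, not from the original framework):

    class EchoHandler:
        def __init__(self, seed_tuple, greeting):
            self.seed_tuple = seed_tuple
            self.greeting = greeting

        def run(self):
            print(self.greeting, self.seed_tuple)

    def create_run(cls, *args, **kwargs):
        # Returns f(seed_tuple) that builds a handler and hands back its
        # bound run method for the framework to execute (often in a thread).
        def f(seed_tuple):
            j = cls(seed_tuple, *args, **kwargs)
            return j.run
        return f

    runner = create_run(EchoHandler, 'hello')('seed-123')
    runner()  # prints: hello seed-123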
376,618
def set_pump_status(self, status): self.pump_status = status _logger.info("%r partition %r", status, self.lease.partition_id)
Updates pump status and logs update to console.
376,619
def feed_data(self, data: bytes) -> None: if self._parser is not None: self._parser.feed_data(data)
Proxy ``feed_data`` to the wrapped parser.
376,620
def expectedLabelPosition(peptide, labelStateInfo, sequence=None, modPositions=None): if modPositions is None: modPositions = maspy.peptidemethods.returnModPositions(peptide, indexStart=0 ) if sequence is None: sequence = maspy.peptidemethods.removeModifications(peptide) currLabelMods = dict() for labelPosition, labelSymbols in viewitems(labelStateInfo[]): labelSymbols = aux.toList(labelSymbols) if labelSymbols == []: pass elif labelPosition == : currLabelMods.setdefault(0, list()) currLabelMods[0].extend(labelSymbols) else: for sequencePosition in aux.findAllSubstrings(sequence, labelPosition): currLabelMods.setdefault(sequencePosition, list()) currLabelMods[sequencePosition].extend(labelSymbols) if labelStateInfo[] is not None: for excludingMod, excludedLabelSymbol in viewitems(labelStateInfo[]): if excludingMod not in modPositions: continue for excludingModPos in modPositions[excludingMod]: if excludingModPos not in currLabelMods: continue if excludedLabelSymbol not in currLabelMods[excludingModPos]: continue if len(currLabelMods[excludingModPos]) == 1: del(currLabelMods[excludingModPos]) else: excludedModIndex = currLabelMods[excludingModPos].index(excludedLabelSymbol) currLabelMods[excludingModPos].pop(excludedModIndex) for sequencePosition in list(viewkeys(currLabelMods)): currLabelMods[sequencePosition] = sorted(currLabelMods[sequencePosition]) return currLabelMods
Returns a modification description of a certain label state of a peptide. :param peptide: Peptide sequence used to calculate the expected label state modifications :param labelStateInfo: An entry of :attr:`LabelDescriptor.labels` that describes a label state :param sequence: unmodified amino acid sequence of :var:`peptide`, if None it is generated by :func:`maspy.peptidemethods.removeModifications()` :param modPositions: dictionary describing the modification state of "peptide", if None it is generated by :func:`maspy.peptidemethods.returnModPositions()` :returns: {sequence position: sorted list of expected label modifications on that position, ... }
376,621
def ask_confirmation(): while True: print("Do you want to restart these brokers? ", end="") choice = input().lower() if choice in ['yes', 'y']: return True elif choice in ['no', 'n']: return False else: print("Please respond with 'yes' or 'no'")
Ask the user for confirmation. Return True if the user confirmed the execution, False otherwise. :returns: bool
376,622
def purge(self): def partial_file(item): "Filter out partial files" return item.completed_chunks < item.size_chunks self.cull(file_filter=partial_file, attrs=["get_completed_chunks", "get_size_chunks"])
Delete PARTIAL data files and remove torrent from client.
376,623
def QueueResponse(self, response, timestamp=None): if timestamp is None: timestamp = self.frozen_timestamp self.response_queue.append((response, timestamp))
Queues the message on the flow's state.
376,624
def _connect_to_ec2(region, credentials): conn = boto.ec2.connect_to_region( region, aws_access_key_id=credentials.access_key_id, aws_secret_access_key=credentials.secret_access_key ) if conn: return conn else: log_red() return False
:param region: The region of AWS to connect to. :param EC2Credentials credentials: The credentials to use to authenticate with EC2. :return: a connection object to AWS EC2
376,625
def is_continuous(docgraph, dominating_node): first_onset, last_offset = get_span_offsets(docgraph, dominating_node) span_range = xrange(first_onset, last_offset+1) token_offsets = (docgraph.get_offsets(tok) for tok in get_span(docgraph, dominating_node)) char_positions = set(itertools.chain.from_iterable(xrange(on, off+1) for on, off in token_offsets)) for item in span_range: if item not in char_positions: return False return True
Return True if the tokens dominated by the given node are all adjacent.
376,626
def load(self, id=None): if id is not None: self.reset() self._setID(id) if not self._new and self._validID(): self._loadDB() self._updated = time.time()
Load from database. Old values will be discarded.
376,627
def as_array(self, transpose=False, items=None): if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() if transpose else arr if items is not None: mgr = self.reindex_axis(items, axis=0) else: mgr = self if self._is_single_block and mgr.blocks[0].is_datetimetz: arr = mgr.blocks[0].get_values(dtype=object) elif self._is_single_block or not self.is_mixed_type: arr = np.asarray(mgr.blocks[0].get_values()) else: arr = mgr._interleave() return arr.transpose() if transpose else arr
Convert the blockmanager data into a numpy array. Parameters ---------- transpose : boolean, default False If True, transpose the return array items : list of strings or None Names of block items that will be included in the returned array. ``None`` means that all block items will be used Returns ------- arr : ndarray
376,628
def build_authorization_endpoint(self, request, disable_sso=None): self.load_config() redirect_to = request.GET.get(REDIRECT_FIELD_NAME, None) if not redirect_to: redirect_to = django_settings.LOGIN_REDIRECT_URL redirect_to = base64.urlsafe_b64encode(redirect_to.encode()).decode() query = QueryDict(mutable=True) query.update({ "response_type": "code", "client_id": settings.CLIENT_ID, "resource": settings.RELYING_PARTY_ID, "redirect_uri": self.redirect_uri(request), "state": redirect_to, }) if self._mode == "openid_connect": query["scope"] = "openid" if (disable_sso is None and settings.DISABLE_SSO) or disable_sso is True: query["prompt"] = "login" return "{0}?{1}".format(self.authorization_endpoint, query.urlencode())
This function returns the ADFS authorization URL. Args: request(django.http.request.HttpRequest): A django Request object disable_sso(bool): Whether to disable single sign-on and force the ADFS server to show a login prompt. Returns: str: The redirect URI
376,629
def rotate(self, count=1, with_pane_before_only=False, with_pane_after_only=False): items = [] current_pane_index = None for s in self.splits: for index, item in enumerate(s): if isinstance(item, Pane): items.append((s, index, item, s.weights[item])) if item == self.active_pane: current_pane_index = len(items) - 1 if with_pane_before_only: items = items[current_pane_index - 1:current_pane_index + 1] elif with_pane_after_only: items = items[current_pane_index:current_pane_index + 2] for i, triple in enumerate(items): split, index, pane, weight = triple new_item = items[(i + count) % len(items)][2] split[index] = new_item split.weights[new_item] = weight
Rotate panes. When `with_pane_before_only` or `with_pane_after_only` is True, only rotate with the pane before/after the active pane.
376,630
def db_from_dataframes( db_filename, dataframes, primary_keys={}, indices={}, subdir=None, overwrite=False, version=1): if not (subdir is None or isinstance(subdir, str)): raise TypeError("Expected subdir to be None or str, got %s : %s" % ( subdir, type(subdir))) db_path = build_path(db_filename, subdir) return db_from_dataframes_with_absolute_path( db_path, table_names_to_dataframes=dataframes, table_names_to_primary_keys=primary_keys, table_names_to_indices=indices, overwrite=overwrite, version=version)
Create a sqlite3 database from a collection of DataFrame objects Parameters ---------- db_filename : str Name of database file to create dataframes : dict Dictionary from table names to DataFrame objects primary_keys : dict, optional Name of primary key column for each table indices : dict, optional Dictionary from table names to list of column name tuples subdir : str, optional overwrite : bool, optional If the database already exists, overwrite it? version : int, optional
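A sketch of the equivalent effect using plain pandas and sqlite3 (table, column, and file names here are illustrative, not from the original project):

    import sqlite3
    import pandas as pd

    genes = pd.DataFrame({'id': [1, 2], 'name': ['TP53', 'BRCA1']})

    with sqlite3.connect('example.db') as conn:
        # One table per DataFrame, plus an index on a chosen column.
        genes.to_sql('gene', conn, index=False, if_exists='replace')
        conn.execute('CREATE INDEX IF NOT EXISTS idx_gene_name ON gene (name)')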
376,631
def print_yielded(func): print_all = functools.partial(map, print) print_results = compose(more_itertools.recipes.consume, print_all, func) return functools.wraps(func)(print_results)
Convert a generator into a function that prints all yielded elements >>> @print_yielded ... def x(): ... yield 3; yield None >>> x() 3 None
376,632
def add_permission(self): statement_id = .format(self.app_name, self.trigger_settings[]) principal = lambda_alias_arn = get_lambda_alias_arn(self.app_name, self.env, self.region) lambda_unqualified_arn = get_lambda_arn(self.app_name, self.env, self.region) resource_name = self.trigger_settings.get(, ) resource_name = resource_name.replace(, ) method_api_source_arn = .format( self.region, self.account_id, self.api_id, self.env, self.trigger_settings[], resource_name) global_api_source_arn = .format(self.region, self.account_id, self.api_id, resource_name) add_lambda_permissions( function=lambda_alias_arn, statement_id=statement_id + self.trigger_settings[], action=, principal=principal, env=self.env, region=self.region, source_arn=method_api_source_arn) add_lambda_permissions( function=lambda_alias_arn, statement_id=statement_id, action=, principal=principal, env=self.env, region=self.region, source_arn=global_api_source_arn) add_lambda_permissions( function=lambda_unqualified_arn, statement_id=statement_id + self.trigger_settings[], action=, principal=principal, env=self.env, region=self.region, source_arn=method_api_source_arn) add_lambda_permissions( function=lambda_unqualified_arn, statement_id=statement_id, action=, principal=principal, env=self.env, region=self.region, source_arn=global_api_source_arn)
Add permission to Lambda for the API Trigger.
376,633
def _storage_attach(self, params): args = shlex.split(params) yield from self.manager.execute("storageattach", [self._vmname] + args)
Change storage medium in this VM. :param params: params to use with sub-command storageattach
376,634
def pool_define(name, ptype, target=None, permissions=None, source_devices=None, source_dir=None, source_adapter=None, source_hosts=None, source_auth=None, source_name=None, source_format=None, transient=False, start=True, **kwargs): conn = __get_conn(**kwargs) pool_xml = _gen_pool_xml( name, ptype, target, permissions=permissions, source_devices=source_devices, source_dir=source_dir, source_adapter=source_adapter, source_hosts=source_hosts, source_auth=source_auth, source_name=source_name, source_format=source_format ) try: if transient: pool = conn.storagePoolCreateXML(pool_xml) else: pool = conn.storagePoolDefineXML(pool_xml) if start: pool.create() except libvirtError as err: raise err finally: conn.close() return True
Create libvirt pool. :param name: Pool name :param ptype: Pool type. See `libvirt documentation <https://libvirt.org/storage.html>`_ for the possible values. :param target: Pool full path target :param permissions: Permissions to set on the target folder. This is mostly used for filesystem-based pool types. See pool-define-permissions_ for more details on this structure. :param source_devices: List of source devices for pools backed by physical devices. (Default: ``None``) Each item in the list is a dictionary with ``path`` and optionally ``part_separator`` keys. The path is the qualified name for iSCSI devices. Refer to `this libvirt page <https://libvirt.org/formatstorage.html#StoragePool>`_ for more information on the use of ``part_separator`` :param source_dir: Path to the source directory for pools of type ``dir``, ``netfs`` or ``gluster``. (Default: ``None``) :param source_adapter: SCSI source definition. The value is a dictionary with ``type``, ``name``, ``parent``, ``managed``, ``parent_wwnn``, ``parent_wwpn``, ``parent_fabric_wwn``, ``wwnn``, ``wwpn`` and ``parent_address`` keys. The ``parent_address`` value is a dictionary with ``unique_id`` and ``address`` keys. The address represents a PCI address and is itself a dictionary with ``domain``, ``bus``, ``slot`` and ``function`` properties. Refer to `this libvirt page <https://libvirt.org/formatstorage.html#StoragePool>`_ for the meaning and possible values of these properties. :param source_hosts: List of sources for pools backed by storage from remote servers. Each item is the hostname optionally followed by the port separated by a colon. (Default: ``None``) :param source_auth: Source authentication details. (Default: ``None``) The value is a dictionary with ``type``, ``username`` and ``secret`` keys. The type can be one of ``ceph`` for Ceph RBD or ``chap`` for iSCSI sources. The ``secret`` value links to a libvirt secret object. It is a dictionary with ``type`` and ``value`` keys. The type value can be either ``uuid`` or ``usage``. Examples: .. code-block:: python source_auth={ 'type': 'ceph', 'username': 'admin', 'secret': { 'type': 'uuid', 'uuid': '2ec115d7-3a88-3ceb-bc12-0ac909a6fd87' } } .. code-block:: python source_auth={ 'type': 'chap', 'username': 'myname', 'secret': { 'type': 'usage', 'uuid': 'mycluster_myname' } } :param source_name: Identifier of name-based sources. :param source_format: String representing the source format. The possible values depend on the source type. See `libvirt documentation <https://libvirt.org/storage.html>`_ for the possible values. :param start: Pool start (default True) :param transient: When ``True``, the pool will be automatically undefined after being stopped. Note that a transient pool will force ``start`` to ``True``. (Default: ``False``) :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. _pool-define-permissions: **Permissions definition** The permissions are described by a dictionary containing the following keys: mode The octal representation of the permissions. (Default: `0711`) owner the numeric user ID of the owner. (Default: from the parent folder) group the numeric ID of the group. (Default: from the parent folder) label the SELinux label. (Default: `None`) CLI Example: Local folder pool: .. code-block:: bash salt '*' virt.pool_define somepool dir target=/srv/mypool \ permissions="{'mode': '0744', 'owner': 107, 'group': 107}" CIFS backed pool: .. code-block:: bash salt '*' virt.pool_define myshare netfs source_format=cifs \ source_dir=samba_share source_hosts="['example.com']" target=/mnt/cifs .. versionadded:: 2019.2.0
376,635
def dump(self, indentation=0): dump = [] dump.append(.format(self.name)) printable_bytes = [ord(i) for i in string.printable if i not in string.whitespace] for keys in self.__keys__: for key in keys: val = getattr(self, key) if isinstance(val, (int, long)): if key.startswith(): val_str = % (val) else: val_str = % (val) if key == or key == : try: val_str += % time.asctime(time.gmtime(val)) except ValueError as e: val_str += else: val_str = bytearray(val) if key.startswith(): val_str = .join( [.format(i) for i in val_str.rstrip(b)]) else: val_str = .join( [chr(i) if (i in printable_bytes) else .format(i) for i in val_str.rstrip(b)]) dump.append( % ( self.__field_offsets__[key] + self.__file_offset__, self.__field_offsets__[key], key+, val_str)) return dump
Returns a string representation of the structure.
376,636
def getData(self,exten=None): if exten.lower().find() > -1: fname = self._filename else: extn = exten.split() sci_chip = self._image[self.scienceExt,int(extn[1])] fname = sci_chip.dqfile extnum = self._interpretExten(exten) if self._image[extnum].data is None: if os.path.exists(fname): _image=fileutil.openImage(fname, clobber=False, memmap=False) _data=fileutil.getExtn(_image, extn=exten).data _image.close() del _image self._image[extnum].data = _data else: _data = None else: _data = self._image[extnum].data return _data
Return just the data array from the specified extension. fileutil is used instead of fits to account for non-FITS input images. openImage returns a fits object.
376,637
def next_retrieve_group_item(self, last_item=None, entry=None): next_item = None gerrit_version = self.version if gerrit_version[0] == 2 and gerrit_version[1] > 9: if last_item is None: next_item = 0 else: next_item = last_item elif gerrit_version[0] == 2 and gerrit_version[1] == 9: cause = "Gerrit 2.9.0 does not support pagination" raise BackendError(cause=cause) else: if entry is not None: next_item = entry[] return next_item
Return the item to start from in next reviews group.
376,638
def GetData(fitsfile, EPIC, campaign, clobber=False, saturation_tolerance=-0.1, bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17], get_hires=False, get_nearby=False, aperture=None, **kwargs): filename = os.path.join(EVEREST_DAT, , % campaign, ( % EPIC)[:4] + , ( % EPIC)[4:], ) if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) if not os.path.exists(filename) or clobber: log.info("Fetching data for target...") with pyfits.open(fitsfile) as f: qdata = f[1].data fitsheader = [pyfits.getheader(fitsfile, 0).cards, pyfits.getheader(fitsfile, 1).cards, pyfits.getheader(fitsfile, 2).cards] if get_hires: try: hires = GetHiResImage(EPIC) except ValueError: hires = None else: hires = None if get_nearby: try: nearby = GetSources(EPIC) except ValueError: nearby = [] else: nearby = [] cadn = np.array(qdata.field(), dtype=) time = np.array(qdata.field(), dtype=) fpix = np.array(qdata.field(), dtype=) fpix_err = np.array(qdata.field(), dtype=) qual = np.array(qdata.field(), dtype=int) naninds = np.where(np.isnan(time)) time = Interpolate(np.arange(0, len(time)), naninds, time) pc1 = np.array(qdata.field(), dtype=) pc2 = np.array(qdata.field(), dtype=) if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)): pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1) pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2) else: pc1 = None pc2 = None pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]] if aperture is None: aperture = ApertureSelector(time[::10], fpix[::10], title= % EPIC).aperture if np.sum(aperture) == 0: raise ValueError("Empty aperture!") if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) f = NamedTemporaryFile("wb", delete=False) np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix, fpix_err=fpix_err, qual=qual, aperture=aperture, pc1=pc1, pc2=pc2, fitsheader=fitsheader, pixel_images=pixel_images, nearby=nearby, hires=hires) f.flush() os.fsync(f.fileno()) f.close() shutil.move(f.name, filename) data = np.load(filename) aperture = data[][()] pixel_images = data[] nearby = data[][()] hires = data[][()] fitsheader = data[] cadn = data[] time = data[] fpix = data[] fpix_err = data[] qual = data[] pc1 = data[] pc2 = data[] tmp = np.delete(fpix[:, i, j], np.where( np.isnan(fpix[:, i, j]))) if len(tmp): f = SavGol(tmp) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0] np.delete(tmp, bad) i97 = int(0.975 * len(tmp)) tmp = tmp[np.argsort(tmp)[i97]] f97[i, j] = tmp if np.nanmax(f97) <= satflx: log.info("No saturated columns detected.") saturated = False aperture[np.isnan(fpix[0])] = 0 ap = np.where(aperture & 1) fpix2D = np.array([f[ap] for f in fpix], dtype=) fpix_err2D = np.array([p[ap] for p in fpix_err], dtype=) else: saturated = True ncol = 0 fpixnew = [] ferrnew = [] for j in range(aperture.shape[1]): if np.any(f97[:, j] > satflx): marked = False collapsed = np.zeros(len(fpix[:, 0, 0])) collapsed_err2 = np.zeros(len(fpix[:, 0, 0])) for i in range(aperture.shape[0]): if aperture[i, j]: if not marked: aperture[i, j] = AP_COLLAPSED_PIXEL marked = True else: aperture[i, j] = AP_SATURATED_PIXEL collapsed += fpix[:, i, j] collapsed_err2 += fpix_err[:, i, j] ** 2 if np.any(collapsed): fpixnew.append(collapsed) ferrnew.append(np.sqrt(collapsed_err2)) ncol += 1 else: for i in range(aperture.shape[0]): if aperture[i, j]: fpixnew.append(fpix[:, i, j]) ferrnew.append(fpix_err[:, i, j]) fpix2D = np.array(fpixnew).T fpix_err2D = np.array(ferrnew).T log.info("Collapsed %d saturated column(s)." % ncol) binds = np.where(aperture ^ 1) if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0): bkg = np.nanmedian(np.array([f[binds] for f in fpix], dtype=), axis=1) bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err], dtype=), axis=1) \ / np.sqrt(len(binds[0])) bkg = bkg.reshape(-1, 1) bkg_err = bkg_err.reshape(-1, 1) else: bkg = 0. bkg_err = 0. fpix = fpix2D - bkg fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2) flux = np.sum(fpix, axis=1) nanmask = np.where(np.isnan(flux) | (flux == 0))[0] if type(data.mag) is pyfits.card.Undefined: data.mag = np.nan data.pixel_images = pixel_images data.nearby = nearby data.hires = hires data.saturated = saturated data.bkg = bkg return data
Returns a :py:obj:`DataContainer` instance with the raw data for the target. :param str fitsfile: The full raw target pixel file path :param bool clobber: Overwrite existing files? Default :py:obj:`False` :param float saturation_tolerance: Target is considered saturated \ if flux is within this fraction of the pixel well depth. \ Default -0.1 :param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider \ outliers when computing the model. \ Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]` :param bool get_hires: Download a high resolution image of the target? \ Default :py:obj:`True` :param bool get_nearby: Retrieve location of nearby sources? \ Default :py:obj:`True`
376,639
def direct_perms_for_user(cls, instance, user, db_session=None): db_session = get_db_session(db_session, instance) query = db_session.query( cls.models_proxy.UserResourcePermission.user_id, cls.models_proxy.UserResourcePermission.perm_name, ) query = query.filter(cls.models_proxy.UserResourcePermission.user_id == user.id) query = query.filter( cls.models_proxy.UserResourcePermission.resource_id == instance.resource_id ) perms = [ PermissionTuple(user, row.perm_name, "user", None, instance, False, True) for row in query ] if instance.owner_user_id == user.id: perms.append( PermissionTuple(user, ALL_PERMISSIONS, "user", None, instance, True) ) return perms
Returns permissions that the given user has for this resource, excluding those inherited from groups that the user belongs to. :param instance: :param user: :param db_session: :return:
376,640
def get_decimal_time(self): return decimal_time(self.data['year'], self.data['month'], self.data['day'], self.data['hour'], self.data['minute'], self.data['second'])
Returns the time of the catalogue as a decimal
376,641
def is_location(v) -> (bool, str): def convert2float(value): try: float_num = float(value) return float_num except ValueError: return False if not isinstance(v, str): return False, v split_lst = v.split(":") if len(split_lst) != 5: return False, v if convert2float(split_lst[3]): longitude = abs(convert2float(split_lst[3])) if longitude > 90: return False, v if convert2float(split_lst[4]): latitude = abs(convert2float(split_lst[4])) if latitude > 180: return False, v return True, v
Boolean function for checking if v is a location format Args: v: Returns: bool
376,642
def winapi(context, names): logging.info(_()) sense = context.obj[] none = True for name in names: code = sense.query_args(name) if code: none = False print(stylify_code(code)) else: logging.warning(_(), name) sys.exit(1 if none else 0)
Query Win32 API declarations. Windows database must be prepared before using this.
376,643
def _uniform_dist(self, spread, total): fraction, fixed_increment = math.modf(total / spread) fixed_increment = int(fixed_increment) balance = 0 dist = [] for _ in range(spread): balance += fraction withdrawl = 1 if balance > 0.5 else 0 if withdrawl: balance -= withdrawl dist.append(fixed_increment + withdrawl) return dist
Produce a uniform distribution of `total` across a list of `spread` size. The result is non-random and uniform.
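A standalone version of the distribution logic with a worked example (the function name drops the leading underscore; otherwise it follows the code above):

    import math

    def uniform_dist(spread, total):
        # Deterministically spread `total` into `spread` near-equal integers.
        fraction, fixed = math.modf(total / spread)
        fixed = int(fixed)
        balance, dist = 0.0, []
        for _ in range(spread):
            balance += fraction
            withdrawl = 1 if balance > 0.5 else 0
            balance -= withdrawl
            dist.append(fixed + withdrawl)
        return dist

    print(uniform_dist(3, 10))  # [3, 4, 3], which sums to 10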
376,644
def maybe_dotted(module, throw=True): try: return Configurator().maybe_dotted(module) except ImportError as e: err = % (module, e) if throw: raise ImportError(err) else: log.error(err) return None
If ``module`` is a dotted string pointing to the module, imports and returns the module object.
376,645
def state_size(self): return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units)
State size of the LSTMStateTuple.
376,646
def blpop(self, keys, timeout=0): if timeout is None: timeout = 0 keys = list_or_args(keys, None) keys.append(timeout) return self.execute_command('BLPOP', *keys)
LPOP a value off of the first non-empty list named in the ``keys`` list. If none of the lists in ``keys`` has a value to LPOP, then block for ``timeout`` seconds, or until a value gets pushed on to one of the lists. If timeout is 0, then block indefinitely.
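Typical client-side usage, assuming the redis package is installed and a Redis server is reachable on the default port:

    import redis  # requires a running Redis server

    r = redis.Redis()
    r.rpush('jobs', 'job-1')

    # Returns (b'jobs', b'job-1'), or None if nothing arrives in 5 seconds.
    item = r.blpop(['jobs', 'jobs-priority'], timeout=5)
    print(item)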
376,647
def putout(ofile, keylist, Rec): pmag_out = open(ofile, 'a') outstring = "" for key in keylist: try: outstring = outstring + '\t' + str(Rec[key]).strip() except: print(key, Rec[key]) outstring = outstring + '\n' pmag_out.write(outstring[1:]) pmag_out.close()
writes out a magic format record to ofile
376,648
def cmd(self, cmd, verbose=False): command = cmd.format(maildir=self.directory) if verbose: print(command) p = Popen([ "ssh", "-T", self.host, command ], stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() return stdout
Executes the specified command on the remote host. The cmd must be format safe, this means { and } must be doubled, thusly: echo /var/local/maildir/{{cur,new}} the cmd can include the format word 'maildir' to be replaced by self.directory. eg: echo {maildir}/{{cur,new}}
376,649
def coords2px(y, x): rows = np.rint([y[0], y[0], y[2], y[2]]).astype(int) cols = np.rint([y[1], y[3], y[1], y[3]]).astype(int) r,c,*_ = x.shape Y = np.zeros((r, c)) Y[rows, cols] = 1 return Y
Transforming coordinates to pixels. Arguments: y : np array vector in which (y[0], y[1]) and (y[2], y[3]) are the corners of a bounding box. x : image an image Returns: Y : image of shape x.shape
376,650
def exclusive_match(self, field, value): if isinstance(value, str): value = [value] value.sort() self.exclude_range(field, "*", value[0], inclusive=False, new_group=True) self.match_range(field, value[0], value[0]) for index, val in enumerate(value[1:]): self.exclude_range(field, value[index-1], val, inclusive=False) self.match_range(field, val, val) self.exclude_range(field, value[-1], "*", inclusive=False) return self
Match exactly the given value(s), with no other data in the field. Arguments: field (str): The field to check for the value. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. value (str or list of str): The value(s) to match exactly. Returns: SearchHelper: Self
376,651
def tags(self): tags = self.workbench.get_all_tags() if not tags: return tag_df = pd.DataFrame(tags) tag_df = self.vectorize(tag_df, ) print % (color.LightPurple, color.Normal) self.top_corr(tag_df)
Display tag information for all samples in database
376,652
def get_local_version(sigdir, sig): version = None filename = os.path.join(sigdir, % sig) if os.path.exists(filename): cmd = [, , filename] sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE) while True: line = sigtool.stdout.readline() if line and line.startswith(): version = line.split()[1] break if not line: break sigtool.wait() return version
Get the local version of a signature
376,653
def _netinfo_freebsd_netbsd(): ret = {} out = __salt__[]( .format( if __grains__[] == else ), python_shell=True ) for line in out.splitlines(): user, cmd, pid, _, proto, local_addr, remote_addr = line.split() local_addr = .join(local_addr.rsplit(, 1)) remote_addr = .join(remote_addr.rsplit(, 1)) ret.setdefault( local_addr, {}).setdefault( remote_addr, {}).setdefault( proto, {}).setdefault( pid, {})[] = user ret[local_addr][remote_addr][proto][pid][] = cmd return ret
Get process information for network connections using sockstat
376,654
def write_tsv(self, path, encoding='UTF-8'): assert isinstance(path, (str, _oldstr)) assert isinstance(encoding, (str, _oldstr)) sep = '\t' if six.PY2: sep = sep.encode() self.to_csv( path, sep=sep, float_format=, mode='w', encoding=encoding, header=True ) logger.info(, self.name, self.p, path)
Write expression matrix to a tab-delimited text file. Parameters ---------- path: str The path of the output file. encoding: str, optional The file encoding. ("UTF-8") Returns ------- None
376,655
def handle_cmd_options(): parser = OptionParser() parser.add_option("-s", "--silent", action="store_true", dest="silent", help="print any warnings", default=False) (options, args) = parser.parse_args() return options, args
Get the options from the command line.
376,656
def visibility_changed(self, enable): if self.dockwidget is None: return if enable: self.dockwidget.raise_() widget = self.get_focus_widget() if widget is not None and self.undocked_window is not None: widget.setFocus() visible = self.dockwidget.isVisible() or self.ismaximized if self.DISABLE_ACTIONS_WHEN_HIDDEN: toggle_actions(self.plugin_actions, visible) self.isvisible = enable and visible if self.isvisible: self.refresh_plugin()
Dock widget visibility has changed.
376,657
def FromTXOutputsConfirmed(outputs): uns = UnspentCoinState() uns.Items = [0] * len(outputs) for i in range(0, len(outputs)): uns.Items[i] = int(CoinState.Confirmed) return uns
Get unspent outputs from a list of transaction outputs. Args: outputs (list): of neo.Core.TX.Transaction.TransactionOutput items. Returns: UnspentCoinState:
376,658
def _get_best_prediction(self, record, train=True): if not self.trees: return best = (+1e999999, None) for tree in self.trees: best = min(best, (tree.mae.mean, tree)) _, best_tree = best prediction, tree_mae = best_tree.predict(record, train=train) return prediction.mean
Gets the prediction from the tree with the lowest mean absolute error.
376,659
def finish (self): if not self.urlqueue.empty(): self.cancel() for t in self.threads: t.stop()
Wait for checker threads to finish.
376,660
def get_formatter(name): if name in (, , ): return af_self elif name == : return af_class elif name in (, , ): return af_named elif name in (, ): return af_default elif name in (, , ): return af_keyword else: raise ValueError( % name)
Return the named formatter function. See the function "set_formatter" for details.
376,661
def get_info(df, group, info=['mean', 'std']): agg = df.groupby(group).agg(info) agg.columns = agg.columns.droplevel(0) return agg
Aggregate mean and std with the given group.
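A quick illustration of the aggregation above on a toy frame (the column names are illustrative):

    import pandas as pd

    df = pd.DataFrame({'group': ['a', 'a', 'b'], 'value': [1.0, 3.0, 5.0]})

    def get_info(df, group, info=['mean', 'std']):
        agg = df.groupby(group).agg(info)
        agg.columns = agg.columns.droplevel(0)  # drop the 'value' level
        return agg

    print(get_info(df, 'group'))
    #        mean       std
    # group
    # a       2.0  1.414214
    # b       5.0       NaN   (std of a single sample is NaN)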
376,662
def get_option(env_name, section, opt_name, default=None): val = os.environ.get(env_name) if val is None: try: val = _pyftpsyncrc_parser.get(section, opt_name) except (compat.configparser.NoSectionError, compat.configparser.NoOptionError): pass if val is None: val = default return val
Return a configuration setting from environment var or .pyftpsyncrc
376,663
def remove_project(self, path): project_node = foundations.common.get_first_item(self.__model.get_project_nodes(path)) if not project_node: self.__engine.notifications_manager.warnify( "{0} | '{1}' project is not opened!".format(self.__class__.__name__, path)) return False LOGGER.info("{0} | Removing '{1}' project!".format(self.__class__.__name__, path)) self.__model.delete_project_nodes(project_node) return True
Removes a project. :param path: Project path. :type path: unicode :return: Method success. :rtype: bool
376,664
def image_load_time(self): load_times = self.get_load_times() return round(mean(load_times), self.decimal_precision)
Returns aggregate image load time for all pages.
376,665
def load(self, source_list: Iterable[List[str]], target_sentences: Iterable[List[Any]], num_samples_per_bucket: List[int]) -> : assert len(num_samples_per_bucket) == len(self.buckets) data_source = [np.full((num_samples,), self.pad_id, dtype=object) for num_samples in num_samples_per_bucket] data_target = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype) for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)] data_label = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype) for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)] bucket_sample_index = [0 for buck in self.buckets] num_tokens_target = 0 num_pad_target = 0 for source, target in zip(source_list, target_sentences): target_len = len(target) buck_index, buck = get_target_bucket(self.buckets, target_len) if buck is None: continue num_tokens_target += buck[1] num_pad_target += buck[1] - target_len sample_index = bucket_sample_index[buck_index] data_source[buck_index][sample_index] = source data_target[buck_index][sample_index, :target_len] = target data_label[buck_index][sample_index, :target_len] = target[1:] + [self.eos_id] bucket_sample_index[buck_index] += 1 for i in range(len(data_source)): data_target[i] = mx.nd.array(data_target[i], dtype=self.dtype) data_label[i] = mx.nd.array(data_label[i], dtype=self.dtype) if num_tokens_target > 0: logger.info("Created bucketed parallel data set. Introduced padding: target=%.1f%%)", num_pad_target / num_tokens_target * 100) return ParallelDataSet(data_source, data_target, data_label)
Creates a parallel dataset based on a source list of strings and target sentences. Returns a `sockeye.data_io.ParallelDataSet`. :param source_list: Source list of strings (e.g., filenames). :param target_sentences: Target sentences used to do bucketing. :param num_samples_per_bucket: Number of samples per bucket. :return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`.
376,666
def __construct_list(self, list_value): array = [] for value in list_value: array.append(self.__iterate_value(value)) return array
Loop list/set/tuple and parse values
376,667
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float): if offset is None: offset = array.min() array = array - offset if not hasattr(array, 'dtype'): array = np.array(array) if array.dtype.kind in 'O': array = np.asarray(pd.Series(array.ravel())).reshape(array.shape) if datetime_unit: array = array / np.timedelta64(1, datetime_unit) if array.dtype.kind in 'mM': return np.where(isnull(array), np.nan, array.astype(dtype)) return array.astype(dtype)
Convert an array containing datetime-like data to an array of floats. Parameters ---------- array : np.array Input data offset : Scalar with the same type as array, or None If None, subtract minimum values to reduce round off error datetime_unit : None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as'} dtype : target dtype Returns ------- array
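The core of the conversion, shown with plain numpy: subtract the minimum, then divide by one unit of the requested resolution to get floats:

    import numpy as np

    times = np.array(['2000-01-01', '2000-01-02', '2000-01-04'],
                     dtype='datetime64[ns]')

    # Float day offsets relative to the earliest timestamp.
    offset = times.min()
    days = (times - offset) / np.timedelta64(1, 'D')
    print(days)  # [0. 1. 3.]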
376,668
def _field_value_html(self, field): if field in self.fields: return unicode(self.get(field)) else: return self.get_timemachine_instance(field)._object_name_html()
Return the html representation of the value of the given field
376,669
def tlg_plaintext_cleanup(text, rm_punctuation=False, rm_periods=False):
    # NB: the string literals below were lost in extraction; they have been
    # reconstructed from the surviving fragments and should be treated as
    # approximate.
    remove_comp = regex.compile(r'-\n|«|»|<|>|\.\.\.|‘|’|_|{.+?}|\(.+?\)|[a-zA-Z0-9]', flags=regex.VERSION1)
    text = remove_comp.sub('', text)
    new_text = None
    if rm_punctuation:
        new_text = ''
        punctuation = [',', '·', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']
        if rm_periods:
            punctuation += ['.', ';']
        for char in text:
            if char in punctuation:
                pass
            else:
                new_text += char
    if new_text:
        text = new_text
    # Normalize line breaks and runs of whitespace to single spaces.
    text = regex.sub(r'\n', ' ', text)
    text = regex.sub(r'\s+', ' ', text)
    return text
Remove and substitute post-processing for Greek TLG text. TODO: Surely more junk to pull out. Please submit bugs! TODO: {.+?}|\(.+?\) working? TODO: This is rather slow for now; help in speeding it up is welcome.
376,670
def has_space(self, length=1, offset=0): return self.pos + (length + offset) - 1 < self.length
Returns boolean if self.pos + length < working string length.
376,671
def assert_all_of_selectors(self, selector, *locators, **kwargs): wait = kwargs['wait'] if 'wait' in kwargs else capybara.default_max_wait_time if not isinstance(selector, Hashable) or selector not in selectors: locators = (selector,) + locators selector = capybara.default_selector @self.synchronize(wait=wait) def assert_all_of_selectors(): for locator in locators: self.assert_selector(selector, locator, **kwargs) return True return assert_all_of_selectors()
Asserts that all of the provided selectors are present on the given page or descendants of the current node. If options are provided, the assertion will check that each locator is present with those options as well (other than ``wait``). :: page.assert_all_of_selectors("custom", "Tom", "Joe", visible="all") page.assert_all_of_selectors("css", "#my_dif", "a.not_clicked") It accepts all options that :meth:`find_all` accepts, such as ``text`` and ``visible``. The ``wait`` option applies to all of the selectors as a group, so all of the locators must be present within ``wait`` (defaults to :data:`capybara.default_max_wait_time`) seconds. If the given selector is not a valid selector, the first argument is assumed to be a locator and the default selector will be used. Args: selector (str, optional): The name of the selector to use. Defaults to :data:`capybara.default_selector`. *locators (str): Variable length list of locators. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
376,672
def delete(self, id): return self._post( request=ApiActions.DELETE.value, uri=ApiUri.TAGS.value, params={: id} )
Delete the specified label :param id: the label's ID :type id: str :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries
376,673
def transform(self, X): noise = self._noise_func(*self._args, size=X.shape) results = X + noise self.relative_noise_size_ = self.relative_noise_size(X, results) return results
:X: numpy ndarray
376,674
def get_expected_bindings(self): sg_bindings = db_lib.get_baremetal_sg_bindings() all_expected_bindings = collections.defaultdict(set) for sg_binding, port_binding in sg_bindings: sg_id = sg_binding[] try: binding_profile = json.loads(port_binding.profile) except ValueError: binding_profile = {} switchports = self._get_switchports(binding_profile) for switch, intf in switchports: ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION) egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION) all_expected_bindings[switch].add( (intf, ingress_name, a_const.INGRESS_DIRECTION)) all_expected_bindings[switch].add( (intf, egress_name, a_const.EGRESS_DIRECTION)) return all_expected_bindings
Query the neutron DB for SG->switch interface bindings Bindings are returned as a dict of bindings for each switch: {<switch1>: set([(intf1, acl_name, direction), (intf2, acl_name, direction)]), <switch2>: set([(intf1, acl_name, direction)]), ..., }
376,675
def Log(self, format_str, *args): log_entry = rdf_flow_objects.FlowLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=format_str % args) data_store.REL_DB.WriteFlowLogEntries([log_entry]) if self.rdf_flow.parent_hunt_id: db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string
376,676
def _private_packages_allowed(): if not HAVE_PAYMENTS or TEAM_ID: return True customer = _get_or_create_customer() plan = _get_customer_plan(customer) return plan != PaymentPlan.FREE
Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments.
376,677
def _get_provider_manager(self, osid, local=False): return get_provider_manager(osid, runtime=self._runtime, proxy=getattr(self, , None), local=local)
Gets the most appropriate provider manager depending on config.
376,678
def stationary_distribution_sensitivity(T, j): n = len(T) lEV = numpy.ones(n) rEV = stationary_distribution(T) eVal = 1.0 T = numpy.transpose(T) vecA = numpy.zeros(n) vecA[j] = 1.0 matA = T - eVal * numpy.identity(n) matA = numpy.concatenate((matA, [lEV])) phi = numpy.linalg.lstsq(numpy.transpose(matA), vecA, rcond=-1) phi = numpy.delete(phi[0], -1) sensitivity = -numpy.outer(rEV, phi) + numpy.dot(phi, rEV) * numpy.outer(rEV, lEV) return sensitivity
r"""Calculate the sensitivity matrix for entry j the stationary distribution vector given transition matrix T. Parameters ---------- T : numpy.ndarray shape = (n, n) Transition matrix j : int entry of stationary distribution for which the sensitivity is to be computed Returns ------- x : ndarray, shape=(n, n) Sensitivity matrix for entry index around transition matrix T. Reversibility is not assumed. Remark ------ Note, that this function uses a different normalization convention for the sensitivity compared to eigenvector_sensitivity. See there for further information.
376,679
def load(source, semi=None): if hasattr(source, 'read'): return _load(source, semi) else: with open(source, 'r') as fh: return _load(fh, semi)
Read a variable-property mapping from *source* and return the VPM. Args: source: a filename or file-like object containing the VPM definitions semi (:class:`~delphin.mrs.semi.SemI`, optional): if provided, it is passed to the VPM constructor Returns: a :class:`VPM` instance
376,680
def convert(self, obj): if self.pristine_if_invalid: raise NotImplementedError() nex = get_nexml_el(obj) assert nex otusById = nex[] otusElementOrder = nex[] otus = self.convert_otus(otusById, otusElementOrder) nex[] = otus treesById = nex[] treesElementOrder = nex[] trees = self.convert_trees(treesById, treesElementOrder) nex[] = trees nex[] = str(DIRECT_HONEY_BADGERFISH) if self.remove_old_structs: del nex[] del nex[] del nex[] del nex[] return obj
Takes a dict corresponding to the honeybadgerfish JSON blob of the 1.2.* type and converts it to DIRECT_HONEY_BADGERFISH version. The object is modified in place and returned.
376,681
def get_best(self): svc = SVC(kernel="linear") rfecv = RFECV( estimator=svc, step=1, cv=StratifiedKFold(self.y_train, 2), scoring="log_loss" ) rfecv.fit(self.x_train, self.y_train) return rfecv.n_features_, rfecv.ranking_
Finds the optimal number of features :return: optimal number of features and ranking
376,682
def consume(exchange, queue_name, routing_key, callback, app_name): bindings = config.conf["bindings"] queues = config.conf["queues"] if queues == config.DEFAULTS["queues"]: queues[config._default_queue_name]["durable"] = True queues[config._default_queue_name]["auto_delete"] = False if queue_name: queues = {queue_name: config.conf["queues"][config._default_queue_name]} for binding in bindings: binding["queue"] = queue_name if exchange: for binding in bindings: binding["exchange"] = exchange if routing_key: for binding in bindings: binding["routing_keys"] = routing_key callback_path = callback or config.conf["callback"] if not callback_path: raise click.ClickException( "A Python path to a callable object that accepts the message must be provided" ) try: module, cls = callback_path.strip().split(":") except ValueError: raise click.ClickException( "Unable to parse the callback path ({}); the " .format(callback_path) ) try: module = importlib.import_module(module) except ImportError as e: provider = "--callback argument" if callback else "configuration file" raise click.ClickException( "Failed to import the callback module ({}) provided in the {}".format( str(e), provider ) ) try: callback = getattr(module, cls) except AttributeError as e: raise click.ClickException( "Unable to import {} ({}); is the package installed? The python path should " .format( callback_path, str(e) ) ) if app_name: config.conf["client_properties"]["app"] = app_name _log.info("Starting consumer with %s callback", callback_path) try: deferred_consumers = api.twisted_consume( callback, bindings=bindings, queues=queues ) deferred_consumers.addCallback(_consume_callback) deferred_consumers.addErrback(_consume_errback) except ValueError as e: click_version = pkg_resources.get_distribution("click").parsed_version if click_version < pkg_resources.parse_version("7.0"): raise click.exceptions.BadOptionUsage(str(e)) else: raise click.exceptions.BadOptionUsage("callback", str(e)) reactor.run() sys.exit(_exit_code)
Consume messages from an AMQP queue using a Python callback.
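A hypothetical invocation, following the callback-path format named in the error messages; the option spellings are inferred from the function signature and may not match the real CLI exactly:

# my_package/module.py -- a minimal callback the consumer can import
def printer(message):
    print(message)

# then, from a shell (hypothetical option spelling):
#   fedora-messaging consume --callback="my_package.module:printer" \
#       --queue-name=demo --routing-key="org.example.#"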
376,683
def ensure_caches_alive(max_retries: int = 100,
                        retry_timeout: int = 5,
                        exit_on_failure: bool = True) -> bool:
    # The stripped check key is restored from the docstring; the wf() message
    # strings were also stripped and the texts below are plausible guesses.
    for cache_alias in settings.CACHES.keys():
        cache = caches[cache_alias]
        wf('Checking if the cache backend `%s` is available... ' % cache_alias, False)
        for i in range(max_retries):
            try:
                cache.set('django-docker-helpers:available-check', '1')
                assert cache.get('django-docker-helpers:available-check') == '1'
                cache.delete('django-docker-helpers:available-check')
                wf('[+]\n')
                break
            except Exception as e:
                wf(str(e) + '\n')
                sleep(retry_timeout)
        else:
            wf('Tried %s time(s). Shutting down.\n' % max_retries)
            exit_on_failure and exit(1)
            return False
    return True
Checks every cache backend alias in ``settings.CACHES`` until it becomes available. If all ``max_retries`` attempts to reach a backend fail, it returns ``False``. If ``exit_on_failure`` is set it shuts down with ``exit(1)``.

It sets the ``django-docker-helpers:available-check`` key for every cache backend to ensure it accepts connections; if the check passes, the key is deleted.

:param exit_on_failure: set to ``True`` if there's no sense in continuing
:param int max_retries: number of attempts to reach a cache backend, default is ``100``
:param int retry_timeout: timeout in seconds between attempts, default is ``5``
:return: ``True`` if all backends are available, ``False`` if any backend check failed
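A typical call site is a container entrypoint, before the WSGI server starts; a sketch, with the module path assumed:

import django
django.setup()

# block until every cache backend in settings.CACHES answers, then boot
from django_docker_helpers.db import ensure_caches_alive
ensure_caches_alive(max_retries=20, retry_timeout=3)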
376,684
def plot_results(fout_img, goea_results, **kws):
    if "{NS}" not in fout_img:
        plt_goea_results(fout_img, goea_results, **kws)
    else:
        # plot the results separately for each GO namespace
        ns2goea_results = cx.defaultdict(list)
        for rec in goea_results:
            ns2goea_results[rec.NS].append(rec)
        for ns_name, ns_res in ns2goea_results.items():
            fout = fout_img.format(NS=ns_name)
            plt_goea_results(fout, ns_res, **kws)
Given a list of GOEA results, plot the resulting GO terms up to the top of the hierarchy. If *fout_img* contains "{NS}", one plot is written per GO namespace; otherwise a single combined plot is written.
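Usage follows directly from the "{NS}" branch above; goea_results as produced by a goatools enrichment study is assumed:

# one combined plot
plot_results("goea_all.png", goea_results)
# or one plot per namespace, e.g. goea_BP.png, goea_MF.png, goea_CC.png
plot_results("goea_{NS}.png", goea_results)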
376,685
def get_attachment_content(self, request, queryset):
    return self.dump_report_content(request, self.get_report_data_rows(request, queryset))
Returns the generated file content.

:param request: The request being processed.
:param queryset: The queryset being processed.
:return: The report content (usually expressed in raw bytes, but could be unicode as well).
376,686
def findFileParam(self, comp):
    # The key names were stripped in extraction; 'parameter', 'filename',
    # 'selection' and 'names' are inferred from the docstring and the
    # surrounding auto-parameter conventions, and may not be verbatim.
    for p in self._parameters:
        if p['parameter'] == 'filename' and comp in p['selection']:
            return p['names']
Finds the filename auto-parameter that component *comp* is in, and returns all the filenames for that parameter.

Note: this assumes that *comp* will only be in a single filename auto-parameter.

:param comp: Component to search parameter membership for
:type comp: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
:returns: list<str> -- filenames the found parameter will loop through
376,687
def _round(self, number):
    sign = 1 if number >= 0 else -1
    rounded = int(round(number))
    nextRounded = int(round(number + 1 * sign))
    if nextRounded == rounded:
        # x.5 already rounded away from zero, (x+1).5 rounded toward zero
        return rounded
    elif nextRounded == rounded + 1 * sign:
        # no tie involved; the built-in result is fine
        return rounded
    elif nextRounded == rounded + 2 * sign:
        # x.5 rounded toward zero; correct it away from zero
        return rounded + 1 * sign
    else:
        raise RuntimeError("Could not round {}".format(number))
Helper function for rounding-as-taught-in-school (X.5 rounds to X+1 if positive). Python 3 rounds 0.5 to whichever side is even (i.e. 2.5 rounds to 2).

:param float number: the number to round.
:return: closest integer to number, rounding ties away from 0.
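The difference this works around is easy to demonstrate. A standalone equivalent (a hypothetical helper, not part of the class above) shows the intended tie-breaking next to Python 3's built-in:

import math

def round_half_away(x):
    # round ties away from zero, like school rounding
    return int(math.floor(x + 0.5)) if x >= 0 else int(math.ceil(x - 0.5))

assert round(2.5) == 2            # Python 3 built-in rounds ties to even
assert round_half_away(2.5) == 3  # ties away from zero
assert round_half_away(-2.5) == -3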
376,688
def get_sdf(identifier, namespace='cid', domain='compound',
            operation=None, searchtype=None, **kwargs):
    try:
        return get(identifier, namespace, domain, operation, 'SDF',
                   searchtype, **kwargs).decode()
    except NotFoundError as e:
        log.info(e)
        return None
Request wrapper that automatically parses the SDF response and suppresses NotFoundError.
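A short usage sketch through the pubchempy package, which this wrapper appears to come from; CID 2244 is aspirin:

import pubchempy as pcp

sdf = pcp.get_sdf(2244)       # defaults: namespace='cid', domain='compound'
if sdf is None:
    print('no record found')  # NotFoundError was suppressed
else:
    print(sdf.splitlines()[0])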
376,689
def rs(data, n, unbiased=True):
    data = np.asarray(data)
    total_N = len(data)
    m = total_N // n  # number of subsequences of length n
    # cut values at the end of data to make its length divisible by n
    data = data[:total_N - (total_N % n)]
    # split the remaining data into subsequences of length n
    seqs = np.reshape(data, (m, n))
    # deviate each subsequence from its mean
    means = np.mean(seqs, axis=1)
    y = seqs - means.reshape((m, 1))
    # cumulative sums of the deviations
    y = np.cumsum(y, axis=1)
    # ranges and standard deviations of the subsequences
    r = np.max(y, axis=1) - np.min(y, axis=1)
    s = np.std(seqs, axis=1, ddof=1 if unbiased else 0)
    # discard subsequences whose range is zero
    idx = np.where(r != 0)
    r = r[idx]
    s = s[idx]
    if len(r) == 0:
        return np.nan
    else:
        return np.mean(r / s)
Calculates an individual R/S value in the rescaled range approach for a given n.

Note: This is just a helper function for hurst_rs and should not be called directly.

Args:
    data (array-like of float): time series
    n (float): size of the subseries in which data should be split

Kwargs:
    unbiased (boolean): if True, the standard deviation based on the unbiased variance (1/(N-1) instead of 1/N) will be used. This should be the default choice, since the true mean of the sequences is not known. This parameter should only be changed to recreate results of other implementations.

Returns:
    float: (R/S)_n
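To see how this helper feeds the Hurst estimate: compute (R/S)_n for several n and fit the slope of log(R/S) against log(n). A sketch reusing the rs function above; for uncorrelated noise the slope should land near 0.5:

import numpy as np

np.random.seed(42)
data = np.random.randn(10000)      # white noise, H ~ 0.5
ns = [16, 32, 64, 128, 256, 512]
rs_vals = [rs(data, n) for n in ns]
hurst, _ = np.polyfit(np.log(ns), np.log(rs_vals), 1)
print(round(hurst, 2))             # close to 0.5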
376,690
def page(request, slug, template=u"pages/page.html", extra_context=None):
    from yacms.pages.middleware import PageMiddleware
    if not PageMiddleware.installed():
        raise ImproperlyConfigured("yacms.pages.middleware.PageMiddleware "
                                   "(or a subclass of it) is missing from " +
                                   "settings.MIDDLEWARE_CLASSES or " +
                                   "settings.MIDDLEWARE")
    if not hasattr(request, "page") or request.page.slug != slug:
        raise Http404
    template_name = str(slug) if slug != home_slug() else "index"
    templates = [u"pages/%s.html" % template_name]
    method_template = request.page.get_content_model().get_template_name()
    if method_template:
        templates.insert(0, method_template)
    if request.page.content_model is not None:
        templates.append(u"pages/%s/%s.html" %
                         (template_name, request.page.content_model))
    # The body of this loop was garbled in extraction; per the docstring it
    # appends template candidates based on each parent page's slug, so those
    # appends are reconstructed here and may not be verbatim.
    for parent in request.page.get_ascendants(for_user=request.user):
        parent_template_name = str(parent.slug)
        templates.append(u"pages/%s.html" % parent_template_name)
        if request.page.content_model is not None:
            templates.append(u"pages/%s/%s.html" %
                             (parent_template_name, request.page.content_model))
    if request.page.content_model is not None:
        templates.append(u"pages/%s.html" % request.page.content_model)
    templates.append(template)
    return TemplateResponse(request, templates, extra_context or {})
Select a template for a page and render it. The request object should have a ``page`` attribute that's added via ``yacms.pages.middleware.PageMiddleware``. The page is loaded earlier via middleware to perform various other functions. The urlpattern that maps to this view is a catch-all pattern, in which case the page attribute won't exist, so raise a 404 then.

For template selection, a list of possible templates is built up based on the current page. This list is ordered from the most granular match, starting with a custom template for the exact page, then adding templates based on the page's parent page, which could be used for sections of a site (e.g. all children of the parent). Finally, at the broadest level, a template for the page's content type (its model class) is checked for, and if none of these templates match, the default pages/page.html is used.
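To make the ordering concrete, here is the candidate list for a hypothetical page at slug "about/team" whose content model is "richtextpage" (names invented for illustration):

# templates == [
#     <method template, if get_template_name() returned one>,
#     "pages/about/team.html",
#     "pages/about/team/richtextpage.html",
#     "pages/about.html",               # parent slug
#     "pages/about/richtextpage.html",  # parent slug + content model
#     "pages/richtextpage.html",        # content-model fallback
#     "pages/page.html",                # the default `template` argument
# ]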
376,691
def do_request(self, line):
    def f(p, method, params):
        result = p.call(method, params)
        print("RESULT %s" % result)
    self._request(line, f)
request <peer> <method> <params>

Send a msgpack-rpc request and print the response. <params> is a python code snippet; it is eval'ed to produce the parameter list.
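At the interactive prompt this reads, for example (peer and method names are hypothetical): request peer1 sum [1, 2] — here [1, 2] is eval'ed into the params list, the RPC call is issued to peer1, and the result is printed as RESULT 3.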
376,692
def draw_text(self, video_name, out, start, end, x, y, text, color='white',
              show_background=0, background_color='black', size=16):
    # Single-quoted fragments of the drawtext filter, the default colors and
    # the ffmpeg argument list were stripped in extraction; the quoted filter
    # values and command flags below are plausible reconstructions, not the
    # verbatim originals.
    cfilter = (r"[0:0]drawtext=fontfile=/Library/Fonts/AppleGothic.ttf:"
               r"x={x}:y={y}:fontcolor='{font_color}':"
               r"box={show_background}:"
               r"boxcolor='{background_color}':"
               r"text='{text}':fontsize={size}:"
               r"enable='between(t,{start},{end})'[vout];"
               r"[0:1]apad=pad_len=0[aout]")\
        .format(x=x, y=y, font_color=color,
                show_background=show_background,
                background_color=background_color,
                text=text, start=start, end=end, size=size)
    command = ['ffmpeg', '-i', video_name,
               '-y', '-strict', '-2', '-filter_complex', cfilter,
               '-map', '[vout]', '-map', '[aout]', '-c:v', 'libx264',
               out]
    if self.verbose:
        print 'Drawing text "{0}" on {1}, saving to {2}'.format(
            text, video_name, out,
        )
        print ' '.join(command)
    call(command)
Draws text over a video.

@param video_name : name of video input file
@param out : name of video output file
@param start : start timecode to draw text hh:mm:ss
@param end : end timecode to draw text hh:mm:ss
@param x : x position of text (px)
@param y : y position of text (px)
@param text : text content to draw
@param color : text color
@param show_background : boolean to show a background box behind the text
@param background_color : color of background box
@param size : font size of the text
376,693
def is_ancestor(self, ancestor_rev, rev):
    try:
        self.git.merge_base(ancestor_rev, rev, is_ancestor=True)
    except GitCommandError as err:
        # `git merge-base --is-ancestor` exits with status 1 when the
        # answer is "no"; anything else is a real error
        if err.status == 1:
            return False
        raise
    return True
Check if a commit is an ancestor of another.

:param ancestor_rev: Rev which should be an ancestor
:param rev: Rev to test against ancestor_rev
:return: ``True`` if ancestor_rev is an ancestor of rev.
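Since this is a method on GitPython's Repo, the check reads as follows (paths and revs are illustrative, and assume the working directory is a repository with enough history):

from git import Repo

repo = Repo('.')
print(repo.is_ancestor('HEAD~3', 'HEAD'))  # True: HEAD~3 precedes HEAD
print(repo.is_ancestor('HEAD', 'HEAD~3'))  # False: exit status 1 -> False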
376,694
def get_conf(cls, builder, doctree=None):
    # The dict keys were stripped in extraction; 'theme', 'autoslides' and
    # 'slide_classes' are inferred from the config value names and may not
    # be verbatim.
    result = {
        'theme': builder.config.slide_theme,
        'autoslides': builder.config.autoslides,
        'slide_classes': [],
    }
    if doctree:
        conf_node = cls.get(doctree)
        if conf_node:
            result.update(conf_node.attributes)
    return result
Return a dictionary of slide configuration for this doctree.
376,695
def _get_error_page_callback(self):
    if self.response.status in self._error_handlers:
        return self._error_handlers[self.response.status]
    elif None in self._error_handlers:
        # the None key registers a catch-all handler
        return self._error_handlers[None]
    else:
        # fall back to a plain status line; the 'text/plain' literal was
        # stripped in extraction and is inferred here
        self.response.media_type = 'text/plain'
        return lambda: self.response.status_line
Return an error page for the current response status.
376,696
def l2traceroute_result_output_l2traceroutedone(self, **kwargs):
    config = ET.Element("config")
    l2traceroute_result = ET.Element("l2traceroute_result")
    config = l2traceroute_result
    output = ET.SubElement(l2traceroute_result, "output")
    l2traceroutedone = ET.SubElement(output, "l2traceroutedone")
    l2traceroutedone.text = kwargs.pop('l2traceroutedone')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
376,697
def _registerPickleType(name, typedef):
    NamedStruct._pickleNames[typedef] = name
    NamedStruct._pickleTypes[name] = typedef
Register a type with the specified name. After registration, NamedStruct with this type (and any sub-types) can be successfully pickled and transferred.
376,698
def rename(self, oldkey, newkey):
    if oldkey in self.scalars:
        the_list = self.scalars
    elif oldkey in self.sections:
        the_list = self.sections
    else:
        raise KeyError('Key "%s" not found.' % oldkey)
    pos = the_list.index(oldkey)
    # move the value and keep the key's position in the sequence
    val = self[oldkey]
    dict.__delitem__(self, oldkey)
    dict.__setitem__(self, newkey, val)
    the_list.remove(oldkey)
    the_list.insert(pos, newkey)
    # carry the comments over to the new key
    comm = self.comments[oldkey]
    inline_comment = self.inline_comments[oldkey]
    del self.comments[oldkey]
    del self.inline_comments[oldkey]
    self.comments[newkey] = comm
    self.inline_comments[newkey] = inline_comment
Change a keyname to another, without changing position in sequence.

Implemented so that transformations can be made on keys, as well as on values. (Used by encode and decode.)

Also renames comments.
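With configobj, where this method lives on Section, the comment-preserving behaviour looks like this (key names invented for illustration):

from configobj import ConfigObj

config = ConfigObj()
config['old_name'] = 'value'
config.comments['old_name'] = ['# this comment follows the key around']
config.rename('old_name', 'new_name')
print(config['new_name'])           # 'value'
print(config.comments['new_name'])  # ['# this comment follows the key around']
print('old_name' in config)         # False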
376,699
def reload_input_standby(self, **kwargs):
    config = ET.Element("config")
    reload = ET.Element("reload")
    config = reload
    input = ET.SubElement(reload, "input")
    standby = ET.SubElement(input, "standby")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code