Dataset columns: Unnamed: 0 (int64, values 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k).
2,100
def del_pipes(self, pipes, *args, **kwargs):
    for pipe in pipes:
        self.del_pipe(pipe, *args, **kwargs)
Deletes a sequence of pipes from the ``Dagger`` in the specified order. Takes optional arguments for ``Dagger.del_pipe``. Arguments: - pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or other valid ``Dagger.del_pipe`` arguments to be removed from the ``Dagger`` in left-to-right order.
2,101
def _parse_topic(client, command, actor, args):
    channel, _, topic = args.partition(" :")
    channel = client.server.get_channel(channel)
    channel.topic = topic or None
    if actor:
        actor = User(actor)
    client.dispatch_event("TOPIC", actor, channel, topic)
Parse a TOPIC and update channel state, then dispatch a TOPIC event.
2,102
def get_opcodes(self):
    if not self.opcodes:
        d, m, opcodes = edit_distance_backpointer(
            self.seq1, self.seq2,
            action_function=self.action_function,
            test=self.test)
        if self.dist:
            assert d == self.dist
        if self._matches:
            assert m == self._matches
        self.dist = d
        self._matches = m
        self.opcodes = opcodes
    return self.opcodes
Returns a list of opcodes. Opcodes are the same as defined by :py:mod:`difflib`.
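For reference, the opcode format mirrors the standard library; a minimal illustration using difflib itself:

import difflib

# Opcodes are (tag, i1, i2, j1, j2) tuples describing how to turn
# seq1[i1:i2] into seq2[j1:j2]; tags are 'equal', 'replace', 'insert'
# and 'delete'.
sm = difflib.SequenceMatcher(None, "abcdef", "abXdef")
print(sm.get_opcodes())
# [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 3), ('equal', 3, 6, 3, 6)]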
2,103
def get(self, key, get_cas=False):
    for server in self.servers:
        value, cas = server.get(key)
        if value is not None:
            if get_cas:
                return value, cas
            else:
                return value
    if get_cas:
        return None, None
Get a key from server. :param key: Key's name :type key: six.string_types :param get_cas: If true, return (value, cas), where cas is the new CAS value. :type get_cas: boolean :return: Returns a key data from server. :rtype: object
2,104
def create_ogr_field_from_definition(field_definition):
    # The dictionary keys were lost in extraction; 'type' and
    # 'field_name' are inferred from the surrounding calls.
    if isinstance(field_definition['type'], list):
        field_type = field_definition['type'][0]
    else:
        field_type = field_definition['type']
    field_type = field_type_converter.get(field_type, ogr.OFTString)
    return ogr.FieldDefn(field_definition['field_name'], field_type)
Helper to create a field from definition. :param field_definition: The definition of the field (see: safe.definitions.fields). :type field_definition: dict :return: The new ogr field definition. :rtype: ogr.FieldDefn
2,105
def create_object(self, data, view_kwargs):
    self.before_create_object(data, view_kwargs)
    relationship_fields = get_relationships(self.resource.schema, model_field=True)
    nested_fields = get_nested_fields(self.resource.schema, model_field=True)
    join_fields = relationship_fields + nested_fields
    obj = self.model(**{key: value for (key, value) in data.items()
                        if key not in join_fields})
    self.apply_relationships(data, obj)
    self.apply_nested_fields(data, obj)
    self.session.add(obj)
    try:
        self.session.commit()
    except JsonApiException as e:
        self.session.rollback()
        raise e
    except Exception as e:
        self.session.rollback()
        # source pointer literal lost in extraction; '/data' is assumed
        raise JsonApiException("Object creation error: " + str(e),
                               source={'pointer': '/data'})
    self.after_create_object(obj, data, view_kwargs)
    return obj
Create an object through sqlalchemy :param dict data: the data validated by marshmallow :param dict view_kwargs: kwargs from the resource view :return DeclarativeMeta: an object from sqlalchemy
2,106
def select(self, selection_specs=None, **kwargs):
    if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):
        selection_specs = [selection_specs]
    selection = super(DynamicMap, self).select(selection_specs, **kwargs)

    def dynamic_select(obj, **dynkwargs):
        if selection_specs is not None:
            matches = any(obj.matches(spec) for spec in selection_specs)
        else:
            matches = True
        if matches:
            return obj.select(**kwargs)
        return obj

    if not isinstance(selection, DynamicMap):
        return dynamic_select(selection)
    else:
        from ..util import Dynamic
        dmap = Dynamic(self, operation=dynamic_select, streams=self.streams)
        dmap.data = selection.data
        return dmap
Applies selection by dimension name Applies a selection along the dimensions of the object using keyword arguments. The selection may be narrowed to certain objects using selection_specs. For container objects the selection will be applied to all children as well. Selections may select a specific value, slice or set of values: * value: Scalar values will select rows along with an exact match, e.g.: ds.select(x=3) * slice: Slices may be declared as tuples of the upper and lower bound, e.g.: ds.select(x=(0, 3)) * values: A list of values may be selected using a list or set, e.g.: ds.select(x=[0, 1, 2]) Args: selection_specs: List of specs to match on A list of types, functions, or type[.group][.label] strings specifying which objects to apply the selection on. **selection: Dictionary declaring selections by dimension Selections can be scalar values, tuple ranges, lists of discrete values and boolean arrays Returns: Returns a Dimensioned object containing the selected data or a scalar if a single value was selected
2,107
def outer_product(vec0: QubitVector, vec1: QubitVector) -> QubitVector:
    R = vec0.rank
    R1 = vec1.rank
    N0 = vec0.qubit_nb
    N1 = vec1.qubit_nb
    if R != R1:
        raise ValueError("Tensor ranks must match")  # message lost in extraction
    if not set(vec0.qubits).isdisjoint(vec1.qubits):
        raise ValueError("Qubits must be disjoint")  # message lost in extraction
    qubits: Qubits = tuple(vec0.qubits) + tuple(vec1.qubits)
    tensor = bk.outer(vec0.tensor, vec1.tensor)
    # Interleave the axes of the two vectors
    tensor = bk.reshape(tensor, ([2**N0] * R) + ([2**N1] * R))
    perm = [idx for ij in zip(range(0, R), range(R, 2*R)) for idx in ij]
    tensor = bk.transpose(tensor, perm)
    return QubitVector(tensor, qubits)
Direct product of qubit vectors The tensor ranks must match and qubits must be disjoint.
2,108
def send_extended(self, address, timestamp, value):
    if self.marquise_ctx is None:
        raise ValueError("Attempted to write to a closed Marquise handle.")
    self.__debug("Supplied address: %s" % address)
    if value is None:
        raise TypeError("Can't send None as a value.")  # message partly lost in extraction
    # The marshalling of c_address, c_timestamp, c_value and c_length
    # was lost in extraction.
    self.__debug("Value '%s' with length of %d" % (value, c_length))
    success = MARQUISE_SEND_EXTENDED(self.marquise_ctx, c_address,
                                     c_timestamp, c_value, c_length)
    if success != 0:
        self.__debug("send_extended returned %d, raising exception" % success)
        raise RuntimeError("send_extended was unsuccessful, errno is %d" % FFI.errno)
    self.__debug("send_extended returned %d" % success)
    return True
Queue an extended datapoint (ie. a string), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- string value being stored.
2,109
def create_fd(self):
    if self.filename is None:
        return i18n.get_encoded_writer(encoding=self.output_encoding,
                                       errors=self.codec_errors)
    return codecs.open(self.filename, "wb", self.output_encoding,
                       self.codec_errors)
Create open file descriptor.
2,110
def plugins_show(what=None, name=None, version=None, details=False):
    global pluginmgr
    return pluginmgr.show(what, name, version, details)
Show details of available plugins Parameters ---------- what: Class of plugins e.g., backend name: Name of the plugin e.g., s3 version: Version of the plugin details: Should details be shown?
2,111
def _wmorlet(f0, sd, sampling_rate, ns=5):
    st = 1. / (2. * pi * sd)
    w_sz = float(int(ns * st * sampling_rate))
    t = arange(-w_sz, w_sz + 1, dtype=float) / sampling_rate
    w = (exp(-t ** 2 / (2. * st ** 2)) * exp(2j * pi * f0 * t) /
         sqrt(sqrt(pi) * st * sampling_rate))
    return w
adapted from nitime returns a complex morlet wavelet in the time domain Parameters ---------- f0 : center frequency sd : standard deviation of frequency sampling_rate : sampling rate ns : window length in number of standard deviations
2,112
def pdf(self, mu):
    if self.transform is not None:
        mu = self.transform(mu)
    return ss.poisson.pmf(mu, self.lmd0)
PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
2,113
def add_group(self, name, desc, status):
    existing_group = get_session().query(ResourceGroup).filter(
        ResourceGroup.name == name,
        ResourceGroup.network_id == self.id).first()
    if existing_group is not None:
        raise HydraError("A resource group with name %s is already in network %s"
                         % (name, self.id))
    group_i = ResourceGroup()
    group_i.name = name
    group_i.description = desc
    group_i.status = status
    get_session().add(group_i)
    self.resourcegroups.append(group_i)
    return group_i
Add a new group to a network.
2,114
def component_acting_parent_tag(parent_tag, tag):
    if parent_tag.name == "fig-group":
        if len(tag.find_previous_siblings("fig")) > 0:
            acting_parent_tag = first(extract_nodes(parent_tag, "fig"))
        else:
            return None
    else:
        acting_parent_tag = parent_tag
    return acting_parent_tag
Only intended for use when getting components. If the parent tag is named fig-group, use the first fig tag inside it as the acting parent tag.
2,115
def face_adjacency(faces=None, mesh=None, return_edges=False):
    if mesh is None:
        edges, edges_face = faces_to_edges(faces, return_index=True)
        edges.sort(axis=1)
    else:
        edges = mesh.edges_sorted
        edges_face = mesh.edges_face
    # Group rows of identical sorted edges; this line was lost in
    # extraction, and grouping.group_rows is the assumed original call.
    edge_groups = grouping.group_rows(edges, require_count=2)
    face_adjacency = edges_face[edge_groups]
    face_adjacency.sort(axis=1)
    if return_edges:
        face_adjacency_edges = edges[edge_groups[:, 0]]
        return face_adjacency, face_adjacency_edges
    return face_adjacency
Returns an (n,2) list of face indices. Each pair of faces in the list shares an edge, making them adjacent. Parameters ---------- faces : (n, 3) int, or None List of vertex indices representing triangles mesh : Trimesh object If passed, will use cached edges instead of faces return_edges : bool Return the edges shared by adjacent faces Returns --------- adjacency : (m,2) int Indexes of faces that are adjacent edges: (m,2) int Only returned if return_edges is True Indexes of vertices which make up the edges shared by the adjacent faces Examples ---------- This is useful for lots of things such as finding face-connected components: >>> graph = nx.Graph() >>> graph.add_edges_from(mesh.face_adjacency) >>> groups = nx.connected_components(graph)
2,116
def mmGetPlotConnectionsPerColumn(self, title="Connections per Columns"):
    plot = Plot(self, title)
    connectedCounts = numpy.ndarray(self.getNumColumns(), dtype=uintType)
    self.getConnectedCounts(connectedCounts)
    # The axis-label literals were truncated in extraction; labels
    # beginning with "#" are inferred from the cut points.
    plot.addGraph(sorted(connectedCounts.tolist(), reverse=True),
                  position=211, xlabel="column", ylabel="# connections")
    plot.addHistogram(connectedCounts.tolist(),
                      position=212, bins=len(connectedCounts) / 10,
                      xlabel="# connections")
    return plot
Returns plot of # connections per column. @return (Plot) plot
2,117
def simplify_types(types):
    flattened = flatten_types(types)
    items = filter_ignored_items(flattened)
    items = [simplify_recursive(item) for item in items]
    items = merge_items(items)
    items = dedupe_types(items)
    items = remove_redundant_items(items)
    if len(items) > 3:
        return [AnyType()]
    else:
        return items
Given some types, give simplified types representing the union of types.
2,118
def create_build_system(working_dir, buildsys_type=None, package=None,
                        opts=None, write_build_scripts=False, verbose=False,
                        build_args=[], child_build_args=[]):
    from rez.plugin_managers import plugin_manager
    if not buildsys_type:
        clss = get_valid_build_systems(working_dir, package=package)
        if not clss:
            raise BuildSystemError(
                "No build system is associated with the path %s" % working_dir)
        if len(clss) != 1:
            # separator literal lost in extraction; ', ' is assumed
            s = ', '.join(x.name() for x in clss)
            raise BuildSystemError(("Source could be built with one of: %s; "
                                    "Please specify a build system") % s)
        buildsys_type = iter(clss).next().name()
    # plugin namespace literal lost in extraction; 'build_system' is assumed
    cls_ = plugin_manager.get_plugin_class('build_system', buildsys_type)
    return cls_(working_dir, opts=opts, package=package,
                write_build_scripts=write_build_scripts, verbose=verbose,
                build_args=build_args, child_build_args=child_build_args)
Return a new build system that can build the source in working_dir.
2,119
def find(self, **filter_args):
    obj_list = self.findall(**filter_args)
    num_objs = len(obj_list)
    if num_objs == 0:
        raise NotFound(filter_args, self)
    elif num_objs > 1:
        raise NoUniqueMatch(filter_args, self, obj_list)
    else:
        return obj_list[0]
Find exactly one resource in scope of this manager, by matching resource properties against the specified filter arguments, and return its Python resource object (e.g. for a CPC, a :class:`~zhmcclient.Cpc` object is returned). Any resource property may be specified in a filter argument. For details about filter arguments, see :ref:`Filtering`. The zhmcclient implementation handles the specified properties in an optimized way: Properties that can be filtered on the HMC are actually filtered there (this varies by resource type), and the remaining properties are filtered on the client side. If the "name" property is specified as the only filter argument, an optimized lookup is performed that uses a name-to-URI cache in this manager object. This optimized lookup uses the specified match value for exact matching and is not interpreted as a regular expression. Authorization requirements: * see the `list()` method in the derived classes. Parameters: \\**filter_args: All keyword arguments are used as filter arguments. Specifying no keyword arguments causes no filtering to happen. See the examples for usage details. Returns: Resource object in scope of this manager object that matches the filter arguments. This resource object has a minimal set of properties. Raises: :exc:`~zhmcclient.NotFound`: No matching resource found. :exc:`~zhmcclient.NoUniqueMatch`: More than one matching resource found. Exceptions raised by the `list()` methods in derived resource manager classes (see :ref:`Resources`). Examples: * The following example finds a CPC by its name. Because the 'name' resource property is also a valid Python variable name, there are two ways for the caller to specify the filter arguments for this method: As named parameters:: cpc = client.cpcs.find(name='CPC001') As a parameter dictionary:: filter_args = {'name': 'CPC0001'} cpc = client.cpcs.find(**filter_args) * The following example finds a CPC by its object ID. Because the 'object-id' resource property is not a valid Python variable name, the caller can specify the filter argument only as a parameter dictionary:: filter_args = {'object-id': '12345-abc...de-12345'} cpc = client.cpcs.find(**filter_args)
2,120
def _host_libc(self):
    libc_dir_option = self.get_options().libc_dir
    if libc_dir_option:
        maybe_libc_crti = os.path.join(libc_dir_option, self._LIBC_INIT_OBJECT_FILE)
        if os.path.isfile(maybe_libc_crti):
            return HostLibcDev(crti_object=maybe_libc_crti,
                               fingerprint=hash_file(maybe_libc_crti))
        raise self.HostLibcDevResolutionError(
            "Could not locate {} in directory {} provided by the --libc-dir option."
            .format(self._LIBC_INIT_OBJECT_FILE, libc_dir_option))
    return self._get_host_libc_from_host_compiler()
Use the --libc-dir option if provided, otherwise invoke a host compiler to find libc dev.
2,121
def _addSpecfile(self, specfile, path):
    # Five further dictionary keys (each initialised to None) were lost
    # in extraction; only 'path' is recoverable from the docstring.
    self.info[specfile] = {'path': path}
    self.container[specfile] = dict()
Adds a new specfile entry to SiiContainer.info. See also :class:`SiiContainer.addSpecfile()`. :param specfile: the name of an ms-run file :param path: filedirectory for loading and saving the ``siic`` files
2,122
def set_preference(data, chunk_size):
    N_samples, N_features = data.shape
    rng = np.arange(0, N_samples, dtype=int)
    medians = []
    for i in range(15):
        selected_samples = np.random.choice(N_samples, size=chunk_size, replace=False)
        samples = data[selected_samples, :]
        S = -euclidean_distances(samples, data, squared=True)
        n = chunk_size * N_samples - (chunk_size * (chunk_size + 1) / 2)
        rows = np.zeros(0, dtype=int)
        for j in range(chunk_size):  # renamed from i to avoid shadowing the outer loop
            rows = np.append(rows, np.full(N_samples - j, j, dtype=int))
        cols = np.zeros(0, dtype=int)
        for j in range(chunk_size):
            cols = np.append(cols, np.delete(rng, selected_samples[:j + 1]))
        triu_indices = tuple((rows, cols))
        # The subscript was lost in extraction; the median is taken over
        # the upper-triangular entries built above.
        preference = np.median(S[triu_indices], overwrite_input=True)
        medians.append(preference)
        del S
        if i % 4 == 3:
            gc.collect()
    preference = np.median(medians)
    return preference
Return the median of the distribution of pairwise L2 Euclidean distances between samples (the rows of 'data') as the default preference parameter for Affinity Propagation clustering. Parameters ---------- data : array of shape (N_samples, N_features) The data-set submitted for Affinity Propagation clustering. chunk_size : int The size of random subsamples from the data-set whose similarity matrix is computed. The resulting median of the distribution of pairwise distances between the data-points selected as part of a given subsample is stored into a list of medians. Returns ------- preference : float The preference parameter for Affinity Propagation clustering is computed as the median of the list of median pairwise distances between the data-points selected as part of each of 15 rounds of random subsampling.
2,123
def create(self, **kwargs):
    # The endpoint format string was lost in extraction; '%s/' with the
    # resource type name is assumed.
    response = self.ghost.execute_post('%s/' % self._type_name,
                                       json={self._type_name: [kwargs]})
    return self._model_type(response.get(self._type_name)[0])
Creates a new resource. :param kwargs: The properties of the resource :return: The created item returned by the API wrapped as a `Model` object
2,124
def institute(self, institute_id):
    LOG.debug("Fetch institute {}".format(institute_id))
    # query key lost in extraction; '_id' is assumed
    institute_obj = self.institute_collection.find_one({'_id': institute_id})
    if institute_obj is None:
        LOG.debug("Could not find institute {0}".format(institute_id))
    return institute_obj
Fetch a single institute from the backend Args: institute_id(str) Returns: Institute object
2,125
def cholesky(L, b, P=None):
    # The debug and warning message literals were lost in extraction.
    logger.debug('Solving system of {} equations with Cholesky factors.'.format(len(b)))
    is_csr = scipy.sparse.isspmatrix_csr(L)
    is_csc = scipy.sparse.isspmatrix_csc(L)
    if not is_csr and not is_csc:
        warnings.warn('L is neither CSR nor CSC; converting.',
                      scipy.sparse.SparseEfficiencyWarning)
    if is_csc:
        U = L.transpose()
    if not is_csr:
        L = L.tocsr()
    if not is_csc:
        U = L.transpose().tocsr()
    assert scipy.sparse.isspmatrix_csr(L)
    assert scipy.sparse.isspmatrix_csr(U)
    return LU(L, U, b, P=P)
P A P' = L L'
2,126
def wasb_log_exists(self, remote_log_location):
    try:
        return self.hook.check_for_blob(self.wasb_container, remote_log_location)
    except Exception:
        pass
    return False
Check if remote_log_location exists in remote storage :param remote_log_location: log's location in remote storage :return: True if location exists else False
2,127
def forward(self, x, w):
    output = (w.unsqueeze(2) * self.weight(x)).sum(dim=1)
    if self.bias is None:
        return output
    return output + self.bias
Forward function. :param x: Feature indices. :type x: torch.Tensor of shape (batch_size * length) :param w: Feature weights. :type w: torch.Tensor of shape (batch_size * length) :return: Output of linear layer. :rtype: torch.Tensor of shape (batch_size, num_classes)
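A minimal shape sketch of the weighted sum above, with made-up sizes and a plain tensor standing in for the embedded features self.weight(x):

import torch

w = torch.tensor([[1.0, 0.5, 0.0]])       # weights, shape (batch=1, length=3)
emb = torch.randn(1, 3, 4)                # embedded features, (batch, length, dim=4)
out = (w.unsqueeze(2) * emb).sum(dim=1)   # weighted sum over length -> (1, 4)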
2,128
def bin_number(datapoint, intervals):
    index = numpy.searchsorted(intervals, datapoint)
    return [0 if index != i else 1 for i in range(len(intervals) + 1)]
Given a datapoint and intervals representing bins, returns the datapoint in binned (one-hot) form, where the bin containing the value is set to 1 and all others are 0.
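A quick illustration of the one-hot binning (hypothetical values):

# intervals [10, 20] define three bins: (-inf, 10), [10, 20) and [20, inf);
# 15 falls into the middle bin.
print(bin_number(15, [10, 20]))   # [0, 1, 0]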
2,129
def validate(self):
    try:
        self._resource = self.schema(self._resource)
    except MultipleInvalid as e:
        errors = [format_error(err, self.resource_type) for err in e.errors]
        # error key lost in extraction; 'errors' is assumed
        raise exceptions.ValidationError({'errors': errors})
    yield self.check_unique()
Validate the resource using its voluptuous schema
2,130
async def _get_response(self, message):
    view = self.discovery_view(message)
    if not view:
        return
    if inspect.iscoroutinefunction(view):
        response = await view(message)
    else:
        response = view(message)
    return self.prepare_response(response, message)
Get response running the view with await syntax if it is a coroutine function, otherwise just run it the normal way.
2,131
def _connect_signals(self, model):
    for signal in self._signals:
        receiver = self._signals[signal]
        signal.connect(receiver, sender=model,
                       dispatch_uid=self._dispatch_uid(signal, model))
Connect signals for the model.
2,132
def count_divisors(n):
    if not isinstance(n, int):
        raise TypeError("Expecting a strictly positive integer")
    if n <= 0:
        raise ValueError("Expecting a strictly positive integer")
    number_of_divisors = 1
    remain = n
    for p in prime_generator():
        if p > n:
            return number_of_divisors
        exponent = 1
        while remain % p == 0:
            remain = remain // p
            exponent += 1
        number_of_divisors *= exponent
        if remain == 1:
            return number_of_divisors
Count the number of divisors of an integer n Args: n (int): strictly positive integer Returns: The number of distinct divisors of n Raises: TypeError: if n is not an integer ValueError: if n is not strictly positive
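A quick sanity check of the counting logic, assuming prime_generator yields 2, 3, 5, ...:

# 12 = 2**2 * 3 has (2+1) * (1+1) = 6 divisors: 1, 2, 3, 4, 6, 12.
print(count_divisors(12))   # 6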
2,133
def xpathNextAncestor(self, cur):
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    ret = libxml2mod.xmlXPathNextAncestor(self._o, cur__o)
    if ret is None:
        # error message lost in extraction; the generated bindings name
        # the failing C function here
        raise xpathError('xmlXPathNextAncestor() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
Traversal function for the "ancestor" direction the ancestor axis contains the ancestors of the context node; the ancestors of the context node consist of the parent of context node and the parent's parent and so on; the nodes are ordered in reverse document order; thus the parent is the first node on the axis, and the parent's parent is the second node on the axis
2,134
def wnexpd(left, right, window):
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    left = ctypes.c_double(left)
    right = ctypes.c_double(right)
    libspice.wnexpd_c(left, right, ctypes.byref(window))
    return window
Expand each of the intervals of a double precision window. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html :param left: Amount subtracted from each left endpoint. :type left: float :param right: Amount added to each right endpoint. :type right: float :param window: Window to be expanded. :type window: spiceypy.utils.support_types.SpiceCell :return: Expanded Window. :rtype: spiceypy.utils.support_types.SpiceCell
2,135
def _partition_estimators(n_estimators, n_jobs):
    if n_jobs == -1:
        n_jobs = min(cpu_count(), n_estimators)
    else:
        n_jobs = min(n_jobs, n_estimators)
    n_estimators_per_job = (n_estimators // n_jobs) * np.ones(n_jobs, dtype=np.int)
    n_estimators_per_job[:n_estimators % n_jobs] += 1
    starts = np.cumsum(n_estimators_per_job)
    return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
Private function used to partition estimators between jobs.
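A worked instance of the partitioning arithmetic:

# 10 estimators over 4 jobs: 10 // 4 = 2 each, and the first 10 % 4 = 2
# jobs take one extra -> [3, 3, 2, 2], with start offsets [0, 3, 6, 8, 10].
n_jobs, counts, starts = _partition_estimators(10, 4)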
2,136
def append_main_thread(self):
    thread = MainThread(main_queue=self.main_queue,
                        main_spider=self.main_spider,
                        branch_spider=self.branch_spider)
    thread.daemon = True
    thread.start()
create & start main thread :return: None
2,137
def idf(posting, document_count):
    documents_with_term = 0
    for field_name in posting:
        if field_name == "_index":
            continue
        documents_with_term += len(posting[field_name].keys())
    x = (document_count - documents_with_term + 0.5) / (documents_with_term + 0.5)
    return math.log(1 + abs(x))
A function to calculate the inverse document frequency for a posting. This is shared between the builder and the index.
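A worked instance of the formula: for a term present in 1 of 10 documents, x = (10 - 1 + 0.5) / (1 + 0.5) ≈ 6.33, so idf = log(1 + 6.33) ≈ 1.99; rarer terms score higher.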
2,138
def wiki_pages(self, extra_params=None):
    return self.api._get_json(
        WikiPage,
        space=self,
        rel_path=self._build_rel_path(),
        extra_params=extra_params,
    )
All Wiki Pages with access to this Space
2,139
def transition(prior_state, next_state):
    if next_state not in STATES[prior_state][TRANSITION]:
        acceptable = STATES[prior_state][TRANSITION]
        err = "cannot {}->{} may only {}->{}".format(
            prior_state, next_state, prior_state, acceptable)
        raise InvalidStateTransition(err)
    return next_state
Transitions to a non-standard state Raises InvalidStateTransition if next_state is not allowed. :param prior_state: <str> :param next_state: <str> :return: <str>
2,140
def to_(self, attrvals):
    attributes = []
    for key, value in attrvals.items():
        key = key.lower()
        attributes.append(factory(saml.Attribute,
                                  name=key,
                                  name_format=self.name_format,
                                  attribute_value=do_ava(value)))
    return attributes
Create a list of Attribute instances. :param attrvals: A dictionary of attributes and values :return: A list of Attribute instances
2,141
def get_reminders_per_page(self, per_page=1000, page=1, params=None):
    return self._get_resource_per_page(resource=REMINDERS, per_page=per_page,
                                       page=page, params=params)
Get reminders per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
2,142
def protege_data(datas_str, sens):
    return (bytes(datas_str, encoding="utf8") if sens
            else str(datas_str, encoding="utf8"))
Used to crypt/decrypt data before saving locally. Override if security is needed. bytes -> str when decrypting, str -> bytes when crypting. :param datas_str: When crypting, str; when decrypting, bytes :param sens: True to crypt, False to decrypt
2,143
async def kick(self, channel, target, reason=None):
    if not self.in_channel(channel):
        raise NotInChannel(channel)
    # command literal lost in extraction; 'KICK' is the IRC command here
    if reason:
        await self.rawmsg('KICK', channel, target, reason)
    else:
        await self.rawmsg('KICK', channel, target)
Kick user from channel.
2,144
def createStatus(self, change_id, revision_id, name, value, abstain=None,
                 rerun=None, comment=None, url=None, reporter=None,
                 category=None, duration=None):
    # The payload keys were lost in extraction and are restored from the
    # parameter names, which match the verify-status REST API fields.
    payload = {'name': name, 'value': value}
    if abstain is not None:
        payload['abstain'] = abstain
    if rerun is not None:
        payload['rerun'] = rerun
    if comment is not None:
        payload['comment'] = comment
    if url is not None:
        payload['url'] = url
    if reporter is not None:
        payload['reporter'] = reporter
    if category is not None:
        payload['category'] = category
    if duration is not None:
        payload['duration'] = duration
    if self._verbose:
        log.debug('Sending status to the verify-status plugin',  # message lost in extraction
                  change_id=change_id, revision_id=revision_id, data=payload)
    # The URL path literals were lost in extraction; the segments below
    # are an assumption based on the endpoint documented above.
    return self._http.post('/'.join([
        '/changes', str(change_id), 'revisions', str(revision_id),
        'verify-status~verifications']), json=payload)
Abstract the POST REST api documented here: https://gerrit.googlesource.com/plugins/verify-status/+/master/src/main/resources/Documentation/rest-api-changes.md :param change_id: The change_id for the change tested (can be in the long form e.g: myProject~master~I8473b95934b5732ac55d26311a706c9c2bde9940 or in the short integer form). :param revision_id: the revision_id tested can be the patchset number or the commit id (short or long). :param name: The name of the job. :param value: The pass/fail result for this job: -1: fail 0: unstable, 1: succeed :param abstain: Whether the value counts as a vote (defaults to false) :param rerun: Whether this result is from a re-test on the same patchset :param comment: A short comment about this job :param url: The url link to more info about this job :reporter: The user that verified this job :category: A category for this job "duration": The time it took to run this job :return: A deferred with the result from Gerrit.
2,145
def complain(self, id, is_spam):
    # All string literals in this call were lost in extraction; the
    # method/operation names and data keys below are assumptions based
    # on the Yandex CleanWeb 'complain' API linked in the docstring.
    r = self.request('post', 'complain',
                     data={'id': id, 'spamtype': 'spam' if is_spam else 'ham'})
    return True
http://api.yandex.ru/cleanweb/doc/dg/concepts/complain.xml
2,146
def create(path, value='', acls=None, ephemeral=False, sequence=False,
           makepath=False, profile=None, hosts=None, scheme=None,
           username=None, password=None, default_acl=None):
    if acls is None:
        acls = []
    acls = [make_digest_acl(**acl) for acl in acls]
    conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
                        username=username, password=password,
                        default_acl=default_acl)
    return conn.create(path, salt.utils.stringutils.to_bytes(value),
                       acls, ephemeral, sequence, makepath)
Create Znode path path of znode to create value value to assign to znode (Default: '') acls list of acl dictionaries to be assigned (Default: None) ephemeral indicate node is ephemeral (Default: False) sequence indicate node is suffixed with a unique index (Default: False) makepath Create parent paths if they do not exist (Default: False) profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181') scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.create /test/name daniel profile=prod
2,147
def _create_polynomial_model(
        name: str, symbol: str, degree: int, ds: DataSet, dss: dict):
    ds_name = ds.name.split(".")[0].lower()
    file_name = f"{name.lower()}-{symbol.lower()}-polynomialmodelt-{ds_name}"
    newmod = PolynomialModelT.create(ds, symbol, degree)
    newmod.plot(dss, _path(f"data/{file_name}.pdf"), False)
    newmod.write(_path(f"data/{file_name}.json"))
Create a polynomial model to describe the specified property based on the specified data set, and save it to a .json file. :param name: material name. :param symbol: property symbol. :param degree: polynomial degree. :param ds: the source data set. :param dss: dictionary of all datasets.
2,148
def excluded(filename):
    basename = os.path.basename(filename)
    for pattern in options.exclude:
        if fnmatch(basename, pattern):
            return True
Check if options.exclude contains a pattern that matches filename.
2,149
def save(self, *args, **kwargs):
    self.geocode()
    return super(GeoMixin, self).save(*args, **kwargs)
Extends model ``save()`` to allow dynamic geocoding
2,150
def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
    basePath = permWorkDir
    filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
    filepath = os.path.join(basePath, filename)
    return filepath
Returns filepath where to store HyperSearch JobID Parameters: ---------------------------------------------------------------------- permWorkDir: Directory path for saved jobID file outputLabel: Label string for incorporating into file name for saved jobID retval: Filepath where to store HyperSearch JobID
2,151
def delete_exchange_for_vhost(self, exchange, vhost, if_unused=False):
    # The endpoint format string and parameter key were lost in
    # extraction; they are restored per the RabbitMQ management API.
    self._api_delete(
        '/api/exchanges/{}/{}'.format(
            urllib.parse.quote_plus(vhost),
            urllib.parse.quote_plus(exchange)),
        params={'if-unused': if_unused},
    )
Delete an individual exchange. You can add the parameter ``if_unused=True``. This prevents the delete from succeeding if the exchange is bound to a queue or as a source to another exchange. :param exchange: The exchange name :type exchange: str :param vhost: The vhost name :type vhost: str :param if_unused: Set to ``True`` to only delete if it is unused :type if_unused: bool
2,152
def open(self):
    try:
        connection = self.transport_class(
            host=self.hostname,
            username=self.username,
            password=self.password,
            timeout=self.timeout,
            **self.eapi_kwargs
        )
        if self.device is None:
            self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
        self.device.run_commands(["show clock"], encoding="text")
    except ConnectionError as ce:
        raise ConnectionException(py23_compat.text_type(ce))
Implementation of NAPALM method open.
2,153
def set_velocities(self, velocities):
    assert len(velocities) == len(self.mol)
    self.params["velocity"] = velocities
:param velocities (au): list of list of atom velocities :return:
2,154
def _apply_to_data(data, func, unpack_dict=False):
    apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
    if isinstance(data, dict):
        if unpack_dict:
            return [apply_(v) for v in data.values()]
        return {k: apply_(v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        try:
            return [apply_(x) for x in data]
        except TypeError:
            return func(data)
    return func(data)
Apply a function to data, trying to unpack different data types.
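A small illustration of the TypeError fallback, using len as the function (hypothetical input):

# len(1) raises TypeError inside the list branch, so the whole inner
# list is handed to func instead: {'a': [1, 2]} -> {'a': 2}
print(_apply_to_data({'a': [1, 2]}, len))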
2,155
def weights_prepend_inputs_to_targets(labels):
    past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
    nonzero = to_float(labels)
    return to_float(tf.not_equal(past_first_zero * nonzero, 0))
Assign weight 1.0 to only the "targets" portion of the labels. Weight 1.0 is assigned to all nonzero labels past the first zero. See prepend_mode in common_hparams.py Args: labels: A Tensor of int32s. Returns: A Tensor of floats.
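An equivalent numpy sketch of the masking logic, on a made-up batch:

import numpy as np

labels = np.array([[7, 5, 0, 3, 9, 0]])
past_first_zero = np.cumsum(labels == 0, axis=1)   # [[0 0 1 1 1 2]]
weights = ((past_first_zero * labels) != 0).astype(float)
print(weights)   # [[0. 0. 0. 1. 1. 0.]] -- only nonzeros past the first zero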
2,156
def _add_person_to_group(person, group):
    from karaage.datastores import add_accounts_to_group
    from karaage.datastores import add_accounts_to_project
    from karaage.datastores import add_accounts_to_institute
    a_list = person.account_set
    add_accounts_to_group(a_list, group)
    for project in group.project_set.all():
        add_accounts_to_project(a_list, project)
    for institute in group.institute_set.all():
        add_accounts_to_institute(a_list, institute)
Call datastores after adding a person to a group.
2,157
def add_latlonalt(self, lat, lon, altitude, terrain_alt=False):
    if terrain_alt:
        frame = mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT
    else:
        frame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT
    p = mavutil.mavlink.MAVLink_mission_item_message(
        self.target_system, self.target_component, 0, frame,
        mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
        0, 0, 0, 0, 0, 0, lat, lon, altitude)
    self.add(p)
add a point via latitude/longitude/altitude
2,158
def get_route(ip):
    # The PowerShell command string, the execution-module name and the
    # dictionary keys were lost in extraction; they are restored from the
    # named regex groups and Salt's cmd.run conventions.
    cmd = 'Find-NetRoute -RemoteIPAddress {0}'.format(ip)
    out = __salt__['cmd.run'](cmd, shell='powershell', python_shell=True)
    regexp = re.compile(
        r"^IPAddress\s+:\s(?P<source>[\d\.:]+)?.*"
        r"^InterfaceAlias\s+:\s(?P<interface>[\w\.\:\-\ ]+)?.*"
        r"^NextHop\s+:\s(?P<gateway>[\d\.:]+)",
        flags=re.MULTILINE | re.DOTALL
    )
    m = regexp.search(out)
    ret = {
        'destination': ip,
        'gateway': m.group('gateway'),
        'interface': m.group('interface'),
        'source': m.group('source')
    }
    return ret
Return routing information for given destination ip .. versionadded:: 2016.11.5 CLI Example:: salt '*' network.get_route 10.10.10.10
2,159
def _explain(self, tree):
    self._explaining = True
    self._call_list = []
    old_call = self.connection.call

    def fake_call(command, **kwargs):
        if command == "describe_table":
            return old_call(command, **kwargs)
        self._call_list.append((command, kwargs))
        raise ExplainSignal

    self.connection.call = fake_call
    try:
        ret = self._run(tree[1])
        try:
            list(ret)
        except TypeError:
            pass
    finally:
        self.connection.call = old_call
        self._explaining = False
Set up the engine to do a dry run of a query
2,160
def _run_checks(self):
    if self.recipe is not None:
        if not os.path.exists(self.recipe):
            bot.error("Cannot find %s, is the path correct?" % self.recipe)
            sys.exit(1)
        self.recipe = os.path.abspath(self.recipe)
basic sanity checks for the file name (and others if needed) before attempting parsing.
2,161
def _filter_by_pattern(self, pattern):
    try:
        _len = len(pattern)
    except TypeError:
        raise TypeError("pattern is not a list of Booleans. Got {}".format(
            type(pattern)))
    _filt_values = [d for i, d in enumerate(self._values) if pattern[i % _len]]
    _filt_datetimes = [d for i, d in enumerate(self.datetimes) if pattern[i % _len]]
    return _filt_values, _filt_datetimes
Filter the Data Collection based on a list of booleans.
2,162
def do_handshake(self):
    _logger.debug("Initiating handshake...")
    try:
        self._wrap_socket_library_call(
            lambda: SSL_do_handshake(self._ssl.value),
            ERR_HANDSHAKE_TIMEOUT)
    except openssl_error() as err:
        if err.ssl_error == SSL_ERROR_SYSCALL and err.result == -1:
            raise_ssl_error(ERR_PORT_UNREACHABLE, err)
        raise
    self._handshake_done = True
    _logger.debug("...completed handshake")
Perform a handshake with the peer This method forces an explicit handshake to be performed with either the client or server peer.
2,163
def merge_partition(self, partition, path, value):
    dct = self.partitions[partition]
    *heads, tail = path
    for part in heads:
        dct = dct.setdefault(part, dict())
    dct[tail] = value
Merge a value into a partition for a key path.
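A small illustration of the nested merge (hypothetical partition state):

# Starting from an empty partition 'p', merging 42 at ['a', 'b', 'c']
# creates the intermediate dicts: {'a': {'b': {'c': 42}}}.
obj.merge_partition('p', ['a', 'b', 'c'], 42)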
2,164
def run(
    self,
    inputs: Dict[str, Union[float, Iterable]],
    covers: Dict[str, Union[float, Iterable]],
    torch_size: Optional[int] = None,
) -> Union[float, Iterable]:
    if len(covers) != len(self.cover_nodes):
        raise ValueError("Incorrect number of cover values.")
    for node_name, val in covers.items():
        self.nodes[node_name]["value"] = val
    return super().run(inputs, torch_size)
Executes the FIB over a particular set of inputs and returns the result. Args: inputs: Input set where keys are the names of input nodes in the GrFN and each key points to a set of input values (or just one). covers: Cover values, keyed by cover-node name, assigned to the corresponding nodes before execution. Returns: A set of outputs from executing the GrFN, one for every set of inputs.
2,165
def configuration(t0: date, t1: Optional[date] = None,
                  steps_per_day: int = None) -> Tuple[np.ndarray, np.ndarray]:
    if steps_per_day is None:
        steps_per_day = 1
    dt: float = 1.0 / float(steps_per_day)
    jd0: int = julian_day(t0)
    if t1 is not None:
        jd1: int = julian_day(t1)
    else:
        jd1: int = jd0 + dt
    jd: np.ndarray = np.arange(jd0, jd1, dt)
    N: int = len(jd)
    B: int = len(bodies)
    dims: int = B * 3
    q: np.ndarray = np.zeros((N, dims))
    v: np.ndarray = np.zeros((N, dims))
    body_ids: List[int] = [jpl_body_id[body] for body in bodies]
    for i, body_id in enumerate(body_ids):
        slice_i = slice(3*i, 3*(i+1))
        qi, vi = jpl_kernel[0, body_id].compute_and_differentiate(jd)
        q[:, slice_i] = qi.T * km2m
        v[:, slice_i] = vi.T * (km2m / day2sec)
    return q, v
Get the positions and velocities of the sun and eight planets Returned as a tuple q, v q: Nx3 array of positions (x, y, z) in the J2000.0 coordinate frame.
2,166
def readline(self, limit=-1, delim=b'\n'):
    self._check_readable()
    chunks = []
    while True:
        chunk = self._buffer.get_chunk(limit, delim)
        if not chunk:
            break
        chunks.append(chunk)
        if chunk.endswith(delim):
            break
        if limit >= 0:
            limit -= len(chunk)
            if limit == 0:
                break
    if not chunks and not self._buffer.eof and self._buffer.error:
        raise compat.saved_exc(self._buffer.error)
    return b''.join(chunks)
Read a single line. If EOF is reached before a full line can be read, a partial line is returned. If *limit* is specified, at most this many bytes will be read.
2,167
def iter_all_repos(self, number=-1, since=None, etag=None, per_page=None):
    # The URL segment and parameter keys were lost in extraction;
    # 'repositories', 'since' and 'per_page' follow the GitHub API.
    url = self._build_url('repositories')
    return self._iter(int(number), url, Repository,
                      params={'since': since, 'per_page': per_page},
                      etag=etag)
Iterate over every repository in the order they were created. :param int number: (optional), number of repositories to return. Default: -1, returns all of them :param int since: (optional), last repository id seen (allows restarting this iteration) :param str etag: (optional), ETag from a previous request to the same endpoint :param int per_page: (optional), number of repositories to list per request :returns: generator of :class:`Repository <github3.repos.Repository>`
2,168
def cmd(send, msg, args):
    # Most string literals in this function were lost in extraction; the
    # argument names, config keys and messages below are reconstructed
    # from the surviving identifiers and are partly assumptions.
    if not msg:
        send('What do you want to search for?')
        return
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('query', nargs='*')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--user', dest='user', default=None)
    group.add_argument('--count', dest='count', type=int, default=1)
    group.add_argument('--random', action='store_true', default=False)
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    api = get_search_api(args['config'])
    query = TwitterSearchOrder()
    keywords = [' '.join(cmdargs.query)]
    if cmdargs.user:
        keywords += ['from:{}'.format(cmdargs.user)]
    query.set_keywords(keywords)
    query.set_language('en')
    query.set_result_type('recent')
    query.set_include_entities(False)
    query.set_count(cmdargs.count)
    results = list(api.search_tweets_iterable(query))
    if not results:
        send('No tweets found.')
        return
    if cmdargs.random:
        shuffle(results)
    max_chan_tweets = 5
    max_pm_tweets = 25
    if cmdargs.count > max_pm_tweets:
        send("Thats a lot of tweets! The maximum allowed in a channel is {}".format(max_chan_tweets))
    for i in range(0, min(cmdargs.count, max_pm_tweets)):
        if cmdargs.count <= max_chan_tweets:
            send(tweet_text(results[i]))
        else:
            send(tweet_text(results[i]), target=args['nick'])
Search the Twitter API. Syntax: {command} <query> <--user username> <--count 1>
2,169
def p_unary_6(self, program):
    if program[1].name not in self.external_functions:
        raise QasmError("Illegal external function call: ",
                        str(program[1].name))
    program[0] = node.External([program[1], program[3]])
unary : id '(' expression ')'
2,170
def calc_cost(y, yhat, cost_matrix):
    return np.mean(cost_matrix[list(y), list(yhat)])
Calculate the cost with given cost matrix y : ground truth yhat : estimation cost_matrix : array-like, shape=(n_classes, n_classes) The ith row, jth column represents the cost of the ground truth being ith class and prediction as jth class.
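A worked example of the cost lookup with a hypothetical 2-class matrix:

import numpy as np

cost_matrix = np.array([[0, 1],    # truth 0: predicting 1 costs 1
                        [5, 0]])   # truth 1: predicting 0 costs 5
y, yhat = [0, 1, 1], [1, 1, 0]
# picks cost_matrix[0,1]=1, [1,1]=0, [1,0]=5 -> mean 2.0
print(np.mean(cost_matrix[list(y), list(yhat)]))   # 2.0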
2,171
def finish_registration(self, heart):
    try:
        (eid, queue, reg, purge) = self.incoming_registrations.pop(heart)
    except KeyError:
        self.log.error("registration::tried to finish nonexistant registration",
                       exc_info=True)
        return
    self.log.info("registration::finished registering engine %i:%r", eid, queue)
    if purge is not None:
        purge.stop()
    control = queue
    self.ids.add(eid)
    self.keytable[eid] = queue
    self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
                                        control=control, heartbeat=heart)
    self.by_ident[queue] = eid
    self.queues[eid] = list()
    self.tasks[eid] = list()
    self.completed[eid] = list()
    self.hearts[heart] = eid
    content = dict(id=eid, queue=self.engines[eid].queue.decode())
    if self.notifier:
        self.session.send(self.notifier, "registration_notification", content=content)
    self.log.info("engine::Engine Connected: %i", eid)
Second half of engine registration, called after our HeartMonitor has received a beat from the Engine's Heart.
2,172
def kibana_config(self):
    # The uncomment pattern, the sed search/replace strings and the four
    # sudo commands were lost in extraction; only the call skeleton is
    # recoverable.
    uncomment("/etc/kibana/kibana.yml", ...)
    sed(..., ..., ... .format(env.host_string), use_sudo=True)
    sudo(...)
    sudo(...)
    sudo(...)
    sudo(...)
config kibana :return:
2,173
def _validate_auths(self, path, obj, app):
    errs = []
    for k, v in six.iteritems(obj.authorizations or {}):
        if k not in app.raw.authorizations:
            errs.append('auth {0} not found'.format(k))  # message lost in extraction
        # the auth-type literals were lost in extraction; per the
        # docstring they are 'apiKey' and 'basicAuth'
        if app.raw.authorizations[k].type in ('apiKey', 'basicAuth') and v != []:
            errs.append('auth {0} should be an empty list'.format(k))  # message lost in extraction
    return path, obj.__class__.__name__, errs
make sure that apiKey and basicAuth are empty list in Operation object.
2,174
def _get_line_array_construct(self):
    from_bus = integer.setResultsName("fbus")
    to_bus = integer.setResultsName("tbus")
    s_rating = real.setResultsName("s_rating")
    v_rating = real.setResultsName("v_rating")
    f_rating = real.setResultsName("f_rating")
    length = real.setResultsName("length")
    v_ratio = real.setResultsName("v_ratio")
    r = real.setResultsName("r")
    x = real.setResultsName("x")
    b = real.setResultsName("b")
    tap_ratio = real.setResultsName("tap")
    phase_shift = real.setResultsName("shift")
    i_limit = Optional(real).setResultsName("i_limit")
    p_limit = Optional(real).setResultsName("p_limit")
    s_limit = Optional(real).setResultsName("s_limit")
    status = Optional(boolean).setResultsName("status")

    line_data = from_bus + to_bus + s_rating + v_rating + \
        f_rating + length + v_ratio + r + x + b + tap_ratio + \
        phase_shift + i_limit + p_limit + s_limit + status + scolon
    line_data.setParseAction(self.push_line)

    line_array = Literal("Line.con") + "=" + "[" + "..." + \
        ZeroOrMore(line_data + Optional("]" + scolon))
    return line_array
Returns a construct for an array of line data.
2,175
def _compute_metric_names(self):
    session_runs = self._build_session_runs_set()
    metric_names_set = set()
    run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(
        scalar_metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(run_to_tag_to_content):
        session = _find_longest_parent_path(session_runs, run)
        if not session:
            continue
        group = os.path.relpath(run, session)
        if group == ".":
            group = ""
        metric_names_set.update((tag, group) for tag in tag_to_content.keys())
    metric_names_list = list(metric_names_set)
    metric_names_list.sort()
    return metric_names_list
Computes the list of metric names from all the scalar (run, tag) pairs. The return value is a list of (tag, group) pairs representing the metric names. The list is sorted in Python tuple-order (lexicographical). For example, if the scalar (run, tag) pairs are: ("exp/session1", "loss") ("exp/session2", "loss") ("exp/session2/eval", "loss") ("exp/session2/validation", "accuracy") ("exp/no-session", "loss_2"), and the runs corresponding to sessions are "exp/session1", "exp/session2", this method will return [("loss", ""), ("loss", "/eval"), ("accuracy", "/validation")] More precisely, each scalar (run, tag) pair is converted to a (tag, group) metric name, where group is the suffix of run formed by removing the longest prefix which is a session run. If no session run is a prefix of 'run', the pair is skipped. Returns: A python list containing pairs. Each pair is a (tag, group) pair representing a metric name used in some session.
2,176
def yearly_plots(df, variable, renormalize=True,
                 horizontal_axis_labels_days=False,
                 horizontal_axis_labels_months=True,
                 plot=True, scatter=False,
                 linestyle="-", linewidth=1, s=1):
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable)
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values,
                     linestyle=linestyle, linewidth=linewidth,
                     label=year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s=s)
    if horizontal_axis_labels_months:
        plt.xticks(
            [15.5, 45, 74.5, 105, 135.5, 166,
             196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July",
             "August", "September", "October", "November", "December"])
    plt.legend()
Create yearly plots of a variable in a DataFrame, optionally renormalized. It is assumed that the DataFrame index is datetime.
2,177
def _validate_names(self, name=None, names=None, deep=False):
    from copy import deepcopy
    if names is not None and name is not None:
        raise TypeError("Can only provide one of `names` and `name`")
    elif names is None and name is None:
        return deepcopy(self.names) if deep else self.names
    elif names is not None:
        if not is_list_like(names):
            raise TypeError("Must pass list-like as `names`.")
        return names
    else:
        if not is_list_like(name):
            return [name]
        return name
Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex.
2,178
def date(ctx, year, month, day):
    return _date(conversions.to_integer(year, ctx),
                 conversions.to_integer(month, ctx),
                 conversions.to_integer(day, ctx))
Defines a date value
2,179
def path(self):
    node = self
    path = []
    while node:
        path.append((node.action, node.state))
        node = node.parent
    return list(reversed(path))
Path (list of nodes and actions) from root to this node.
2,180
def mutate(self, info_in):
    if self.failed:
        raise ValueError("{} cannot mutate as it has failed.".format(self))
    from transformations import Mutation
    info_out = type(info_in)(origin=self,
                             contents=info_in._mutated_contents())
    Mutation(info_in=info_in, info_out=info_out)
Replicate an info + mutation. To mutate an info, that info must have a method called ``_mutated_contents``.
2,181
def ball(rmax=3, rmin=0, shape=128, limits=[-4, 4], draw=True, show=True, **kwargs):
    import ipyvolume.pylab as p3
    __, __, __, r, _theta, _phi = xyz(shape=shape, limits=limits, spherical=True)
    data = r * 0
    data[(r < rmax) & (r >= rmin)] = 0.5
    if "data_min" not in kwargs:
        kwargs["data_min"] = 0
    if "data_max" not in kwargs:
        kwargs["data_max"] = 1
    data = data.T
    if draw:
        vol = p3.volshow(data=data, **kwargs)
        if show:
            p3.show()
        return vol
    else:
        return data
Show a ball.
2,182
def newText(content):
    ret = libxml2mod.xmlNewText(content)
    if ret is None:
        # error message lost in extraction; the generated bindings name
        # the failing C function here
        raise treeError('xmlNewText() failed')
    return xmlNode(_obj=ret)
Creation of a new text node.
2,183
def assign(self, V, py):
    # The Value keys were lost in extraction; 'choices' and 'index' are
    # assumed from the enum semantics.
    if isinstance(py, (bytes, unicode)):
        for i, C in enumerate(V['choices'] or self._choices):
            if py == C:
                V['index'] = i
                return
    V['index'] = py
Store python value in Value
2,184
def set_title(self, title):
    self._my_map['title'] = self._get_display_text(
        title, self.get_title_metadata())
Sets the title. arg: title (string): the new title raise: InvalidArgument - ``title`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``title`` is ``null`` *compliance: mandatory -- This method must be implemented.*
2,185
def explore_path_encompass(self, task_num, dirpath):
    base, path = dirpath
    directories = []
    nondirectories = []
    self._printer("Task: " + str(task_num) + " >>> Explored path: " + path,
                  stream=True)
    for filename in os.listdir(base + os.sep + path):
        fullname = os.path.join(path, filename)
        if os.path.isdir(base + os.sep + fullname):
            directories.append((base, fullname))
        else:
            nondirectories.append((base, fullname))
    self.add_path(nondirectories)
    return directories
Explore path to discover unsearched directories and save filepaths :param task_num: Processor ID :param dirpath: Tuple (base directory, path), path information pulled from unsearched Queue :return: Directories to add to unsearched Queue
2,186
def _detect_content_type(self, filename):
    name, ext = os.path.splitext(filename)
    if not ext:
        raise MessageError('File requires an extension.')  # message lost in extraction
    ext = ext.lower()
    if ext.lstrip('.') in self._banned_extensions:
        err = 'Extension "{0}" is not allowed.'  # message lost in extraction
        raise MessageError(err.format(ext))
    if not mimetypes.inited:
        mimetypes.init()
    return mimetypes.types_map.get(ext, self._default_content_type)
Determine the mimetype for a file. :param filename: Filename of file to detect.
2,187
def get_html_column(self):
    # Every HTML template literal in this method was lost in extraction;
    # only the call skeleton and the interpolated fields survive, so the
    # templates are left as ... placeholders.
    panel_id = "panel_{}".format(self.name)
    return ["<h2>{}</h2>".format(self.title) + ... .format(self.tar_fn())] + [
        (" <br />" + os.linesep).join(
            [
                ... .format(
                    bam_name=bam.get_name(),
                    bam_html=bam.html_fn(),
                    bam_svg=bam.svg_fn(),
                    panel_id=panel_id,
                )
                for bam in self.bams
            ]
        )
        + ... .format(self.tar_fn()),
        ... .format(
            html=self.bams[0]._html_fn,
            svg=self.bams[0]._svg_fn,
            panel_id=panel_id,
        ),
    ] + [
        ... .format(
            svg=svg,
            gp=self._gp_fn,
        )
        for svg in self._svg_fns
    ]
Get a HTML column for this panel.
2,188
def channel_angle(im, chanapproxangle=None, *, isshiftdftedge=False,
                  truesize=None):
    im = np.asarray(im)
    if not isshiftdftedge:
        im = edge(im)
    return reg.orientation_angle(im, isshiftdft=isshiftdftedge,
                                 approxangle=chanapproxangle,
                                 truesize=truesize)
Extract the channel angle from the rfft Parameters: ----------- im: 2d array The channel image chanapproxangle: number, optional If not None, an approximation of the result isshiftdftedge: boolean, default False If The image has already been treated: (edge, dft, fftshift), set to True truesize: 2 numbers, required if isshiftdftedge is True The true size of the image Returns: -------- angle: number The channel angle
2,189
def _process_feature_dbxref(self, limit):
    # Many short string literals (file name, csv dialect, hash keys and
    # regex patterns) were lost in extraction; the values below are
    # reconstructed from the surrounding code and are partly assumptions.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, 'feature_dbxref'))
    LOG.info("processing feature_dbxref mappings")
    with open(raw, 'r') as f:
        f.readline()  # skip the header row
        filereader = csv.reader(f, delimiter='\t', quotechar='"')
        for line in filereader:
            (feature_dbxref_id, feature_id, dbxref_id, is_current) = line
            if is_current == 'f':
                continue
            feature_key = feature_id
            if self.test_mode and int(feature_key) not in \
                    self.test_keys['gene'] + self.test_keys['allele']:
                continue
            if feature_key not in self.idhash['feature']:
                continue
            feature_id = self.idhash['feature'][feature_key]
            dbxref_key = dbxref_id
            dbxrefs = self.dbxrefs.get(dbxref_key)
            if dbxrefs is not None:
                for d in dbxrefs:
                    did = dbxrefs[d]  # lookup assumed; the original line was garbled
                    if did == feature_id:
                        continue
                    dlabel = self.label_hash.get(did)
                    # both regex patterns were lost in extraction; 'FB'
                    # prefixes are assumed for FlyBase identifiers
                    if re.search(r'FB', feature_id):
                        if not re.match(r'FB', did):
                            pass
                    elif did is not None and dlabel is not None \
                            and feature_id is not None:
                        model.addIndividualToGraph(did, dlabel)
                        model.addXref(feature_id, did)
            line_counter += 1
            if not self.test_mode \
                    and limit is not None and line_counter > limit:
                break
    return
This is the mapping between the flybase features and external repositories. Generally we want to leave the flybase feature id as the primary identifier. But we need to make the equivalences/sameAs. :param limit: :return:
2,190
def is_searchable(self):
    first = alpha_chars(self.first or u'')
    last = alpha_chars(self.last or u'')
    raw = alpha_chars(self.raw or u'')
    return (len(first) >= 2 and len(last) >= 2) or len(raw) >= 4
A bool value that indicates whether the name is a valid name to search by.
2,191
def is_affirmative(self, section, option):
    return self.has_option(section, option) and \
        lib.is_affirmative(self.get(section, option))
Return true if the section option combo exists and it is set to a truthy value.
2,192
def get_updates(self, display_all_distributions=False, verbose=False):
    if verbose:
        # the log format string was lost in extraction; a plain message
        # format is assumed
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format='%(message)s',
        )
    logging.info('Checking installed packages for updates...')  # message lost in extraction
    updates = self._get_environment_updates(
        display_all_distributions=display_all_distributions
    )
    if updates:
        for update in updates:
            logging.info(update)
    if updates and self._csv_file_name:
        self.write_updates_to_csv(updates)
    if updates and self._new_config:
        self.write_new_config(updates)
    return updates
When called, get the environment updates and write updates to a CSV file and if a new config has been provided, write a new configuration file. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. verbose (bool): If ``True``, log to terminal.
2,193
def __isListOfTexts(self, docs):
    return isinstance(docs, list) and \
        all(isinstance(d, (basestring, Text)) for d in docs)
Checks whether the input is a list of strings or Text-s;
2,194
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
    handle = ct.c_int()
    if (sys.version_info[0] == 3) and (type(collisionObjectName) is str):
        collisionObjectName = collisionObjectName.encode()
    return (c_GetCollisionHandle(clientID, collisionObjectName,
                                 ct.byref(handle), operationMode),
            handle.value)
Please have a look at the function description/documentation in the V-REP user manual
2,195
def draw(self, time, frametime, target):
    for effect in self.effects:
        value = effect.rocket_timeline_track.time_value(time)
        if value > 0.5:
            effect.draw(time, frametime, target)
Fetch track value for every runnable effect. If the value is > 0.5 we draw it.
2,196
def call_fn(self, what, *args, **kwargs):
    # The debug format string, the indent literal and the container key
    # were lost in extraction; 'initialized_ports' is assumed, while
    # 'init_adapter' is named in the docstring.
    logger.debug('{}{}{} {}'.format(
        self.call_stack_level, ' ' * 4 * self.call_stack_level,
        what, arguments_as_string(args, kwargs)))
    port, fn_name = self._what(what)
    if port not in self['initialized_ports']:
        self._call_fn(port, 'init_adapter')
        self['initialized_ports'].append(port)
    return self._call_fn(port, fn_name, *args, **kwargs)
Lazy call init_adapter then call the function
2,197
def create_tag(self, tag_name=None, **properties):
    tag = Gtk.TextTag(name=tag_name, **properties)
    self._get_or_create_tag_table().add(tag)
    return tag
Creates a tag and adds it to the tag table of the TextBuffer. :param str tag_name: Name of the new tag, or None :param **properties: Keyword list of properties and their values :returns: A new tag. This is equivalent to creating a Gtk.TextTag and then adding the tag to the buffer's tag table. The returned tag is owned by the buffer's tag table. If ``tag_name`` is None, the tag is anonymous. If ``tag_name`` is not None, a tag called ``tag_name`` must not already exist in the tag table for this buffer. Properties are passed as a keyword list of names and values (e.g. foreground='DodgerBlue', weight=Pango.Weight.BOLD)
2,198
def get_dataset(self, key, info):
    logger.debug("Getting raw data")
    res = super(HRITGOESFileHandler, self).get_dataset(key, info)
    # attribute keys lost in extraction; names below are assumptions
    self.mda['calibration_parameters'] = self._get_calibration_params()
    res = self.calibrate(res, key.calibration)
    new_attrs = info.copy()
    new_attrs.update(res.attrs)
    res.attrs = new_attrs
    res.attrs['platform_name'] = self.platform_name
    res.attrs['sensor'] = 'goes_imager'  # value assumed; literal lost in extraction
    return res
Get the data from the files.
2,199
def set_pending_boot_mode(self, boot_mode):
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    if boot_mode.upper() not in BOOT_MODE_MAP_REV.keys():
        # message literal lost in extraction; wording assumed
        msg = (('Invalid Boot mode: "%(boot_mode)s" specified')
               % {'boot_mode': boot_mode})
        raise exception.IloInvalidInputError(msg)
    try:
        sushy_system.bios_settings.pending_settings.set_pending_boot_mode(
            BOOT_MODE_MAP_REV.get(boot_mode.upper()))
    except sushy.exceptions.SushyError as e:
        # message literal lost in extraction; wording assumed
        msg = (self._('Setting pending boot mode to %(boot_mode)s failed. '
                      'Error: %(error)s')
               % {'boot_mode': boot_mode, 'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
Sets the boot mode of the system for next boot. :param boot_mode: either 'uefi' or 'legacy'. :raises: IloInvalidInputError, on an invalid input. :raises: IloError, on an error from iLO.