Dataset preview — columns: Unnamed: 0 (int64, values 0–389k), code (string, length 26–79.6k), docstring (string, length 1–46.9k).
28,200
def delete_permanent(self, list_id, subscriber_hash):
    # Path-segment literals were stripped in this dump; 'members', 'actions',
    # and 'delete-permanent' are restored from the MailChimp API route.
    subscriber_hash = check_subscriber_hash(subscriber_hash)
    self.list_id = list_id
    self.subscriber_hash = subscriber_hash
    return self._mc_client._post(url=self._build_path(
        list_id, 'members', subscriber_hash, 'actions', 'delete-permanent'))
Permanently delete a member from a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param subscriber_hash: The MD5 hash of the lowercase version of the list member’s email address. :type subscriber_hash: :py:class:`str`
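For illustration, the subscriber hash this endpoint expects can be computed with the standard library alone; a minimal sketch (the email address is a made-up example):

import hashlib

def subscriber_hash(email):
    # MailChimp member endpoints key on the MD5 of the lowercased address.
    return hashlib.md5(email.lower().encode('utf-8')).hexdigest()

print(subscriber_hash('Someone@Example.com'))  # 32-character hex digest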
28,201
def rdf_suffix(fmt: str) -> str:
    for k, v in SUFFIX_FORMAT_MAP.items():
        if fmt == v:
            return k
    return 'rdf'  # fallback literal was stripped in this dump; 'rdf' is an assumption
Map the RDF format to the appropriate suffix.
28,202
def prepare_token_revocation_request(self, revocation_url, token,
                                     token_type_hint="access_token", body='',
                                     callback=None, **kwargs):
    # The stripped default for `body` is restored as the empty string, matching
    # the oauthlib signature.
    if not is_secure_transport(revocation_url):
        raise InsecureTransportError()
    return prepare_token_revocation_request(
        revocation_url, token, token_type_hint=token_type_hint,
        body=body, callback=callback, **kwargs)
Prepare a token revocation request. :param revocation_url: Provider token revocation endpoint URL. :param token: The access or refresh token to be revoked (string). :param token_type_hint: ``"access_token"`` (default) or ``"refresh_token"``. This is optional and if you wish to not pass it you must provide ``token_type_hint=None``. :param body: :param callback: A jsonp callback such as ``package.callback`` to be invoked upon receiving the response. Note that it should not include a () suffix. :param kwargs: Additional parameters to be included in the request. :returns: The prepared request tuple with (url, headers, body). Note that JSONP requests may use GET requests as the parameters will be added to the request URL query as opposed to the request body. An example of a revocation request .. code-block:: http POST /revoke HTTP/1.1 Host: server.example.com Content-Type: application/x-www-form-urlencoded Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token An example of a jsonp revocation request .. code-block:: http GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1 Host: server.example.com Content-Type: application/x-www-form-urlencoded Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW and an error response .. code-block:: http package.myCallback({"error":"unsupported_token_type"}); Note that these requests usually require client credentials, client_id in the case of public clients and provider specific authentication credentials for confidential clients.
28,203
def create_pipeline_stage(self, pipeline_key, name, **kwargs):
    # Separator, dict-key, and HTTP-verb literals were stripped in this dump;
    # '/', 'name', and 'put' below are best-effort restorations.
    if not (pipeline_key and name):
        return requests.codes.bad_request, None
    uri = '/'.join([
        self.api_uri,
        self.pipelines_suffix,
        pipeline_key,
        self.stages_suffix])
    kwargs.update({'name': name})
    new_box = StreakStage(**kwargs)
    code, data = self._req('put', uri, new_box.to_dict(rw=True))
    return code, data
Creates a pipeline stage with the provided attributes. Args: name required name string kwargs {..} see StreakStage object for details return (status code, stage dict)
28,204
def remove(self, auto_confirm=False):
    # The log-message literals of this function were fused together in the
    # dump; they are re-separated below following pip's uninstall logic, and
    # the interactive confirmation loop that sat between them is elided.
    if not self._can_uninstall():
        return
    if not self.paths:
        logger.info(
            "Can't uninstall '%s'. No files were found to uninstall.",
            self.dist.project_name,
        )
        return
    logger.info('Uninstalling %s-%s:', self.dist.project_name, self.dist.version)
    # ... confirmation prompt ("Proceed (y/n)?"), per-path logging
    # ("Removing file or directory %s", "Not removing or modifying (outside
    # of prefix):") lost in the dump ...
    logger.info(
        'Successfully uninstalled %s-%s',
        self.dist.project_name, self.dist.version
    )
Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).
28,205
def key_changed(self): if self.key_name.get() and self.key_val.get(): self.button_key_add.state(["!disabled"]) else: self.button_key_add.state(["disabled"])
Checks whether the key name and value fields have been set, and enables or disables the add-key button accordingly.
28,206
def is_valid_resource_name(rname, exception_type=None): match = _ARMNAME_RE.match(rname) if match: return True if exception_type: raise exception_type() return False
Validates the given resource name to ARM guidelines, individual services may be more restrictive. :param rname: The resource name being validated. :type rname: str :param exception_type: Raises this Exception if invalid. :type exception_type: :class:`Exception` :returns: A boolean describing whether the name is valid. :rtype: bool
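A short usage sketch of the two calling styles the docstring describes (return a boolean, or raise a caller-supplied exception type):

# Boolean style: check and branch.
if not is_valid_resource_name('my-resource-01'):
    print('name rejected by ARM guidelines')

# Raising style: the validator raises the supplied exception type if invalid.
is_valid_resource_name('bad name!', exception_type=ValueError)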
28,207
def user_list(self, params=None):
    uri = 'users'  # endpoint literal stripped in this dump; 'users' assumed
    if params:
        uri += '?%s' % urllib.urlencode(params)
    resp, body = self.get(uri)
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return rest_client.ResponseBody(resp, body)
Lists all users within the tenant.
28,208
def ipaddr(value, options=None): ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: return ipv4_obj or ipv6_obj else: return ipv4_obj + ipv6_obj
Filters and returns only valid IP objects.
28,209
def _check_for_dictionary_key(self, logical_id, dictionary, keys):
    for key in keys:
        if key not in dictionary:
            # Message literal stripped in this dump; wording is an approximation.
            raise InvalidResourceException(
                logical_id, "Missing required key '{}'.".format(key))
Checks a dictionary to make sure it has a specific key. If it does not, an InvalidResourceException is thrown. :param string logical_id: logical id of this resource :param dict dictionary: the dictionary to check :param list keys: list of keys that should exist in the dictionary
28,210
def modify(self, management_address=None, username=None, password=None,
           connection_type=None):
    req_body = self._cli.make_body(
        managementAddress=management_address,
        username=username,
        password=password,
        connectionType=connection_type)
    resp = self.action('modify', **req_body)  # action name stripped; 'modify' assumed
    resp.raise_if_err()
    return resp
Modifies a remote system for remote replication. :param management_address: same as the one in `create` method. :param username: username for accessing the remote system. :param password: password for accessing the remote system. :param connection_type: same as the one in `create` method.
28,211
def load(store): store = normalize_store_arg(store) if contains_array(store, path=None): return Array(store=store, path=None)[...] elif contains_group(store, path=None): grp = Group(store=store, path=None) return LazyLoader(grp)
Load data from an array or group into memory. Parameters ---------- store : MutableMapping or string Store or path to directory in file system or name of zip file. Returns ------- out If the store contains an array, out will be a numpy array. If the store contains a group, out will be a dict-like object where keys are array names and values are numpy arrays. See Also -------- save, savez Notes ----- If loading data from a group of arrays, data will not be immediately loaded into memory. Rather, arrays will be loaded into memory as they are requested.
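A minimal round-trip consistent with the behavior described above, using zarr's public save/load pair (assumes a writable working directory):

import numpy as np
import zarr

zarr.save('example.zarr', np.arange(10))  # store now contains a single array
out = zarr.load('example.zarr')           # -> numpy array
print(out.sum())                          # 45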
28,212
def process_requests(self, requests, **context):
    # The body of this method was mangled in the dump; the error branch below
    # is a best-effort reconstruction and `ds.error_response` is a hypothetical
    # helper name standing in for the lost serializer call.
    ds = self._data_serializer
    responses = []
    for method, params, request_id, error in requests:
        if error:
            if error.request_id:
                responses.append(ds.error_response(
                    request_id=request_id,
                    message=error.message
                ))
            continue
        # ... dispatch of `method(params)` and collection of results was lost ...
    return responses
Turns a list of request objects into a list of response objects. :param requests: A list of tuples describing the RPC call :type requests: list[list[callable,object,object,list]] :param context: A dict with additional parameters passed to handle_request_string and process_requests. Allows wrapping code to pass additional parameters deep into the parsing stack: override this method and fold the parameters as needed into the method call. Imagine capturing authentication / permissions data from headers, converting them into actionable / flag objects and putting them into **context. Then override this method and fold the arguments into the call (which may be a decorated function, where the decorator unfolds the params and calls the actual method). By default, context is not passed to the method call below.
28,213
def send_zone_event(self, zone_id, event_name, *args): cmd = "EVENT %s!%s %s" % ( zone_id.device_str(), event_name, " ".join(str(x) for x in args)) return (yield from self._send_cmd(cmd))
Send an event to a zone.
28,214
from zipfile import ZipFile  # assumed import; the original import was not captured

def unzip_file_to_dir(path_to_zip, output_directory):
    z = ZipFile(path_to_zip, 'r')
    z.extractall(output_directory)
    z.close()
Extract a ZIP archive to a directory
28,215
def naturalize_string(key): return [int(text) if text.isdigit() else text.lower() for text in re.split(numregex, key)]
Analyzes a string in a human way to enable natural sort. :param key: The string to analyze. :returns: A structure that can be consumed by 'sorted'.
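`numregex` is a module-level pattern not shown in the snippet; a self-contained sketch assuming it captures digit runs:

import re

numregex = re.compile(r'(\d+)')  # assumed definition of the module-level pattern

def naturalize_string(key):
    return [int(text) if text.isdigit() else text.lower()
            for text in re.split(numregex, key)]

print(sorted(['node10', 'node2', 'node1'], key=naturalize_string))
# ['node1', 'node2', 'node10'] -- numeric segments compare as integers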
28,216
def bookDF(symbol, token='', version=''):
    # Default literals were stripped in this dump; empty strings assumed,
    # matching pyEX's conventions.
    x = book(symbol, token, version)
    df = _bookToDF(x)
    return df
Book data https://iextrading.com/developer/docs/#book realtime during Investors Exchange market hours Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
28,217
def apply(self, func, shortcut=False, args=(), **kwargs): if shortcut: grouped = self._iter_grouped_shortcut() else: grouped = self._iter_grouped() applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped) return self._combine(applied, shortcut=shortcut)
Apply a function over each array in the group and concatenate them together into a new array. `func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : function Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). args : tuple, optional Positional arguments passed to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray or Dataset The result of splitting, applying and combining this array.
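A small, self-contained illustration of the grouped apply described above (a sketch assuming xarray and numpy; `label` is a made-up coordinate):

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6.0), dims='x',
                  coords={'label': ('x', ['a', 'a', 'b', 'b', 'c', 'c'])})
# The 'x' dimension survives func, so results are stacked back over 'x' (rule 1).
doubled = da.groupby('label').apply(lambda arr: arr * 2)
print(doubled.values)  # [ 0.  2.  4.  6.  8. 10.]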
28,218
def Main(url): web_scrape = WebScraping() document = web_scrape.scrape(url) nlp_base = NlpBase() nlp_base.tokenizable_doc = MeCabTokenizer() sentence_list = nlp_base.listup_sentence(document) batch_size = 10 if len(sentence_list) < batch_size: raise ValueError("The number of extracted sentences is insufficient.") all_token_list = [] for i in range(len(sentence_list)): nlp_base.tokenize(sentence_list[i]) all_token_list.extend(nlp_base.token) sentence_list[i] = nlp_base.token vectorlizable_sentence = LSTMRTRBM() vectorlizable_sentence.learn( sentence_list=sentence_list, token_master_list=list(set(all_token_list)), hidden_neuron_count=1000, batch_size=batch_size, learning_rate=1e-03, seq_len=5 ) test_list = sentence_list[:batch_size] feature_points_arr = vectorlizable_sentence.vectorize(test_list) print("Feature points (Top 5 sentences):") print(feature_points_arr)
Entry Point. Args: url: target url.
28,219
def _load_wurlitzer(self):
    # Stripped literals restored: wurlitzer is POSIX-only, hence the 'nt' check.
    if not os.name == 'nt':
        from IPython.core.getipython import get_ipython
        try:
            get_ipython().run_line_magic('load_ext', 'wurlitzer')
        except Exception:
            pass
Load wurlitzer extension.
28,220
def _op_generic_HAdd(self, args): components = [] for a, b in self.vector_args(args): if self.is_signed: a = a.sign_extend(self._vector_size) b = b.sign_extend(self._vector_size) else: a = a.zero_extend(self._vector_size) b = b.zero_extend(self._vector_size) components.append((a + b)[self._vector_size:1]) return claripy.Concat(*components)
Halving add, for some ARM NEON instructions.
28,221
def send(scope, data):
    conn = scope.get('__connection__')  # key literal stripped; Exscript convention assumed
    for line in data:
        conn.send(line)
    return True
Like exec(), but does not wait for a response from the remote host after sending the command. :type data: string :param data: The data that is sent.
28,222
def get_date_type(calendar):
    try:
        import cftime
    except ImportError:
        # Message literal stripped in this dump; wording is an approximation.
        raise ImportError(
            'cftime is required for dates with non-standard calendars')
    else:
        # Calendar-name keys were stripped; they are restored to match the
        # cftime classes they map to.
        calendars = {
            'noleap': cftime.DatetimeNoLeap,
            '360_day': cftime.Datetime360Day,
            '365_day': cftime.DatetimeNoLeap,
            'all_leap': cftime.DatetimeAllLeap,
            'gregorian': cftime.DatetimeGregorian,
            'proleptic_gregorian': cftime.DatetimeProlepticGregorian,
            'julian': cftime.DatetimeJulian,
            '366_day': cftime.DatetimeAllLeap,
            'standard': cftime.DatetimeGregorian
        }
        return calendars[calendar]
Return the cftime date type for a given calendar name.
28,223
def get_network(ipv4addr=None, network=None, return_fields=None, **api_opts): infoblox = _get_infoblox(**api_opts) return infoblox.get_network(ipv4addr=ipv4addr, network=network, return_fields=return_fields)
Get list of all networks. This is helpful when looking up subnets to use with func:nextavailableip This call is often slow and not cached! Some return_fields: comment,network,network_view,ddns_domainname,disable,enable_ddns CLI Example: .. code-block:: bash salt-call infoblox.get_network
28,224
def requestSubsystem(self, subsystem):
    data = common.NS(subsystem)
    # 'subsystem' is the SSH channel-request name defined by the protocol;
    # the literal was stripped in this dump.
    return self.sendRequest('subsystem', data, wantReply=True)
Request a subsystem and return a deferred reply.
28,225
def drop_dose(self): doses = np.array(self.individual_doses) responses = np.array(self.responses) mask = doses != doses.max() self.individual_doses = doses[mask].tolist() self.responses = responses[mask].tolist() self.set_summary_data() self._validate()
Drop the maximum dose and related response values.
28,226
def DbExportDevice(self, argin):
    self._log.debug("In DbExportDevice()")
    if len(argin) < 5:
        self.warn_stream("DataBase::DbExportDevice(): insufficient export info for device ")
        th_exc(DB_IncorrectArguments,
               "insufficient export info for device",
               "DataBase::ExportDevice()")
    dev_name, IOR, host, pid, version = argin[:5]
    dev_name = dev_name.lower()
    if pid.lower() == 'null':  # literal stripped; restored from the docstring
        pid = "-1"
    self.db.export_device(dev_name, IOR, host, pid, version)
Export a device to the database :param argin: Str[0] = Device name Str[1] = CORBA IOR Str[2] = Device server process host name Str[3] = Device server process PID or string ``null`` Str[4] = Device server process version :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid
28,227
def feed(f, limit=25):
    # Separator and error-message literals were stripped; the 'user/feed'
    # format is taken from the docstring below.
    if '/' not in f:
        raise ValueError('feed name must be in the format <user>/<feed>')
    user, f = f.split('/')
    return Feed().show(user, f, limit=limit)
Pull a feed :param f: feed name (eg: csirtgadgets/correlated) :param limit: return value limit (default 25) :return: Feed dict
28,228
def is_identifier(s):
    ensure_string(s)
    if not IDENTIFIER_FORM_RE.match(s):
        return False
    if is_keyword(s):
        return False
    if s == 'None' and not IS_PY3:  # literal stripped; 'None' assumed ('None' is not a keyword in Python 2)
        return False
    return True
Check whether given string is a valid Python identifier. Note that this excludes language keywords, even though they exhibit a general form of an identifier. See also :func:`has_identifier_form`. :param s: String to check :return: Whether ``s`` is a valid Python identifier
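A few illustrative calls consistent with the documented semantics (identifier form required, keywords rejected):

assert is_identifier('count') is True
assert is_identifier('_private') is True
assert is_identifier('for') is False    # keyword, despite having identifier form
assert is_identifier('2fast') is False  # fails the identifier-form regex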
28,229
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
                         psd_avg_method='median', low_freq_cutoff=30.,
                         threshold=50., cluster_window=5., corrupt_time=4.,
                         high_freq_cutoff=None, output_intermediates=False):
    logging.info('Tapering the input strain')  # message literal stripped; reconstructed
    taper_length = int(corrupt_time * strain.sample_rate)
    w = numpy.arange(taper_length) / float(taper_length)
    strain[0:taper_length] *= pycbc.types.Array(w, dtype=strain.dtype)
    strain[(len(strain)-taper_length):] *= \
        pycbc.types.Array(w[::-1], dtype=strain.dtype)
    # ... the PSD estimation, whitening, and computation of the whitened
    # magnitude `mag` were lost in this dump ...
    indices = numpy.where(mag > threshold)[0]
    cluster_idx = pycbc.events.findchirp_cluster_over_window(
        indices, numpy.array(mag[indices]),
        int(cluster_window*strain.sample_rate))
    times = [idx * strain.delta_t + strain.start_time
             for idx in indices[cluster_idx]]
    return times
Automatic identification of loud transients for gating purposes. This function first estimates the PSD of the input time series using the FindChirp Welch method. Then it whitens the time series using that estimate. Finally, it computes the magnitude of the whitened series, thresholds it and applies the FindChirp clustering over time to the surviving samples. Parameters ---------- strain : TimeSeries Input strain time series to detect glitches over. psd_duration : {float, 4} Duration of the segments for PSD estimation in seconds. psd_stride : {float, 2} Separation between PSD estimation segments in seconds. psd_avg_method : {string, 'median'} Method for averaging PSD estimation segments. low_freq_cutoff : {float, 30} Minimum frequency to include in the whitened strain. threshold : {float, 50} Minimum magnitude of whitened strain for considering a transient to be present. cluster_window : {float, 5} Length of time window to cluster surviving samples over, in seconds. corrupt_time : {float, 4} Amount of time to be discarded at the beginning and end of the input time series. high_freq_cutoff : {float, None} Maximum frequency to include in the whitened strain. If given, the input series is downsampled accordingly. If omitted, the Nyquist frequency is used. output_intermediates : {bool, False} Save intermediate time series for debugging.
28,230
def get_mnist_iter(args, kv):
    # MNIST archive names were stripped in this dump; the standard filenames
    # are assumed.
    (train_lbl, train_img) = read_data(
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
    (val_lbl, val_img) = read_data(
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
    train = mx.io.NDArrayIter(
        to4d(train_img), train_lbl, args.batch_size, shuffle=True)
    val = mx.io.NDArrayIter(
        to4d(val_img), val_lbl, args.batch_size)
    return (train, val)
create data iterator with NDArrayIter
28,231
def search_meta(self, attr, value=None, stronly=False):
    if stronly:
        qry = self.ses.query(Symbol.name).join(SymbolMeta)
    else:
        qry = self.ses.query(Symbol).join(SymbolMeta)
    crits = []
    if value is None:
        crits.append(SymbolMeta.attr == attr)
    else:
        if isinstance(value, str):
            values = [value]
        elif isinstance(value, (tuple, list)):
            values = value
        for v in values:
            # The original matched `value` inside this loop, which looks like
            # a bug; each candidate `v` is matched here instead.
            crits.append(and_(SymbolMeta.attr == attr,
                              SymbolMeta.value.like(v)))
    if len(crits):
        qry = qry.filter(or_(*crits))
    qry = qry.order_by(Symbol.name)
    if stronly:
        return [sym[0] for sym in qry.distinct()]
    else:
        return [sym for sym in qry.distinct()]
Get a list of Symbols by searching a specific meta attribute, and optionally the value. Parameters ---------- attr : str The meta attribute to query. value : None, str or list The meta value to match against. If you pass a float, or an int, it'll be converted to a string, prior to searching. stronly : bool, optional, default False Return only a list of symbol names, as opposed to the (entire) Symbol objects. Returns ------- List of Symbols or empty list
28,232
def request_pdu(self):
    if None in [self.address, self.value]:
        raise Exception
    # '>BHH' (big-endian: 1-byte function code, 2-byte address, 2-byte value)
    # yields the 5-byte PDU; the format literal was stripped in this dump.
    return struct.pack('>BHH', self.function_code, self.address, self._value)
Build request PDU to write single coil. :return: Byte array of 5 bytes with PDU.
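The 5-byte layout follows from a big-endian pack of one byte plus two unsigned shorts; a standalone sketch using standard Modbus write-single-coil conventions (function code 0x05, value 0xFF00 for ON):

import struct

function_code = 0x05  # write single coil
address = 0x0010
value = 0xFF00        # Modbus convention: 0xFF00 = ON, 0x0000 = OFF

pdu = struct.pack('>BHH', function_code, address, value)
print(len(pdu), pdu.hex())  # 5 050010ff00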
28,233
def atleast(cls, lits, bound=1, top_id=None, encoding=EncType.seqcounter): if encoding < 0 or encoding > 9: raise(NoSuchEncodingError(encoding)) if not top_id: top_id = max(map(lambda x: abs(x), lits)) ret = CNFPlus() if encoding == 9: ret.atmosts, ret.nv = [([-l for l in lits], len(lits) - bound)], top_id return ret def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) res = pycard.encode_atleast(lits, bound, top_id, encoding) def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if res: ret.clauses, ret.nv = res return ret
This method can be used for creating a CNF encoding of an AtLeastK constraint, i.e. of :math:`\sum_{i=1}^{n}{x_i}\geq k`. The method takes 1 mandatory argument ``lits`` and 3 default arguments can be specified: ``bound``, ``top_id``, and ``encoding``. :param lits: a list of literals in the sum. :param bound: the value of bound :math:`k`. :param top_id: top variable identifier used so far. :param encoding: identifier of the encoding to use. :type lits: iterable(int) :type bound: int :type top_id: integer or None :type encoding: integer Parameter ``top_id`` serves to increase integer identifiers of auxiliary variables introduced during the encoding process. This is helpful when augmenting an existing CNF formula with the new cardinality encoding to make sure there is no collision between identifiers of the variables. If specified the identifiers of the first auxiliary variable will be ``top_id+1``. The default value of ``encoding`` is :attr:`Enctype.seqcounter`. The method *translates* the AtLeast constraint into an AtMost constraint by *negating* the literals of ``lits``, creating a new bound :math:`n-k` and invoking :meth:`CardEnc.atmost` with the modified list of literals and the new bound. :raises CardEnc.NoSuchEncodingError: if encoding does not exist. :rtype: a :class:`.CNFPlus` object where the new \ clauses (or the new native atmost constraint) are stored.
28,234
def export_file(self, data_object, destination_directory=None, destination_filename=None, retry=False, export_metadata=False, export_raw_file=True): if not destination_directory: destination_directory = os.getcwd() if not destination_filename: destination_filename = data_object[][] destination_file_url = os.path.join(destination_directory, destination_filename) logger.info( % ( data_object[][], data_object[])) if export_raw_file: destination = File( destination_file_url, self.storage_settings, retry=retry) if destination.exists(): raise FileAlreadyExistsError( % destination_file_url) logger.info( % ( destination.get_url())) file_resource = data_object.get() md5 = file_resource.get() source_url = data_object[][] File(source_url, self.storage_settings, retry=retry).copy_to( destination, expected_md5=md5) data_object[] = self._create_new_file_resource( data_object[], destination.get_url()) else: logger.info() if export_metadata: data_object[].pop(, None) data_object[].pop(, None) destination_metadata_url = os.path.join( destination_file_url + ) logger.info( % destination_metadata_url) metadata = yaml.safe_dump(data_object, default_flow_style=False) metadata_file = File(destination_metadata_url, self.storage_settings, retry=retry) metadata_file.write(metadata) else: logger.info() logger.info()
Export a file from Loom to some file storage location. Default destination_directory is cwd. Default destination_filename is the filename from the file data object associated with the given file_id.
28,235
def flip(self, axis=HORIZONTAL): if axis == HORIZONTAL: self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT) if axis == VERTICAL: self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM)
Flips the layer, either HORIZONTAL or VERTICAL.
28,236
def _add_meta_info(self, eopatch, request_params, service_type): for param, eoparam in zip([, , ], [, , ]): if eoparam not in eopatch.meta_info: eopatch.meta_info[eoparam] = request_params[param] if not in eopatch.meta_info: eopatch.meta_info[] = service_type.value for param in [, ]: if param not in eopatch.meta_info: eopatch.meta_info[param] = getattr(self, param) if eopatch.bbox is None: eopatch.bbox = request_params[]
Adds any missing metadata info to EOPatch
28,237
def get_gitlab_project(self):
    self.server = gitlab.Gitlab(GIT_URL, private_token=GITLAB_TOKEN, api_version=4)
    project = self.server.projects.get(self.git_short)
    if not project:
        # Message literal stripped in this dump; wording is an approximation.
        raise GitLabApiError('Could not get Project "{0}" from GitLab'.format(self.git_short))
    self.project = project
    return self.project
Get numerical GitLab Project ID. Returns: int: Project ID number. Raises: foremast.exceptions.GitLabApiError: GitLab responded with bad status code.
28,238
def _load(self, scale=0.001):
    ncf = Dataset(self.path, 'r')
    bandnum = OLCI_BAND_NAMES.index(self.bandname)
    # Variable and key names were stripped in this dump; the names below
    # follow pyspectral's OLCI reader and are best-effort assumptions.
    resp = ncf.variables['mean_spectral_response_function'][bandnum, :]
    wvl = ncf.variables['mean_spectral_response_function_wavelength'][bandnum, :] * scale
    self.rsr = {'wavelength': wvl, 'response': resp}
Load the OLCI relative spectral responses
28,239
def MakeClass(descriptor): if descriptor in MESSAGE_CLASS_CACHE: return MESSAGE_CLASS_CACHE[descriptor] attributes = {} for name, nested_type in descriptor.nested_types_by_name.items(): attributes[name] = MakeClass(nested_type) attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor result = GeneratedProtocolMessageType( str(descriptor.name), (message.Message,), attributes) MESSAGE_CLASS_CACHE[descriptor] = result return result
Construct a class object for a protobuf described by descriptor. Composite descriptors are handled by defining the new class as a member of the parent class, recursing as deep as necessary. This is the dynamic equivalent to: class Parent(message.Message): __metaclass__ = GeneratedProtocolMessageType DESCRIPTOR = descriptor class Child(message.Message): __metaclass__ = GeneratedProtocolMessageType DESCRIPTOR = descriptor.nested_types[0] Sample usage: file_descriptor = descriptor_pb2.FileDescriptorProto() file_descriptor.ParseFromString(proto2_string) msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0]) msg_class = reflection.MakeClass(msg_descriptor) msg = msg_class() Args: descriptor: A descriptor.Descriptor object describing the protobuf. Returns: The Message class object described by the descriptor.
28,240
from numpy import argmax  # assumed import; the original import was not captured

def one_hot2indices(one_hots):
    indices = []
    for one_hot in one_hots:
        indices.append(argmax(one_hot))
    return indices
Convert an iterable of one-hot encoded targets to a list of indices. Parameters ---------- one_hot : list Returns ------- indices : list Examples -------- >>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) [0, 1, 2] >>> one_hot2indices([[1, 0], [1, 0], [0, 1]]) [0, 0, 1]
28,241
def IsDir(directory):
    # This function body was mangled in the dump (docstring text leaked into
    # the code), so the reconstruction below is approximate: local paths use
    # os.path.isdir, and unsupported protocols raise NotImplementedProtocol.
    # Per the docstring, FTP was also handled, but that branch was lost.
    directory_url = urlparse(directory)
    if _UrlIsLocal(directory_url):  # hypothetical helper name
        return os.path.isdir(directory_url.path)
    else:
        from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(directory_url.scheme)
:param unicode directory: A path :rtype: bool :returns: Returns whether the given path points to an existent directory. :raises NotImplementedProtocol: If the path protocol is not local or ftp .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
28,242
async def clean_up_clients_async(self): if self.partition_receiver: if self.eh_client: await self.eh_client.stop_async() self.partition_receiver = None self.partition_receive_handler = None self.eh_client = None
Resets the pump; swallows all exceptions.
28,243
def display(obj, detail='full'):  # default literal stripped; 'full' is an assumption
    try:
        return obj.display(detail=detail)
    except AttributeError:
        return str(obj)
Friendly string for volume, using sink paths.
28,244
def errors(self): errors = [] for meta in self: errors.extend(meta.errors) return errors
get all the errors >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> gres.append(res_plus) >>> res_plus.add_error(ValueError("invalid data")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_moins) >>> res_plus.add_error(RuntimeError("server not anwsering")) >>> gres.errors [ValueError('invalid data',), RuntimeError('server not anwsering',)]
28,245
def apply_op(input_layer, operation, *op_args, **op_kwargs): return input_layer.with_tensor( operation(input_layer.tensor, *op_args, **op_kwargs))
Applies the given operation to this layer without adding any summaries. Args: input_layer: The input layer for this op. operation: An operation that takes a tensor and the supplied args. *op_args: Extra arguments for operation. **op_kwargs: Keyword arguments for the operation. Returns: A new layer with operation applied.
28,246
def _enforce_txt_record_maxlen(key, value):
    # key + '=' + value must fit in 255 chars; truncated values get '...'.
    if len(key) + len(value) + 1 > 255:
        return value[:251 - len(key)] + '...'
    return value
Enforces the TXT record maximum length of 255 characters. TXT record length includes key, value, and '='. :param str key: Key of the TXT record :param str value: Value of the TXT record :rtype: str :return: The value of the TXT record. It may be truncated if it exceeds the maximum permitted length. In case of truncation, '...' is appended to indicate that the entire value is not present.
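The truncation arithmetic checks out: 251 - len(key) characters of value, plus the key, the '=', and the 3-character '...' marker sum to exactly 255. A quick verification sketch reusing the function above:

key = 'txtvers'
value = 'x' * 300  # 7 + 300 + 1 > 255, so the value must be truncated

truncated = _enforce_txt_record_maxlen(key, value)
assert truncated == 'x' * (251 - len(key)) + '...'
assert len(key) + len(truncated) + 1 == 255  # key + '=' + value now fits exactly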
28,247
def parse_homer_findpeaks(self): self.homer_findpeaks = dict() for f in self.find_log_files(, filehandles=True): self.parse_findPeaks(f) self.homer_findpeaks = self.ignore_samples(self.homer_findpeaks) if len(self.homer_findpeaks) > 0: self.write_data_file(self.homer_findpeaks, ) stats_headers = OrderedDict() stats_headers[] = { : , : , : 0, : 100, : , : } stats_headers[] = { : , : 0, : , : } stats_headers[] = { : , : , : 0, : , : } self.general_stats_addcols(self.homer_findpeaks, stats_headers, ) return len(self.homer_findpeaks)
Find HOMER findpeaks logs and parse their data
28,248
def seek_previous_line(self): where = self.file.tell() offset = 0 while True: if offset == where: break read_size = self.read_size if self.read_size <= where else where self.file.seek(where - offset - read_size, SEEK_SET) data_len, data = self.read(read_size) if b in self.LINE_TERMINATORS and data[0] == b[0]: terminator_where = self.file.tell() if terminator_where > data_len + 1: self.file.seek(where - offset - data_len - 1, SEEK_SET) terminator_len, terminator_data = self.read(1) if terminator_data[0] == b[0]: data_len += 1 data = b + data self.file.seek(terminator_where) data_where = data_len while data_where > 0: terminator = self.suffix_line_terminator(data[:data_where]) if terminator and offset == 0 and data_where == data_len: data_where -= len(terminator) elif terminator: self.file.seek(where - offset - (data_len - data_where)) return self.file.tell() else: data_where -= 1 offset += data_len if where == 0: return -1 else: self.file.seek(0) return 0
Seek previous line relative to the current file position. :return: Position of the line or -1 if previous line was not found.
28,249
def is_active(self): start = self.start_date() return not self.is_completed() and (not start or start <= date.today())
Returns True when the start date is today or in the past and the task has not yet been completed.
28,250
def _generate_soma(self): radius = self._obj.soma.radius return _square_segment(radius, (0., -radius))
soma
28,251
def _on_message(channel, method, header, body):
    # Python 2 print statements kept as in the original.
    print "Message:"
    print "\t%r" % method
    print "\t%r" % header
    print "\t%r" % body
    channel.basic_ack(method.delivery_tag)
    channel.stop_consuming()
Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body
28,252
def tail_disconnect(self, index): try: for device in self.devices[index + 1:]: device.connected = False except IndexError: pass
Mark all devices in the chain after the given index as disconnected.
28,253
def parseUrl(url):
    scheme, netloc, url, params, query, fragment = urllib.parse.urlparse(url)
    query_dict = {
        k: sorted(v) if len(v) > 1 else v[0]
        for k, v in list(urllib.parse.parse_qs(query).items())
    }
    # The result keys were stripped in this dump; they are restored from the
    # docstring.
    return {
        'scheme': scheme,
        'netloc': netloc,
        'url': url,
        'params': params,
        'query': query_dict,
        'fragment': fragment,
    }
Return a dict containing scheme, netloc, url, params, query, fragment keys. query is a dict; a key that appears more than once in the URL maps to a sorted list of its values, while a key that appears exactly once maps to its single value.
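A worked example of the return shape (note how a repeated query key yields a sorted list while a single-valued key collapses to a scalar):

result = parseUrl('https://example.com/path;p1?a=1&b=3&b=2#frag')
# {
#     'scheme':   'https',
#     'netloc':   'example.com',
#     'url':      '/path',
#     'params':   'p1',
#     'query':    {'a': '1', 'b': ['2', '3']},  # repeated key -> sorted list
#     'fragment': 'frag',
# }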
28,254
def do_repository_work(self, repo_name, repo_tag=None, docker_executable=, password=None, force=None, loglevel=logging.DEBUG, note=None, tag=None, push=None, export=None, save=None): shutit_global.shutit_global_object.yield_to_draw() self.handle_note(note) shutit_pexpect_session = self.get_current_shutit_pexpect_session() if tag is None: tag = self.repository[] if push is None: push = self.repository[] if export is None: export = self.repository[] if save is None: save = self.repository[] if not (push or export or save or tag): self.push_repository(repository, docker_executable=docker_executable, expect=expect, shutit_pexpect_child=shutit_pexpect_child) self.build[] = (self.build[] + + repository) self.handle_note_after(note) return True
Commit, tag, push, tar a docker container based on the configuration we have. @param repo_name: Name of the repository. @param docker_executable: Defaults to 'docker' @param password: @param force: @type repo_name: string @type docker_executable: string @type password: string @type force: boolean
28,255
def trigger(self, identifier, force=True):
    self.debug(identifier)
    url = "{base}/{identifier}".format(
        base=self.local_base_url,
        identifier=identifier
    )
    param = {}
    if force:
        param['force'] = force
    encode = urllib.urlencode(param)
    if encode:
        url += "?"
        url += encode
    return self.core.update(url, {})
Trigger an upgrade task.
28,256
def set_slippage(self, us_equities=None, us_futures=None):
    if self.initialized:
        raise SetSlippagePostInit()
    if us_equities is not None:
        if Equity not in us_equities.allowed_asset_types:
            raise IncompatibleSlippageModel(
                asset_type='equities',  # stripped literal restored
                given_model=us_equities,
                supported_asset_types=us_equities.allowed_asset_types,
            )
        self.blotter.slippage_models[Equity] = us_equities
    if us_futures is not None:
        if Future not in us_futures.allowed_asset_types:
            raise IncompatibleSlippageModel(
                asset_type='futures',  # stripped literal restored
                given_model=us_futures,
                supported_asset_types=us_futures.allowed_asset_types,
            )
        self.blotter.slippage_models[Future] = us_futures
Set the slippage models for the simulation. Parameters ---------- us_equities : EquitySlippageModel The slippage model to use for trading US equities. us_futures : FutureSlippageModel The slippage model to use for trading US futures. See Also -------- :class:`zipline.finance.slippage.SlippageModel`
28,257
def clean_up_datetime(obj_map):
    clean_map = {}
    for key, value in obj_map.items():
        if isinstance(value, datetime.datetime):
            # Field names were stripped in this dump; they are restored from
            # the datetime attributes they store.
            clean_map[key] = {
                'year': value.year,
                'month': value.month,
                'day': value.day,
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                'microsecond': value.microsecond,
                'tzinfo': value.tzinfo
            }
        elif isinstance(value, dict):
            clean_map[key] = clean_up_datetime(value)
        elif isinstance(value, list):
            if key not in clean_map:
                clean_map[key] = []
            if len(value) > 0:
                for index, list_value in enumerate(value):
                    if isinstance(list_value, dict):
                        clean_map[key].append(clean_up_datetime(list_value))
                    else:
                        clean_map[key].append(list_value)
            else:
                clean_map[key] = value
        else:
            clean_map[key] = value
    return clean_map
convert datetime objects to dictionaries for storage
28,258
def parse_0134_013b(v): try: trim_val = trim_obd_value(v) val_ab = int(trim_val[0:2], 16) val_cd = int(trim_val[2:4], 16) return (2 / 65536) * val_ab, val_cd - 128 except ValueError: return None, None
Parses the O2 Sensor Value (0134 - 013B) and returns two values parsed from it: 1. Fuel-Air Equivalence [Ratio] as a float from 0 - 2 2. Current in [mA] as a float from -128 - 128 :param str v: :return tuple of float, float:
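Working the scaling through by hand for a hypothetical payload, following the code exactly as shown (one hex byte pair per quantity):

# Suppose trim_obd_value(v) yields the hex payload '8040' (made-up reading).
val_ab = int('80', 16)        # 128
val_cd = int('40', 16)        # 64

ratio = (2 / 65536) * val_ab  # 0.00390625 -> equivalence-ratio component
current = val_cd - 128        # -64 mA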
28,259
def set_base_prompt( self, pri_prompt_terminator=":", alt_prompt_terminator=">", delay_factor=2 ): super(CoriantSSH, self).set_base_prompt( pri_prompt_terminator=pri_prompt_terminator, alt_prompt_terminator=alt_prompt_terminator, delay_factor=delay_factor, ) return self.base_prompt
Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output.
28,260
def apply_chords(im, spacing=1, axis=0, trim_edges=True, label=False):
    # Warning/exception message literals were stripped in this dump and are
    # paraphrased below.
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image contains a singleton axis: ' + str(im.shape) +
                      '; consider squeezing it first.')
    if spacing < 0:
        raise Exception('Spacing cannot be less than 0')
    if spacing == 0:
        label = True
    result = sp.zeros(im.shape, dtype=int)
    slxyz = [slice(None, None, spacing*(axis != i) + 1) for i in [0, 1, 2]]
    slices = tuple(slxyz[:im.ndim])
    s = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
    if im.ndim == 3:
        s = sp.pad(sp.atleast_3d(s), pad_width=((0, 0), (0, 0), (1, 1)),
                   mode='constant', constant_values=0)
    im = im[slices]
    s = sp.swapaxes(s, 0, axis)
    chords = spim.label(im, structure=s)[0]
    if trim_edges:
        chords = clear_border(chords)
    result[slices] = chords
    if label is False:
        result = result > 0
    return result
r""" Adds chords to the void space in the specified direction. The chords are separated by 1 voxel plus the provided spacing. Parameters ---------- im : ND-array An image of the porous material with void marked as ``True``. spacing : int Separation between chords. The default is 1 voxel. This can be decreased to 0, meaning that the chords all touch each other, which automatically sets the ``label`` argument to ``True``. axis : int (default = 0) The axis along which the chords are drawn. trim_edges : bool (default = ``True``) Whether or not to remove chords that touch the edges of the image. These chords are artificially shortened, which skews the chord length distribution. label : bool (default is ``False``) If ``True`` the chords in the returned image are each given a unique label, such that all voxels lying on the same chord have the same value. This is automatically set to ``True`` if spacing is 0, but is ``False`` otherwise. Returns ------- image : ND-array A copy of ``im`` with non-zero values indicating the chords. See Also -------- apply_chords_3D
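A sketch of typical usage through porespy's public API, which appears to expose this function as porespy.filters.apply_chords (assuming a random binary test image):

import numpy as np
import porespy as ps

im = np.random.rand(100, 100) > 0.4  # random binary image, True = void
chords = ps.filters.apply_chords(im, spacing=3, axis=0, trim_edges=True)
# `chords` is a boolean mask of the drawn chords (label=False by default)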
28,261
def parse_error(text: str) -> Any: try: data = json.loads(text) jsonschema.validate(data, ERROR_SCHEMA) except (TypeError, json.decoder.JSONDecodeError) as e: raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e))) return data
Validate and parse the BMA answer from websocket :param text: the bma error :return: the json data
28,262
def login(self, email, password): payload = { : email, : password } code, msg, rv = self.request( , payload ) if code == : accessToken = rv[][][] self.set_access_token(accessToken) return rv
:password: user password md5 digest
28,263
def clear_worker_output(self):
    # Python 2 print statement; the message literal was stripped in this dump.
    print 'Dropping all worker output collections...'
    all_c = self.database.collection_names()
    for collection in all_c:
        self.database.drop_collection(collection)
Drops all of the worker output collections
28,264
def multi_packages(self, logins=None, platform=None, package_type=None,
                   type_=None, access=None):
    logger.debug('')
    method = self._multi_packages
    new_client = True
    try:
        # Probe whether the installed anaconda client supports the `access`
        # keyword; the probe value literal was stripped ('private' assumed).
        self._anaconda_client_api.user_packages(access='private')
    except Exception:
        new_client = False
    return self._create_worker(method, logins=logins, platform=platform,
                               package_type=package_type, type_=type_,
                               access=access, new_client=new_client)
Return the private packages for a given set of usernames/logins.
28,265
def _to_ned(self):
    # Frame-name literals were stripped in this dump; 'USE' and 'NED' are
    # restored from the conversion being performed.
    if self.ref_frame == 'USE':
        return utils.use_to_ned(self.tensor), \
            utils.use_to_ned(self.tensor_sigma)
    elif self.ref_frame == 'NED':
        return self.tensor, self.tensor_sigma
    else:
        raise ValueError('Reference frame %s not recognised' % self.ref_frame)
Switches the reference frame to NED
28,266
def find_numeration(line): patterns = ( re_numeration_vol_page_yr, re_numeration_vol_nucphys_page_yr, re_numeration_nucphys_vol_page_yr, re_numeration_vol_subvol_nucphys_yr_page, re_numeration_vol_nucphys_yr_subvol_page, re_numeration_vol_yr_page, re_numeration_nucphys_vol_yr_page, re_numeration_vol_nucphys_series_yr_page, re_numeration_vol_series_nucphys_page_yr, re_numeration_vol_nucphys_series_page_yr, re_numeration_yr_vol_page, ) for pattern in patterns: match = pattern.match(line) if match: info = match.groupdict() series = info.get(, None) if not series: series = extract_series_from_volume(info[]) if not info[]: info[] = info[] if not info[]: info[] = info[] return {: info.get(, None), : series, : info[], : info[] or info[], : info[], : match.end()} return None
Given a reference line, attempt to locate instances of citation 'numeration' in the line. @param line: (string) the reference line. @return: (dict or None) a dict of recognised numeration fields (series, volume, year, page, etc.) plus the match length, or None if no numeration was found.
28,267
def _shape_union(shapes): return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
A shape containing the union of all dimensions in the input shapes. Args: shapes: a list of Shapes Returns: a Shape
28,268
def group(self, labels): unique_labels, groupxs = self.group_indicies(labels) groups = [self.take(idxs) for idxs in groupxs] return unique_labels, groups
group as list
28,269
def create(self, obj, ref=None):
    # Several string literals were stripped in this dump; 'gl_',
    # '_shader_object', 'auv', and '_' below are best-effort restorations
    # following vispy's shader-object conventions.
    if isinstance(ref, Variable):
        ref = ref.name
    elif isinstance(ref, string_types) and ref.startswith('gl_'):
        ref = ref[3:].lower()
    if hasattr(obj, '_shader_object'):
        obj = obj._shader_object()
    if isinstance(obj, ShaderObject):
        if isinstance(obj, Variable) and obj.name is None:
            obj.name = ref
    elif isinstance(obj, string_types):
        obj = TextExpression(obj)
    else:
        obj = Variable(ref, obj)
        # Prefix the name to indicate attribute/uniform/varying.
        if obj.vtype and obj.vtype[0] in 'auv':
            obj.name = obj.vtype[0] + '_' + obj.name
    return obj
Convert *obj* to a new ShaderObject. If the output is a Variable with no name, then set its name using *ref*.
28,270
def reloading_meta_metaclass_factory(BASE_TYPE=type): class ReloadingMetaclass2(BASE_TYPE): def __init__(metaself, name, bases, dct): super(ReloadingMetaclass2, metaself).__init__(name, bases, dct) metaself.rrr = reload_class return ReloadingMetaclass2
hack for pyqt
28,271
def add_remote_subnet(self, context_id, subnet_id): return self.context.addCustomerSubnetToNetworkTunnel(subnet_id, id=context_id)
Adds a remote subnet to a tunnel context. :param int context_id: The id-value representing the context instance. :param int subnet_id: The id-value representing the remote subnet. :return bool: True if remote subnet addition was successful.
28,272
def config_create(self, kernel=None, label=None, devices=[], disks=[], volumes=[], **kwargs): from .volume import Volume hypervisor_prefix = if self.hypervisor == else device_names = [hypervisor_prefix + string.ascii_lowercase[i] for i in range(0, 8)] device_map = {device_names[i]: None for i in range(0, len(device_names))} if devices and (disks or volumes): raise ValueError( ) if not devices: if not isinstance(disks, list): disks = [disks] if not isinstance(volumes, list): volumes = [volumes] devices = [] for d in disks: if d is None: devices.append(None) elif isinstance(d, Disk): devices.append(d) else: devices.append(Disk(self._client, int(d), self.id)) for v in volumes: if v is None: devices.append(None) elif isinstance(v, Volume): devices.append(v) else: devices.append(Volume(self._client, int(v))) if not devices: raise ValueError() for i, d in enumerate(devices): if d is None: pass elif isinstance(d, Disk): device_map[device_names[i]] = {: d.id } elif isinstance(d, Volume): device_map[device_names[i]] = {: d.id } else: raise TypeError() params = { : kernel.id if issubclass(type(kernel), Base) else kernel, : label if label else "{}_config_{}".format(self.label, len(self.configs)), : device_map, } params.update(kwargs) result = self._client.post("{}/configs".format(Instance.api_endpoint), model=self, data=params) self.invalidate() if not in result: raise UnexpectedResponseError(, json=result) c = Config(self._client, result[], self.id, result) return c
Creates a Linode Config with the given attributes. :param kernel: The kernel to boot with. :param label: The config label :param disks: The list of disks, starting at sda, to map to this config. :param volumes: The volumes, starting after the last disk, to map to this config :param devices: A list of devices to assign to this config, in device index order. Values must be of type Disk or Volume. If this is given, you may not include disks or volumes. :param **kwargs: Any other arguments accepted by the api. :returns: A new Linode Config
28,273
def get_content_type(obj, field_name=False):
    content_type = ContentType.objects.get_for_model(obj)
    if field_name:
        return getattr(content_type, field_name, '')  # stripped default; '' assumed
    return content_type
Returns the content type of an object. :param obj: A model instance. :param field_name: Field of the object to return.
28,274
def compile_file(env, src_path, dst_path, encoding='utf-8', base_dir=''):
    # Default literals and file modes were stripped in this dump and are
    # assumed here; the Python 2 `file` builtin is kept as in the original.
    src_file = file(src_path, 'r')
    source = src_file.read().decode(encoding)
    name = src_path.replace(base_dir, '')
    raw = env.compile(source, name=name, filename=name, raw=True)
    src_file.close()
    dst_file = open(dst_path, 'w')
    dst_file.write(raw)
    dst_file.close()
Compiles a Jinja2 template to python code. :param env: a Jinja2 Environment instance. :param src_path: path to the source file. :param dst_path: path to the destination file. :param encoding: template encoding. :param base_dir: the base path to be removed from the compiled template filename.
28,275
def _parse_labels(self, labels, element): r if labels is None: raise Exception() if type(labels) is str: labels = [labels] parsed_labels = [] for label in labels: if element in label: label = label.split()[-1] if in label: Ls = [L.split()[-1] for L in self.labels(element=element)] if label.startswith(): temp = [L for L in Ls if L.endswith(label.strip())] if label.endswith(): temp = [L for L in Ls if L.startswith(label.strip())] temp = [element++L for L in temp] elif element++label in self.keys(): temp = [element++label] else: temp = [element++label] parsed_labels.extend(temp) [parsed_labels.remove(L) for L in parsed_labels if parsed_labels.count(L) > 1] return parsed_labels
r""" This private method is used for converting \'labels\' to a proper format, including dealing with wildcards (\*). Parameters ---------- labels : string or list of strings The label or list of labels to be parsed. Note that the \* can be used as a wildcard. Returns ------- A list of label strings, with all wildcard matches included if applicable.
28,276
def check_directory_path(self, path): if os.path.isdir(path) is not True: msg = "Directory Does Not Exist {}".format(path) raise OSError(msg)
Ensure directory exists at the provided path :type path: string :param path: path to directory to check
28,277
def _fastfood_list(args):
    template_pack = pack.TemplatePack(args.template_pack)
    if args.stencil_set:
        stencil_set = template_pack.load_stencil_set(args.stencil_set)
        print("Available Stencils for %s:" % args.stencil_set)
        for stencil in stencil_set.stencils:
            print("  %s" % stencil)
    else:
        # Header and dict-key literals were stripped in this dump; the values
        # below are best-effort reconstructions.
        print("Available Stencil Sets:")
        for name, vals in template_pack.stencil_sets.items():
            print("  %12s - %12s" % (name, vals['help']))
Run on `fastfood list`.
28,278
def get_section_header(self, section): self._ensure_section_headers_loaded() if type(section) is int: return self._section_headers_by_index[section] else: return self._section_headers_by_name[section]
Get a specific section header by index or name. Args: section(int or str): The index or name of the section header to return. Returns: :class:`~ELF.SectionHeader`: The section header. Raises: KeyError: The requested section header does not exist.
28,279
def upload(identifier, files, metadata=None, headers=None, access_key=None, secret_key=None, queue_derive=None, verbose=None, verify=None, checksum=None, delete=None, retries=None, retries_sleep=None, debug=None, request_kwargs=None, **get_item_kwargs): item = get_item(identifier, **get_item_kwargs) return item.upload(files, metadata=metadata, headers=headers, access_key=access_key, secret_key=secret_key, queue_derive=queue_derive, verbose=verbose, verify=verify, checksum=checksum, delete=delete, retries=retries, retries_sleep=retries_sleep, debug=debug, request_kwargs=request_kwargs)
Upload files to an item. The item will be created if it does not exist. :type identifier: str :param identifier: The globally unique Archive.org identifier for a given item. :param files: The filepaths or file-like objects to upload. This value can be an iterable or a single file-like object or string. :type metadata: dict :param metadata: (optional) Metadata used to create a new item. If the item already exists, the metadata will not be updated -- use ``modify_metadata``. :type headers: dict :param headers: (optional) Add additional HTTP headers to the request. :type access_key: str :param access_key: (optional) IA-S3 access_key to use when making the given request. :type secret_key: str :param secret_key: (optional) IA-S3 secret_key to use when making the given request. :type queue_derive: bool :param queue_derive: (optional) Set to False to prevent an item from being derived after upload. :type verbose: bool :param verbose: (optional) Display upload progress. :type verify: bool :param verify: (optional) Verify local MD5 checksum matches the MD5 checksum of the file received by IAS3. :type checksum: bool :param checksum: (optional) Skip uploading files based on checksum. :type delete: bool :param delete: (optional) Delete local file after the upload has been successfully verified. :type retries: int :param retries: (optional) Number of times to retry the given request if S3 returns a 503 SlowDown error. :type retries_sleep: int :param retries_sleep: (optional) Amount of time to sleep between ``retries``. :type debug: bool :param debug: (optional) Set to True to print headers to stdout, and exit without sending the upload request. :param \*\*kwargs: Optional arguments that ``get_item`` takes. :returns: A list of :py:class:`requests.Response` objects.
28,280
def register_service_agreement_consumer(storage_path, publisher_address, agreement_id, did, service_agreement, service_definition_id, price, encrypted_files, consumer_account, condition_ids, consume_callback=None, start_time=None): if start_time is None: start_time = int(datetime.now().timestamp()) record_service_agreement( storage_path, agreement_id, did, service_definition_id, price, encrypted_files, start_time) process_agreement_events_consumer( publisher_address, agreement_id, did, service_agreement, price, consumer_account, condition_ids, consume_callback )
Registers the given service agreement in the local storage. Subscribes to the service agreement events. :param storage_path: storage path for the internal db, str :param publisher_address: ethereum account address of publisher, hex str :param agreement_id: id of the agreement, hex str :param did: DID, str :param service_agreement: ServiceAgreement instance :param service_definition_id: identifier of the service inside the asset DDO, str :param price: Asset price, int :param encrypted_files: result of the files encrypted by the secret store, hex str :param consumer_account: Account instance of the consumer :param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32 :param consume_callback: :param start_time: start time, int
28,281
def do_delete(endpoint, access_token):
    # Header literals were stripped in this dump; the standard bearer-token
    # scheme and User-Agent header are restored.
    headers = {"Authorization": 'Bearer ' + access_token}
    headers['User-Agent'] = get_user_agent()
    return requests.delete(endpoint, headers=headers)
Do an HTTP DELETE request and return the response. Args: endpoint (str): Azure Resource Manager management endpoint. access_token (str): A valid Azure authentication token. Returns: HTTP response.
28,282
def process_rawq(self): buf = b try: while self.rawq: char = self.rawq_getchar() if char != IAC: buf = buf + char continue command = self.rawq_getchar() if command == theNULL: self.msg() continue elif command == IAC: self.msg() buf = buf + command continue elif command == DO: opt = self.rawq_getchar() self.msg(, ord(opt)) if opt == TTYPE: self.sock.send(IAC+WILL+opt) elif opt == NAWS: self.sock.send(IAC+WILL+opt) self.can_naws = True if self.window_size: self.set_window_size(*self.window_size) else: self.sock.send(IAC+WONT+opt) self.msg( , ord(iac), ord(end)) self.msg() ttype = self.termtype.encode() self.msg(, ttype) self.sock.send(IAC+SB+TTYPE+theNULL+ttype+IAC+SE) elif command in (WILL, WONT): opt = self.rawq_getchar() self.msg(, command == WILL and or , ord(opt)) if opt == ECHO: self.sock.send(IAC+DO+opt) else: self.sock.send(IAC+DONT+opt) else: self.msg( % ord(command)) except EOFError: pass buf = buf.decode(self.encoding) self.cookedq.write(buf) if self.data_callback is not None: self.data_callback(buf, **self.data_callback_kwargs)
Transfer from raw queue to cooked queue. Set self.eof when connection is closed. Don't block unless in the midst of an IAC sequence.
28,283
def refresh(self): for cbar in self.colorbars: cbar.draw_all() self.canvas.draw()
Refresh the current figure
28,284
def parse_python_file(filepath): with _AST_CACHE_LOCK: if filepath not in _AST_CACHE: source = read_file(filepath) _AST_CACHE[filepath] = ast.parse(source, filename=filepath) return _AST_CACHE[filepath]
Retrieves the AST of the specified file. This function performs simple caching so that the same file isn't read or parsed more than once per process. :param filepath: the file to parse :type filepath: str :returns: ast.AST
28,285
def make_translations(unique_name, node): introspectable = not node.attrib.get() == if node.tag == core_ns(): __TRANSLATED_NAMES[][unique_name] = unique_name if introspectable: components = get_gi_name_components(node) components[-1] = components[-1].upper() gi_name = .join(components) __TRANSLATED_NAMES[][unique_name] = gi_name __TRANSLATED_NAMES[][unique_name] = gi_name elif c_ns() in node.attrib: __TRANSLATED_NAMES[][unique_name] = unique_name if introspectable: components = get_gi_name_components(node) gi_name = .join(components) __TRANSLATED_NAMES[][unique_name] = gi_name components[-1] = % components[-1] __TRANSLATED_NAMES[][unique_name] = .join(components) elif c_ns() in node.attrib: components = get_gi_name_components(node) gi_name = .join(components) __TRANSLATED_NAMES[][unique_name] = unique_name if introspectable: __TRANSLATED_NAMES[][unique_name] = gi_name __TRANSLATED_NAMES[][unique_name] = gi_name elif node.tag == core_ns(): components = [] get_field_c_name_components(node, components) display_name = .join(components[1:]) __TRANSLATED_NAMES[][unique_name] = display_name if introspectable: __TRANSLATED_NAMES[][unique_name] = display_name __TRANSLATED_NAMES[][unique_name] = display_name elif node.tag == core_ns(): display_name = node.attrib[] __TRANSLATED_NAMES[][unique_name] = display_name if introspectable: __TRANSLATED_NAMES[][unique_name] = % display_name __TRANSLATED_NAMES[][unique_name] = % display_name elif node.tag == core_ns(): display_name = node.attrib[] __TRANSLATED_NAMES[][unique_name] = display_name if introspectable: __TRANSLATED_NAMES[][unique_name] = display_name __TRANSLATED_NAMES[][unique_name] = display_name.replace(, ) else: __TRANSLATED_NAMES[][unique_name] = node.attrib.get() if introspectable: __TRANSLATED_NAMES[][unique_name] = node.attrib.get() __TRANSLATED_NAMES[][unique_name] = node.attrib.get()
Compute and store the title that should be displayed when linking to a given unique_name, eg in python when linking to test_greeter_greet() we want to display Test.Greeter.greet
28,286
def get(self, identity):
    # Solution keys were stripped in this dump; they are restored from the
    # keyword arguments they feed.
    return SyncMapPermissionContext(
        self._version,
        service_sid=self._solution['service_sid'],
        map_sid=self._solution['map_sid'],
        identity=identity,
    )
Constructs a SyncMapPermissionContext :param identity: Identity of the user to whom the Sync Map Permission applies. :returns: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionContext
28,287
def read(handle, id=None):
    from Bio.PDB import PDBParser
    if not id:
        id = os.path.basename(handle).split('.')[0]  # stripped '.' separator restored
    p = PDBParser()
    s = p.get_structure(id, handle)
    return s
Reads a structure via PDBParser. Simplifies life.
28,288
def spell_check(T, w): assert T is not None dist = 0 while True: u = search(T, dist, w) if u is not None: return u dist += 1
Spellchecker :param T: trie encoding the dictionary :param w: given word :returns: a closest word from the dictionary :complexity: linear if distance was constant
28,289
def feature(self, feat=None, searchforit=False, init=None):
    if feat is None:
        return self.feats
    if not init:
        init = self
        init.tick = 0
        init._matches = []
    feat = feat.strip()
    if feat.startswith("+"):
        init._eval = True
        feat = feat[1:]
    elif feat.startswith("-"):
        init._eval = False
        feat = feat[1:]
    else:
        init._eval = None
    if hasattr(self, 'feats') and (feat in self.feats):  # stripped attr name restored
        if type(self.feats[feat]) == type([]):
            if len(self.feats[feat]) > 1:
                return self.feats[feat]
            else:
                return self.feats[feat][0]
        else:
            return self.feats[feat]
    else:
        if searchforit:
            for child in self.descendants():
                init.tick += 1
                x = child.feature(feat, searchforit, init)
                if x is None:
                    continue
                init._matches.append((child, x))
        else:
            return None
    if self == init:
        if init._eval is None:
            return init._matches
        else:
            return [x for (x, y) in init._matches if bool(y) == init._eval]
Returns value of self.feats[feat]. If searchforit==True, will search in this object's children recursively. If not found, returns None.
28,290
def do_fontsave(self, arg):
    from os import path
    import json
    fullpath = path.expanduser(arg)
    data = {
        "fonts": self.curargs["fonts"],
        "ticks": self.curargs["ticks"]
    }
    with open(fullpath, 'w') as f:
        json.dump(data, f)
    msg.okay("Saved current font settings to {}".format(fullpath))
Saves the current font and tick settings to a file so that the same analysis can be continued later.
28,291
def dumps(self): def _is_initial(author_name): return len(author_name) == 1 or u in author_name def _ensure_dotted_initials(author_name): if _is_initial(author_name) \ and u not in author_name: seq = (author_name, u) author_name = u.join(seq) return author_name def _ensure_dotted_suffixes(author_suffix): if u not in author_suffix: seq = (author_suffix, u) author_suffix = u.join(seq) return author_suffix def _is_roman_numeral(suffix): valid_roman_numerals = [u, u, u, u, u, u, u, u, u] return all(letters in valid_roman_numerals for letters in suffix.upper()) first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list) try: prev = next(first_and_middle_names) except StopIteration: LOGGER.warning(u"Cannot process %s properly", self._parsed_name.original) prev = self._parsed_name.original names_with_spaces = [prev] for name in first_and_middle_names: if not _is_initial(name) or not _is_initial(prev): names_with_spaces.append() prev = name names_with_spaces.append(prev) normalized_names = u.join(names_with_spaces) if _is_roman_numeral(self.suffix): suffix = self.suffix.upper() else: suffix = _ensure_dotted_suffixes(self.suffix) final_name = u.join( part for part in (self.last, normalized_names.strip(), suffix) if part) final_name = final_name.replace(u, ') return final_name
Dump the name to string, after normalizing it.
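Expected behavior sketch (an inspire-utils-style `ParsedName` class is assumed): initials get dots and are packed without spaces, and suffixes are either dotted or upper-cased roman numerals:

from inspire_utils.name import ParsedName

print(ParsedName('Holmes, S W').dumps())       # u'Holmes, S.W.'
print(ParsedName('Ellis, John, iii').dumps())  # u'Ellis, John, III'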
28,292
def AnalizarXml(self, xml=""): "Analiza un mensaje XML (por defecto la respuesta)" try: if not xml: xml = self.XmlResponse self.xml = SimpleXMLElement(xml) return True except Exception, e: self.Excepcion = u"%s" % (e) return False
Parses an XML message (by default, the response).
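Usage sketch (pyafipws-style component; the object name is illustrative, the attributes are those used above):

ok = ws.AnalizarXml()        # parse the last XML response
if not ok:
    print(ws.Excepcion)      # parsing error, if any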
28,293
import tensorflow as tf

# `_is_list_like` and `value_and_gradient` are helpers from the surrounding
# tensorflow_probability.python.math package.


def diag_jacobian(xs, ys=None, sample_shape=None, fn=None,
                  parallel_iterations=10, name=None):
    with tf.compat.v1.name_scope(name, 'diag_jacobian', [xs, ys]):
        if sample_shape is None:
            sample_shape = [1]
        # Output Jacobian diagonal.
        jacobians_diag_res = []
        # Convert input `xs` to a list.
        xs = list(xs) if _is_list_like(xs) else [xs]
        xs = [tf.convert_to_tensor(value=x) for x in xs]
        if not tf.executing_eagerly():
            if ys is None:
                if fn is None:
                    raise ValueError('Both `ys` and `fn` can not be `None`')
                else:
                    ys = fn(*xs)
            # Convert `ys` to a list.
            ys = list(ys) if _is_list_like(ys) else [ys]
            if len(xs) != len(ys):
                raise ValueError('`xs` and `ys` should have the same length')
            for y, x in zip(ys, xs):
                # Broadcast `y` to the shape of `x` and flatten `event_shape`.
                y_ = y + tf.zeros_like(x)
                y_ = tf.reshape(y_, tf.concat([sample_shape, [-1]], -1))
                # Iterator and tensor array loop variables for the gradients.
                n = tf.size(input=x) / tf.cast(
                    tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
                n = tf.cast(n, dtype=tf.int32)
                loop_vars = [0, tf.TensorArray(x.dtype, n)]

                def loop_body(j):
                    """Loop function to compute gradients of each direction."""
                    res = tf.gradients(ys=y_[..., j], xs=x)[0]
                    if res is None:
                        # Return zero, if the gradient is `None`.
                        res = tf.zeros(tf.concat([sample_shape, [1]], -1),
                                       dtype=x.dtype)
                    else:
                        # Reshape `event_shape` to 1D.
                        res = tf.reshape(res,
                                         tf.concat([sample_shape, [-1]], -1))
                        # Artificial dimension for zero-shape input tensors.
                        res = tf.expand_dims(res, 0)
                        res = res[..., j]
                    return res

                _, jacobian_diag_res = tf.while_loop(
                    cond=lambda j, _: j < n,
                    body=lambda j, result: (j + 1, result.write(j, loop_body(j))),
                    loop_vars=loop_vars,
                    parallel_iterations=parallel_iterations)

                shape_x = tf.shape(input=x)
                # Stack gradients and reshape back to the shape of `x`.
                reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
                reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
                jacobians_diag_res.append(reshaped_jacobian_diag)
        else:
            if fn is None:
                raise ValueError(
                    '`fn` can not be `None` when eager execution is enabled')
            if ys is None:
                ys = fn(*xs)

            def fn_slice(i, j):
                """Broadcast the i-th output and return its j-th slice."""
                def fn_broadcast(*state):
                    res = fn(*state)
                    res = list(res) if _is_list_like(res) else [res]
                    if len(res) != len(state):
                        res *= len(state)
                    res = [tf.reshape(r + tf.zeros_like(s),
                                      tf.concat([sample_shape, [-1]], -1))
                           for r, s in zip(res, state)]
                    return res
                return lambda *state: tf.expand_dims(
                    fn_broadcast(*state)[i], 0)[..., j]

            def make_loop_body(i, x):
                """Loop function to compute gradients of each direction."""
                def _fn(j, result):
                    res = value_and_gradient(fn_slice(i, j), xs)[1][i]
                    if res is None:
                        res = tf.zeros(tf.concat([sample_shape, [1]], -1),
                                       dtype=x.dtype)
                    else:
                        res = tf.reshape(res,
                                         tf.concat([sample_shape, [-1]], -1))
                        res = res[..., j]
                    return j + 1, result.write(j, res)
                return _fn

            for i, x in enumerate(xs):
                n = tf.size(input=x) / tf.cast(
                    tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32)
                n = tf.cast(n, dtype=tf.int32)
                loop_vars = [0, tf.TensorArray(x.dtype, n)]
                _, jacobian_diag_res = tf.while_loop(
                    cond=lambda j, _: j < n,
                    body=make_loop_body(i, x),
                    loop_vars=loop_vars,
                    parallel_iterations=parallel_iterations)
                shape_x = tf.shape(input=x)
                reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack())
                reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x)
                jacobians_diag_res.append(reshaped_jacobian_diag)
        return ys, jacobians_diag_res
Computes diagonal of the Jacobian matrix of `ys=fn(xs)` wrt `xs`.

If `ys` is a tensor or a list of tensors of the form `(ys_1, .., ys_n)` and
`xs` is of the form `(xs_1, .., xs_n)`, the function `diag_jacobian`
computes the diagonal of the Jacobian matrix, i.e., the partial derivatives
`(dys_1/dxs_1,.., dys_n/dxs_n)`. For definition details, see
https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant

#### Example

##### Diagonal Hessian of the log-density of a 3D Gaussian distribution

In this example we compute the diagonal of the Hessian of the log-density of
a 3D Gaussian distribution by applying `diag_jacobian` to the gradients of
the log-density.

```python
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np

tfd = tfp.distributions
dtype = np.float32

with tf.Session(graph=tf.Graph()) as sess:
  true_mean = dtype([0, 0, 0])
  true_cov = dtype([[1, 0.25, 0.25], [0.25, 2, 0.25], [0.25, 0.25, 3]])
  chol = tf.linalg.cholesky(true_cov)
  target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)

  # Assume that the state is passed as a list of tensors `x` and `y`.
  # Then the target function is defined as follows:
  def target_fn(x, y):
    # Stack the input tensors together
    z = tf.concat([x, y], axis=-1) - true_mean
    return target.log_prob(z)

  sample_shape = [3, 5]
  state = [tf.ones(sample_shape + [2], dtype=dtype),
           tf.ones(sample_shape + [1], dtype=dtype)]
  fn_val, grads = tfp.math.value_and_gradient(target_fn, state)

  # We can either pass the `sample_shape` of the `state` or not, which impacts
  # computational speed of `diag_jacobian`
  _, diag_jacobian_shape_passed = tfp.math.diag_jacobian(
      xs=state, ys=grads, sample_shape=tf.shape(fn_val))
  _, diag_jacobian_shape_none = tfp.math.diag_jacobian(
      xs=state, ys=grads)

  diag_jacobian_shape_passed_ = sess.run(diag_jacobian_shape_passed)
  diag_jacobian_shape_none_ = sess.run(diag_jacobian_shape_none)

print('hessian computed through `diag_jacobian`, sample_shape passed: ',
      np.concatenate(diag_jacobian_shape_passed_, -1))
print('hessian computed through `diag_jacobian`, sample_shape skipped',
      np.concatenate(diag_jacobian_shape_none_, -1))
```

Args:
  xs: `Tensor` or a python `list` of `Tensors` of real-like dtypes and shapes
    `sample_shape` + `event_shape_i`, where `event_shape_i` can be different
    for different tensors.
  ys: `Tensor` or a python `list` of `Tensors` of the same dtype as `xs`. Must
    broadcast with the shape of `xs`. Can be omitted if `fn` is provided.
  sample_shape: A common `sample_shape` of the input tensors of `xs`. If not
    provided, it is assumed to be `[1]`, which may result in slow performance
    of `diag_jacobian`.
  fn: Python callable that takes `xs` as an argument (or `*xs`, if it is a
    list) and returns `ys`. Can be skipped if `ys` is provided and
    `tf.enable_eager_execution()` is disabled.
  parallel_iterations: `int` that specifies the allowed number of coordinates
    of the input tensor `xs` for which the partial derivatives `dys_i/dxs_i`
    can be computed in parallel.
  name: Python `str` name prefixed to `Ops` created by this function.
    Default value: `None` (i.e., "diag_jacobian").

Returns:
  ys: a list, which coincides with the input `ys`, when provided. If the
    input `ys` is None, `fn(*xs)` gets computed and returned as a list.
  jacobians_diag_res: a `Tensor` or a Python list of `Tensor`s of the same
    dtypes and shapes as the input `xs`. This is the diagonal of the Jacobian
    of ys wrt xs.

Raises:
  ValueError: if lists `xs` and `ys` have different length, or both `ys` and
    `fn` are `None`, or `fn` is `None` in the eager execution mode.
28,294
import pkg_resources

def _requirements_sanitize(req_list):
    filtered_req_list = (
        _requirement_find_lowest_possible(req) for req in
        (pkg_resources.Requirement.parse(s) for s in req_list)
        if _requirement_filter_by_marker(req)
    )
    return [" ".join(req) for req in filtered_req_list]
Cleanup a list of requirement strings (e.g. from requirements.txt) to only contain entries valid for this platform and with the lowest required version only. Example ------- >>> from sys import version_info >>> _requirements_sanitize([ ... 'foo>=3.0', ... "monotonic>=1.0,>0.1;python_version=='2.4'", ... "bar>1.0;python_version=='{}.{}'".format(version_info[0], version_info[1]) ... ]) ['foo >= 3.0', 'bar > 1.0']
28,295
import traceback

import six

def parse(inp, format=None, encoding='utf-8', force_types=True):
    proper_inp = inp
    if hasattr(inp, 'read'):
        proper_inp = inp.read()
    # If `proper_inp` is unicode, encode it with the given encoding.
    if isinstance(proper_inp, six.text_type):
        proper_inp = proper_inp.encode(encoding)
    # Try to guess the markup format from the file name, if available.
    fname = None
    if hasattr(inp, 'name'):
        fname = inp.name
    fmt = _get_format(format, fname, proper_inp)
    proper_inp = six.BytesIO(proper_inp)
    try:
        res = _do_parse(proper_inp, fmt, encoding, force_types)
    except Exception as e:
        raise AnyMarkupError(e, traceback.format_exc())
    if res is None:
        res = {}
    return res
Parse input from file-like object, unicode string or byte string.

Args:
    inp: file-like object, unicode string or byte string with the markup
    format: explicitly override the guessed `inp` markup format
    encoding: `inp` encoding, defaults to utf-8
    force_types:
        if `True`, integers, floats, booleans and none/null
            are recognized and returned as proper types instead of strings;
        if `False`, everything is converted to strings
        if `None`, backend return value is used
Returns:
    parsed input (dict or list) containing unicode values
Raises:
    AnyMarkupError if a problem occurs while parsing `inp`
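Usage sketch of the anymarkup-style API this function implements (formats guessed from content, or given explicitly):

print(parse(b'{"a": 1}'))                    # {'a': 1}, format guessed as JSON
print(parse('a: [1, 2]\n', format='yaml'))   # {'a': [1, 2]}
print(parse('x = 1\n', format='toml', force_types=False))  # {'x': '1'}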
28,296
import hashlib
import math

# byte_cls, int_types, chr_cls, int_from_bytes, int_to_bytes, pretty_message
# and type_name are helpers from the surrounding (oscrypto-style) package.


def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
    if not isinstance(password, byte_cls):
        raise TypeError(pretty_message(
            'password must be a byte string, not %s',
            type_name(password)
        ))

    if not isinstance(salt, byte_cls):
        raise TypeError(pretty_message(
            'salt must be a byte string, not %s',
            type_name(salt)
        ))

    if not isinstance(iterations, int_types):
        raise TypeError(pretty_message(
            'iterations must be an integer, not %s',
            type_name(iterations)
        ))

    if iterations < 1:
        raise ValueError(pretty_message(
            'iterations must be greater than 0 - is %s',
            repr(iterations)
        ))

    if not isinstance(key_length, int_types):
        raise TypeError(pretty_message(
            'key_length must be an integer, not %s',
            type_name(key_length)
        ))

    if key_length < 1:
        raise ValueError(pretty_message(
            'key_length must be greater than 0 - is %s',
            repr(key_length)
        ))

    if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
        raise ValueError(pretty_message(
            'hash_algorithm must be one of "md5", "sha1", "sha224", "sha256", '
            '"sha384", "sha512", not %s',
            repr(hash_algorithm)
        ))

    if id_ not in set([1, 2, 3]):
        raise ValueError(pretty_message(
            'id_ must be one of 1, 2, 3, not %s',
            repr(id_)
        ))

    # The password is encoded as big-endian UTF-16 with a NULL terminator.
    utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'

    algo = getattr(hashlib, hash_algorithm)

    # u is the hash output size, v the hash block size, both in bytes.
    u = {
        'md5': 16,
        'sha1': 20,
        'sha224': 28,
        'sha256': 32,
        'sha384': 48,
        'sha512': 64
    }[hash_algorithm]

    if hash_algorithm in ['sha384', 'sha512']:
        v = 128
    else:
        v = 64

    # Step 1: the diversifier, v copies of the usage id.
    d = chr_cls(id_) * v

    # Step 2: pad the salt to a multiple of v bytes.
    s = b''
    if salt != b'':
        s_len = v * int(math.ceil(float(len(salt)) / v))
        while len(s) < s_len:
            s += salt
        s = s[0:s_len]

    # Step 3: pad the password to a multiple of v bytes.
    p = b''
    if utf16_password != b'':
        p_len = v * int(math.ceil(float(len(utf16_password)) / v))
        while len(p) < p_len:
            p += utf16_password
        p = p[0:p_len]

    # Step 4
    i = s + p

    # Step 5: number of hash-sized blocks needed for the output.
    c = int(math.ceil(float(key_length) / u))

    a = b'\x00' * (c * u)

    for num in range(1, c + 1):
        # Step 6A: iterated hash of D || I.
        a2 = algo(d + i).digest()
        for _ in range(2, iterations + 1):
            a2 = algo(a2).digest()

        if num < c:
            # Step 6B: expand a2 to v bytes and add 1.
            b = b''
            while len(b) < v:
                b += a2
            b = int_from_bytes(b[0:v]) + 1

            # Step 6C: add b to each v-byte chunk of I, modulo 2**(v*8).
            for num2 in range(0, len(i) // v):
                start = num2 * v
                end = (num2 + 1) * v
                i_num2 = i[start:end]

                i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)

                # Ensure the new slice is the right size.
                i_num2_l = len(i_num2)
                if i_num2_l > v:
                    i_num2 = i_num2[i_num2_l - v:]

                i = i[0:start] + i_num2 + i[end:]

        # Copy this block of output into place.
        begin = (num - 1) * u
        to_copy = min(key_length, u)
        a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]

    return a[0:key_length]
KDF from RFC7292 appendix B.2 - https://tools.ietf.org/html/rfc7292#page-19

:param hash_algorithm:
    The string name of the hash algorithm to use: "md5", "sha1", "sha224",
    "sha256", "sha384", "sha512"

:param password:
    A byte string of the password to use as input to the KDF

:param salt:
    A cryptographic random byte string

:param iterations:
    The number of iterations to use when deriving the key

:param key_length:
    The length of the desired key in bytes

:param id_:
    The ID of the usage - 1 for key, 2 for iv, 3 for mac

:return:
    The derived key as a byte string
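A usage sketch: deriving a 24-byte key (id_=1) for a PKCS#12 container; the password, salt, and iteration count are illustrative:

import os

key = pkcs12_kdf('sha1', b'correct horse', os.urandom(8), 2048, 24, 1)
assert len(key) == 24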
28,297
def get_or_create(self, defaults=None, **kwargs):
    try:
        return self.get(**kwargs), False
    except ObjectDoesNotExist:
        pass
    data = defaults or {}
    data.update(kwargs)
    return self._model_class(**data).blocking_save(), True
Looks up an object with the given kwargs, creating a new one if necessary. Args: defaults (dict): Used when we create a new object. Must map to fields of the model. \*\*kwargs: Used both for filtering and new object creation. Returns: A tuple of (object, created), where created is a boolean variable specifies whether the object was newly created or not. Example: In the following example, *code* and *name* fields are used to query the DB. .. code-block:: python obj, is_new = Permission.objects.get_or_create({'description': desc}, code=code, name=name) {description: desc} dict is just for new creations. If we can't find any records by filtering on *code* and *name*, then we create a new object by using all of the inputs.
28,298
import numpy as np

def density_2d(self, x, y, rho0, Rs, center_x=0, center_y=0):
    x_ = x - center_x
    y_ = y - center_y
    r = np.sqrt(x_**2 + y_**2)
    X = r / Rs
    sigma0 = self.rho2sigma(rho0, Rs)
    # Nudge X away from 1 to avoid the (X**2 - 1) singularity.
    if isinstance(X, (int, float)):
        if X == 1:
            X = 1.000001
    else:
        X[X == 1] = 1.000001
    sigma = sigma0 / (X**2 - 1)**2 * (-3 + (2 + X**2) * self._F(X))
    return sigma
projected density

:param x: x-coordinate(s)
:param y: y-coordinate(s)
:param rho0: 3D density normalization
:param Rs: scale radius
:param center_x: profile center (x)
:param center_y: profile center (y)
:return: projected surface density at (x, y)
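Usage sketch (a lenstronomy-style profile object is assumed; `rho2sigma` and `_F` come from the class this method belongs to):

import numpy as np

x = np.linspace(-2.0, 2.0, 5)
y = np.zeros_like(x)
sigma = profile.density_2d(x, y, rho0=1.0, Rs=1.0)
print(sigma.shape)   # (5,)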
28,299
def cmd_cminv(self, ch=None):
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    viewer.invert_cmap()
cminv ch=chname

Invert the color map in the channel/viewer