Columns: Unnamed: 0 (int64, values 0–389k); code (string, lengths 26–79.6k); docstring (string, lengths 1–46.9k)
12,000
def find_session(self, session_name):
    if not isinstance(session_name, basestring):
        raise TypeError("session_name can only be an instance of type basestring")
    sessions = self._call("findSession", in_p=[session_name])
    sessions = [IGuestSession(a) for a in sessions]
    return sessions
Finds guest sessions by their friendly name and returns an interface array with all found guest sessions. in session_name of type str The session's friendly name to find. Wildcards like ? and * are allowed. return sessions of type :class:`IGuestSession` Array with all guest sessions found matching the name specified.
12,001
def _convert_old_schema(self, parameters):
    merged = []
    for parameter in parameters:
        segments = parameter.name.split('.')
        _merge_associative_list(merged, segments, parameter)
    result = [self._inner_convert_old_schema(node, 1) for node in merged]
    return result
Convert an ugly old schema, using dotted names, to the hot new schema, using List and Structure. The old schema assumes that every other dot implies an array. So a list of two parameters, [Integer("foo.bar.baz.quux"), Integer("foo.bar.shimmy")] becomes:: [List( "foo", item=Structure( fields={"baz": List(item=Integer()), "shimmy": Integer()}))] By design, the old schema syntax ignored the names "bar" and "quux".
12,002
def canonical_new_peer_list(self, peers_to_add):
    new_peers = list(set(self.new_peers + peers_to_add))
    random.shuffle(new_peers)
    tmp = []
    for peer in new_peers:
        tmp.append(self.canonical_peer(peer))
    new_peers = tmp
    if self.my_hostport in new_peers:
        new_peers.remove(self.my_hostport)
    return new_peers
Make a list of canonical new peers, using self.new_peers and the given peers to add. Return a shuffled list of canonicalized host:port strings.
12,003
def to_unix_ts(start_time):
    if isinstance(start_time, datetime):
        if is_timezone_aware(start_time):
            start_time = start_time.astimezone(pytz.utc)
        else:
            log.warning(
                "Non timezone-aware datetime object passed to IncrementalEndpoint. "
                "The Zendesk API expects UTC time, if this is not the case results will be incorrect!"
            )
        unix_time = calendar.timegm(start_time.timetuple())
    else:
        unix_time = start_time
    return int(unix_time)
Given a datetime object, returns its value as a unix timestamp
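A minimal usage sketch of to_unix_ts above, assuming the function is importable and datetime/pytz are available (the call sites are illustrative only):

from datetime import datetime
import pytz

aware = datetime(2020, 1, 1, 12, 0, tzinfo=pytz.utc)  # timezone-aware input is normalized to UTC
to_unix_ts(aware)       # -> 1577880000
to_unix_ts(1577880000)  # non-datetime input is passed through and cast to int -> 1577880000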
12,004
def get_target(self): with open( % self.path) as f: for line in f.readlines(): matches = re.findall(r, line) if len(matches) == 0: continue return matches[0].replace(, ) return % (config.sdk_version)
Reads the android target based on project.properties file. Returns A string containing the project target (android-23 being the default if none is found)
12,005
def event_later(self, delay, data_tuple): return self._base.event_later(delay, self.make_event_data(*data_tuple))
Schedule an event to be emitted after a delay. :param delay: number of seconds :param data_tuple: a 2-tuple (flavor, data) :return: an event object, useful for cancelling.
12,006
def trash_for(self, user):
    return self.filter(
        recipient=user,
        recipient_deleted_at__isnull=False,
    ) | self.filter(
        sender=user,
        sender_deleted_at__isnull=False,
    )
Returns all messages that were either received or sent by the given user and are marked as deleted.
12,007
def remove_global_exception_handler(handler):
    for i, cb in enumerate(state.global_exception_handlers):
        cb = cb()
        if cb is not None and cb is handler:
            state.global_exception_handlers.pop(i)
            log.info("removing a global exception handler")
            return True
    return False
remove a callback from the list of global exception handlers :param handler: the callback, previously added via :func:`global_exception_handler`, to remove :type handler: function :returns: bool, whether the handler was found (and therefore removed)
12,008
def emit(_): if not initialized: raise NotInitialized view = { : __version__, : {}, : {}, : {}, : {}, : {}, } for (ty, module, name), metric in six.iteritems(all_metrics): view[ty][ % (module, name)] = metric.view() marshalled_view = marshal.dumps(view) if len(marshalled_view) > MAX_MARSHALLED_VIEW_SIZE: log.warn( % (len(marshalled_view), MAX_MARSHALLED_VIEW_SIZE)) return marshalled_metrics_mmap.seek(0) try: uwsgi.lock() marshalled_metrics_mmap.write(marshalled_view) finally: uwsgi.unlock()
Serialize metrics to the memory mapped buffer.
12,009
def refresh_save_all_action(self):
    editorstack = self.get_current_editorstack()
    if editorstack:
        state = any(finfo.editor.document().isModified() or finfo.newly_created
                    for finfo in editorstack.data)
        self.save_all_action.setEnabled(state)
Enable 'Save All' if there are files to be saved
12,010
def codons(self, frame):
    start = frame
    while start + 3 <= self.size:
        yield self.sequence[start:start + 3], start
        start += 3
A generator that yields DNA in codon (three-base) blocks. "frame" counts from 0. This function yields a tuple (triplet, index) with index relative to the original DNA sequence.
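A small illustrative sketch of the generator above; `Seq` here is a hypothetical stand-in for whatever class actually provides the `sequence` and `size` attributes:

class Seq:
    def __init__(self, sequence):
        self.sequence = sequence
        self.size = len(sequence)
    codons = codons  # reuse the generator defined above

s = Seq("ATGGCCTAA")
list(s.codons(frame=0))  # [('ATG', 0), ('GCC', 3), ('TAA', 6)]
list(s.codons(frame=1))  # [('TGG', 1), ('CCT', 4)]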
12,011
def get_coord_system_name(header): try: ctype = header[].strip().upper() except KeyError: try: ra = header[] try: equinox = float(header[]) if equinox < 1984.0: radecsys = else: radecsys = except KeyError: radecsys = return radecsys.lower() except KeyError: return match = re.match(r, ctype) if match: return match = re.match(r, ctype) if match: return match = re.match(r, ctype) if match: hdkey = try: radecsys = header[hdkey] except KeyError: try: hdkey = radecsys = header[hdkey] except KeyError: try: equinox = float(header[]) if equinox < 1984.0: radecsys = else: radecsys = except KeyError: radecsys = radecsys = radecsys.strip() return radecsys.lower() match = re.match(r, ctype) if match: return match = re.match(r, ctype) if match: return match = re.match(r, ctype) if match: return match = re.match(r, ctype) if match: return return
Return an appropriate key code for the axes coordinate system by examining the FITS header.
12,012
def get_path_to_repo(self, repo: str) -> Path: return Path(self.base_dir) / "repos" / self.repo_id(repo)
Returns a :class:`Path <pathlib.Path>` to the location where all the branches from this repo are stored. :param repo: Repo URL :return: Path to where branches from this repository are cloned.
12,013
def _infer_unknown_dims(old_shape, shape_spec): numerator_elements = [x if x else 0 for x in old_shape] denominator = 1 unknowns = 0 normalized_shape_spec = [] for s in shape_spec: (old_shape, shape_spec)) if numerator and unknowns: unknown_elements = int(numerator / denominator) return [unknown_elements if x == -1 else x for x in result] else: return result
Attempts to replace DIM_REST (if present) with a value. Because of `pt.DIM_SAME`, this has more information to compute a shape value than the default reshape's shape function. Args: old_shape: The current shape of the Tensor as a list. shape_spec: A shape spec, see `pt.reshape`. Returns: A list derived from `shape_spec` with `pt.DIM_SAME` replaced by the value from old_shape (if possible) and `pt.DIM_REST` computed (if possible). Raises: ValueError: If there are too many unknown dimensions or the shape_spec requires an out of range DIM_SAME. TypeError: If shape_spec is not iterable.
12,014
def syncTree(self, recursive=False, blockSignals=True): tree = self.treeWidget() if not tree: return items = [self] if recursive: items += list(self.children(recursive=True)) if blockSignals and not tree.signalsBlocked(): blocked = True tree.blockSignals(True) else: blocked = False date_format = self.ganttWidget().dateFormat() for item in items: for c, col in enumerate(tree.columns()): value = item.property(col, ) item.setData(c, Qt.EditRole, wrapVariant(value)) if blocked: tree.blockSignals(False)
Syncs the information from this item to the tree.
12,015
def _replace_envvar(s, _):
    e = s.split(":")
    if len(e) > 3 or len(e) == 1 or e[0] != "env":
        raise ValueError()
    elif len(e) == 2:
        return os.environ[e[1]]
    else:
        return os.environ.get(e[1], e[2])
env:KEY or env:KEY:DEFAULT
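A short behavioural sketch of _replace_envvar above, assuming it is in scope; the second argument is unused by the function, and the environment key names are made up:

import os
os.environ["DEMO_KEY"] = "/home/demo"
_replace_envvar("env:DEMO_KEY", None)              # -> '/home/demo'
_replace_envvar("env:MISSING_KEY:fallback", None)  # -> 'fallback' (default used)
_replace_envvar("plain:KEY", None)                 # raises ValueError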
12,016
def branches(self): branches = [] if self._taken_branch: branches += [(self._taken_branch, )] if self._not_taken_branch: branches += [(self._not_taken_branch, )] if self._direct_branch: branches += [(self._direct_branch, )] return branches
Get basic block branches.
12,017
def extend_to_data(self, data, **kwargs): kernel = self.build_kernel_to_data(data, **kwargs) if sparse.issparse(kernel): pnm = sparse.hstack( [sparse.csr_matrix(kernel[:, self.clusters == i].sum( axis=1)) for i in np.unique(self.clusters)]) else: pnm = np.array([np.sum( kernel[:, self.clusters == i], axis=1).T for i in np.unique(self.clusters)]).transpose() pnm = normalize(pnm, norm=, axis=1) return pnm
Build transition matrix from new data to the graph Creates a transition matrix such that `Y` can be approximated by a linear combination of landmarks. Any transformation of the landmarks can be trivially applied to `Y` by performing `transform_Y = transitions.dot(transform)` Parameters ---------- Y: array-like, [n_samples_y, n_features] new data for which an affinity matrix is calculated to the existing data. `n_features` must match either the ambient or PCA dimensions Returns ------- transitions : array-like, [n_samples_y, self.data.shape[0]] Transition matrix from `Y` to `self.data`
12,018
def wrap_paragraphs(content, hard_breaks=False): paras = filter(None, [para.strip() for para in content.split()]) paras = [build_paragraph(para, hard_breaks) for para in paras] return .join(paras)
Returns *content* with all paragraphs wrapped in `<p>` tags. If *hard_breaks* is set, line breaks are converted to `<br />` tags.
12,019
def compile_dir(dfn, optimize_python=True): if PYTHON is None: return if int(PYTHON_VERSION[0]) >= 3: args = [PYTHON, , , , , dfn] else: args = [PYTHON, , , , dfn] if optimize_python: args.insert(1, ) return_code = subprocess.call(args) if return_code != 0: print(.format(.join(args))) print( ) exit(1)
Compile *.py in directory `dfn` to *.pyo
12,020
def mtf_image_transformer_base_imagenet_mp64():
    hparams = mtf_image_transformer_base_imagenet()
    hparams.mesh_shape = "model:8;batch:4"
    hparams.layout = "batch:batch;d_ff:model;heads:model"
    hparams.batch_size = 8
    hparams.img_len = 64
    hparams.num_decoder_layers = 8
    return hparams
Model parallel ImageNet parameters.
12,021
def create_placeholder_access_object(self, instance):
    related_model = self.related_model

    def get_related_model_objects(name):
        return related_model.objects.get(
            parent_type=ContentType.objects.get_for_model(type(instance)),
            parent_id=instance.id,
            slot=name,
        ).get_content_items()

    class PlaceholderAccess(object):
        def __getattribute__(self, name):
            try:
                return get_related_model_objects(name)
            except related_model.DoesNotExist:
                return super(PlaceholderAccess, self).__getattribute__(name)

        def __getitem__(self, item):
            try:
                return get_related_model_objects(item)
            except related_model.DoesNotExist:
                raise KeyError

    return PlaceholderAccess()
Created objects with placeholder slots as properties. Each placeholder created for an object will be added to a `PlaceHolderAccess` object as a set property.
12,022
def initialize(config): if in config: client = utils.find_entrypoint(, config[], required=True) else: client = redis.StrictRedis kwargs = {} for cfg_var, type_ in REDIS_CONFIGS.items(): if cfg_var in config: kwargs[cfg_var] = type_(config[cfg_var]) if not in kwargs and not in kwargs: raise redis.ConnectionError("No host specified for redis database") cpool_class = None cpool = {} extra_kwargs = {} for key, value in config.items(): if key.startswith(): _dummy, _sep, varname = key.partition() if varname == : cpool[varname] = utils.find_entrypoint( , value, required=True) elif varname == : cpool[varname] = int(value) elif varname == : cpool[varname] = utils.find_entrypoint( , value, required=True) else: cpool[varname] = value elif key not in REDIS_CONFIGS and key not in REDIS_EXCLUDES: extra_kwargs[key] = value if cpool: cpool_class = redis.ConnectionPool if in config: cpool_class = utils.find_entrypoint(, config[], required=True) if cpool_class: cpool.update(kwargs) if not in cpool: if in cpool: if in cpool: del cpool[] if in cpool: del cpool[] cpool[] = cpool[] del cpool[] cpool[] = redis.UnixDomainSocketConnection else: cpool[] = redis.Connection kwargs = dict(connection_pool=cpool_class(**cpool)) kwargs.update(extra_kwargs) return client(**kwargs)
Initialize a connection to the Redis database.
12,023
def sheets(self): data = Dict() for src in [src for src in self.zipfile.namelist() if in src]: name = os.path.splitext(os.path.basename(src))[0] xml = self.xml(src) data[name] = xml return data
return the sheets of data.
12,024
def main(args,parser,subparser=None): base = args.base if base is None: base = os.environ.get() if base is None: bot.error("You must set a base of experiments with --base" % base) sys.exit(1) if not os.path.exists(base): bot.error("Base folder %s does not exist." % base) sys.exit(1) experiments = args.experiments if experiments is None: experiments = " ".join(glob("%s/*" % base)) os.environ[] = experiments if args.vars is not None: if os.path.exists(args.vars): os.environ[] = args.vars os.environ[] = args.delim else: bot.warning( %args.vars) subid = os.environ.get() if args.subid is not None: subid = args.subid os.environ[] = subid os.environ[] = str(args.disable_randomize) os.environ[] = base from expfactory.server import start start(port=5000)
this is the main entrypoint for a container based web server, with most of the variables coming from the environment. See the Dockerfile template for how this function is executed.
12,025
def get_all(self, sort_order=None, sort_target=): return self.get( key=_encode(b), metadata=True, sort_order=sort_order, sort_target=sort_target, range_end=_encode(b), )
Get all keys currently stored in etcd. :returns: sequence of (value, metadata) tuples
12,026
def PartialDynamicSystem(self, ieq, variable):
    if ieq == 0:
        if variable == self.physical_nodes[0].variable:
            return [WeightedSum([self.physical_nodes[1].variable, self.voltage_signal],
                                variable, [1, -1])]
        elif variable == self.physical_nodes[1].variable:
            return [WeightedSum([self.physical_nodes[0].variable, self.voltage_signal],
                                variable, [1, 1])]
returns dynamical system blocks associated to output variable
12,027
def get_path_for_termid(self, termid):
    terminal_id = self.terminal_for_term.get(termid)
    paths = self.paths_for_terminal[terminal_id]
    labels = [self.label_for_nonter[nonter] for nonter in paths[0]]
    return labels
This function returns the path (in terms of phrase types) from one term to the root @type termid: string @param termid: one term id @rtype: list @return: the path, list of phrase types
12,028
def find_path(network, pore_pairs, weights=None): r Ps = sp.array(pore_pairs, ndmin=2) if weights is None: weights = sp.ones_like(network.Ts) graph = network.create_adjacency_matrix(weights=weights, fmt=, drop_zeros=False) paths = csgraph.dijkstra(csgraph=graph, indices=Ps[:, 0], return_predecessors=True)[1] pores = [] throats = [] for row in range(0, sp.shape(Ps)[0]): j = Ps[row][1] ans = [] while paths[row][j] > -9999: ans.append(j) j = paths[row][j] ans.append(Ps[row][0]) ans.reverse() pores.append(sp.array(ans, dtype=int)) Ts = network.find_neighbor_throats(pores=ans, mode=) throats.append(sp.array(Ts, dtype=int)) pdict = PrintableDict dict_ = pdict(**{: pores, : throats}) return dict_
r""" Find the shortest path between pairs of pores. Parameters ---------- network : OpenPNM Network Object The Network object on which the search should be performed pore_pairs : array_like An N x 2 array containing N pairs of pores for which the shortest path is sought. weights : array_like, optional An Nt-long list of throat weights for the search. Typically this would be the throat lengths, but could also be used to represent the phase configuration. If no weights are given then the standard topological connections of the Network are used. Returns ------- A dictionary containing both the pores and throats that define the shortest path connecting each pair of input pores. Notes ----- The shortest path is found using Dijkstra's algorithm included in the scipy.sparse.csgraph module TODO: The returned throat path contains the correct values, but not necessarily in the true order Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[3, 3, 3]) >>> a = op.topotools.find_path(network=pn, pore_pairs=[[0, 4], [0, 10]]) >>> a['pores'] [array([0, 1, 4]), array([ 0, 1, 10])] >>> a['throats'] [array([ 0, 19]), array([ 0, 37])]
12,029
def blob_services(self):
    api_version = self._get_api_version()
    if api_version == '2018-07-01':
        from .v2018_07_01.operations import BlobServicesOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2018-07-01: :class:`BlobServicesOperations<azure.mgmt.storage.v2018_07_01.operations.BlobServicesOperations>`
12,030
def configure_interface(self, name, commands):
    commands = make_iterable(commands)
    commands.insert(0, 'interface %s' % name)
    return self.configure(commands)
Configures the specified interface with the commands Args: name (str): The interface name to configure commands: The commands to configure in the interface Returns: True if the commands completed successfully
12,031
def _init_sbc_config(self, config): if (config.channel_mode == SBCChannelMode.CHANNEL_MODE_MONO): self.config.mode = self.codec.SBC_MODE_MONO elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_STEREO): self.config.mode = self.codec.SBC_MODE_STEREO elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_DUAL): self.config.mode = self.codec.SBC_MODE_DUAL_CHANNEL elif (config.channel_mode == SBCChannelMode.CHANNEL_MODE_JOINT_STEREO): self.config.mode = self.codec.SBC_MODE_JOINT_STEREO if (config.frequency == SBCSamplingFrequency.FREQ_16KHZ): self.config.frequency = self.codec.SBC_FREQ_16000 elif (config.frequency == SBCSamplingFrequency.FREQ_32KHZ): self.config.frequency = self.codec.SBC_FREQ_32000 elif (config.frequency == SBCSamplingFrequency.FREQ_44_1KHZ): self.config.frequency = self.codec.SBC_FREQ_44100 elif (config.frequency == SBCSamplingFrequency.FREQ_48KHZ): self.config.frequency = self.codec.SBC_FREQ_48000 if (config.allocation_method == SBCAllocationMethod.LOUDNESS): self.config.allocation = self.codec.SBC_AM_LOUDNESS elif (config.allocation_method == SBCAllocationMethod.SNR): self.config.allocation = self.codec.SBC_AM_SNR if (config.subbands == SBCSubbands.SUBBANDS_4): self.config.subbands = self.codec.SBC_SB_4 elif (config.subbands == SBCSubbands.SUBBANDS_8): self.config.subbands = self.codec.SBC_SB_8 if (config.block_length == SBCBlocks.BLOCKS_4): self.config.blocks = self.codec.SBC_BLK_4 elif (config.block_length == SBCBlocks.BLOCKS_8): self.config.blocks = self.codec.SBC_BLK_8 elif (config.block_length == SBCBlocks.BLOCKS_12): self.config.blocks = self.codec.SBC_BLK_12 elif (config.block_length == SBCBlocks.BLOCKS_16): self.config.blocks = self.codec.SBC_BLK_16 self.config.bitpool = config.max_bitpool self.config.endian = self.codec.SBC_LE
Translator from namedtuple config representation to the sbc_t type. :param namedtuple config: See :py:class:`.SBCCodecConfig` :returns:
12,032
def last_or_default(self, default, predicate=None):
    if self.closed():
        raise ValueError("Attempt to call last_or_default() on a "
                         "closed Queryable.")
    return self._last_or_default(default) if predicate is None \
        else self._last_or_default_predicate(default, predicate)
The last element (optionally satisfying a predicate) or a default. If the predicate is omitted or is None this query returns the last element in the sequence; otherwise, it returns the last element in the sequence for which the predicate evaluates to True. If there is no such element the value of the default argument is returned. Note: This method uses immediate execution. Args: default: The value which will be returned if either the sequence is empty or there are no elements matching the predicate. predicate: An optional unary predicate function, the only argument to which is the element. The return value should be True for matching elements, otherwise False. If the predicate is omitted or None the last element of the source sequence will be returned. Returns: The last element of the sequence if predicate is None, otherwise the last element for which the predicate returns True. If there is no such element, the default argument is returned. Raises: ValueError: If the Queryable is closed. TypeError: If the predicate is not callable.
12,033
def lein(word, max_length=4, zero_pad=True): return Lein().encode(word, max_length, zero_pad)
Return the Lein code for a word. This is a wrapper for :py:meth:`Lein.encode`. Parameters ---------- word : str The word to transform max_length : int The length of the code returned (defaults to 4) zero_pad : bool Pad the end of the return value with 0s to achieve a max_length string Returns ------- str The Lein code Examples -------- >>> lein('Christopher') 'C351' >>> lein('Niall') 'N300' >>> lein('Smith') 'S210' >>> lein('Schmidt') 'S521'
12,034
def image_alias_delete(image, alias,
                       remote_addr=None, cert=None, key=None, verify_cert=True):
    image = _verify_image(image, remote_addr, cert, key, verify_cert)
    try:
        image.delete_alias(alias)
    except pylxd.exceptions.LXDAPIException:
        return False
    return True
Delete an alias (this is currently not restricted to the image) image : An image alias, a fingerprint or a image object alias : The alias to delete remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_alias_add xenial/amd64 x "Short version of xenial/amd64"
12,035
def move_entry(self, entry=None, group=None):
    if entry is None or group is None or type(entry) is not v1Entry or \
            type(group) is not v1Group:
        raise KPError("Need an entry and a group.")
    elif entry not in self.entries:
        raise KPError("No entry found.")
    elif group in self.groups:
        entry.group.entries.remove(entry)
        group.entries.append(entry)
        entry.group_id = group.id_
        entry.group = group
        return True
    else:
        raise KPError("No group found.")
Move an entry to another group. A v1Group group and a v1Entry entry are needed.
12,036
def style_node(self, additional_style_attrib=None):
    style_attrib = {"style:name": self.name, "style:family": self.FAMILY}
    if additional_style_attrib:
        style_attrib.update(additional_style_attrib)
    if self.PARENT_STYLE_DICT:
        style_attrib.update(self.PARENT_STYLE_DICT)
    node = el("style:style", attrib=style_attrib)
    props = sub_el(node, self.STYLE_PROP, attrib=self.styles)
    return node
generate a style node (for automatic-styles) could specify additional attributes such as 'style:parent-style-name' or 'style:list-style-name'
12,037
def TryLink( self, text, extension ): return self.TryBuild(self.env.Program, text, extension )
Compiles the program given in text to an executable env.Program, using extension as file extension (e.g. '.c'). Returns 1, if compilation was successful, 0 otherwise. The target is saved in self.lastTarget (for further processing).
12,038
def simplified(self):
    if self._simplified is None:
        self._simplified = SimplifiedChainTransform(self)
    return self._simplified
A simplified representation of the same transformation.
12,039
def wnfild(small, window):
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    small = ctypes.c_double(small)
    libspice.wnfild_c(small, ctypes.byref(window))
    return window
Fill small gaps between adjacent intervals of a double precision window. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnfild_c.html :param small: Limiting measure of small gaps. :type small: float :param window: Window to be filled :type window: spiceypy.utils.support_types.SpiceCell :return: Filled Window. :rtype: spiceypy.utils.support_types.SpiceCell
12,040
def pad_light(self, values):
    while len(values) < 4:
        values.append(0.)
    return list(map(float, values))
Accept an array of up to 4 values, and return an array of 4 values. If the input array is less than length 4, pad it with zeroes until it is length 4. Also ensure each value is a float
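A quick illustration of the padding behaviour described above; since pad_light takes self but never uses it, None stands in for it in this sketch:

pad_light(None, [1, 0.5])      # -> [1.0, 0.5, 0.0, 0.0]
pad_light(None, [1, 2, 3, 4])  # -> [1.0, 2.0, 3.0, 4.0]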
12,041
def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): x_shape = common_layers.shape_list(x) target_shape = common_layers.shape_list(targets) hidden_size = x_shape[-1] means, _, _ = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) targets = tf.reshape(targets, [-1]) one_hot_targets = tf.one_hot(targets, codebook_size) target_means = tf.matmul(one_hot_targets, means) discrete_x, code_loss, distances = vq_body( x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update) logits = -distances targets_loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=targets) targets_loss = tf.reduce_mean(targets_loss) x_means = tf.matmul(discrete_x, means) x_means = x + tf.stop_gradient(x_means - x) discrete_x = tf.reshape(discrete_x, x_shape[:-1] + [codebook_size]) target_means = tf.reshape(target_means, target_shape + [hidden_size]) return discrete_x, x_means, target_means, code_loss, targets_loss
Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target
12,042
def get_form(self, request, obj=None, **kwargs): parent_id = request.REQUEST.get(, None) if parent_id: return FolderForm else: folder_form = super(FolderAdmin, self).get_form( request, obj=None, **kwargs) def folder_form_clean(form_obj): cleaned_data = form_obj.cleaned_data folders_with_same_name = Folder.objects.filter( parent=form_obj.instance.parent, name=cleaned_data[]) if form_obj.instance.pk: folders_with_same_name = folders_with_same_name.exclude( pk=form_obj.instance.pk) if folders_with_same_name.exists(): raise ValidationError( ) return cleaned_data folder_form.clean = folder_form_clean return folder_form
Returns a Form class for use in the admin add view. This is used by add_view and change_view.
12,043
def _parse_snapshot_share(response, name): snapshot = response.headers.get() return _parse_share(response, name, snapshot)
Extracts snapshot return header.
12,044
def pprint(sequence_file, annotation=None, annotation_file=None,
           block_length=10, blocks_per_line=6):
    annotations = []
    if annotation:
        annotations.append([(first - 1, last) for first, last in annotation])
    try:
        line = next(sequence_file)
        if line.startswith('>'):
            _pprint_fasta(itertools.chain([line], sequence_file),
                          annotations=annotations,
                          annotation_file=annotation_file,
                          block_length=block_length,
                          blocks_per_line=blocks_per_line)
        else:
            _pprint_line(line.strip(),
                         annotations=annotations,
                         annotation_file=annotation_file,
                         block_length=block_length,
                         blocks_per_line=blocks_per_line)
    except StopIteration:
        pass
Pretty-print sequence(s) from a file.
12,045
def detect_language(index_page):
    dom = dhtmlparser.parseString(index_page)
    clean_content = dhtmlparser.removeTags(dom)
    lang = None
    try:
        lang = langdetect.detect(clean_content)
    except UnicodeDecodeError:
        lang = langdetect.detect(clean_content.decode("utf-8"))
    return SourceString(
        lang,
        source="langdetect"
    )
Detect `languages` using `langdetect` library. Args: index_page (str): HTML content of the page you wish to analyze. Returns: obj: One :class:`.SourceString` object.
12,046
def copy(self, key=None):
    other = self.__class__(
        redis=self.redis, key=key, writeback=self.writeback
    )
    other.extend(self)
    return other
Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key.
12,047
def list_space_systems(self, page_size=None): params = {} if page_size is not None: params[] = page_size return pagination.Iterator( client=self._client, path=.format(self._instance), params=params, response_class=mdb_pb2.ListSpaceSystemsResponse, items_key=, item_mapper=SpaceSystem, )
Lists the space systems visible to this client. Space systems are returned in lexicographical order. :rtype: :class:`.SpaceSystem` iterator
12,048
def mutate(self, node, index): assert index < len(OFFSETS), assert isinstance(node, parso.python.tree.Number) val = eval(node.value) + OFFSETS[index] return parso.python.tree.Number( + str(val), node.start_pos)
Modify the numeric value on `node`.
12,049
def create_or_update_issue_remote_links(self, issue_key, link_url, title, global_id=None, relationship=None): url = .format(issue_key=issue_key) data = {: {: link_url, : title}} if global_id: data[] = global_id if relationship: data[] = relationship return self.post(url, data=data)
Add Remote Link to Issue, update url if global_id is passed :param issue_key: str :param link_url: str :param title: str :param global_id: str, OPTIONAL: :param relationship: str, OPTIONAL: Default by built-in method: 'Web Link'
12,050
def make_secure_stub(credentials, user_agent, stub_class, host, extra_options=()):
    channel = make_secure_channel(
        credentials, user_agent, host, extra_options=extra_options
    )
    return stub_class(channel)
Makes a secure stub for an RPC service. Uses / depends on gRPC. :type credentials: :class:`google.auth.credentials.Credentials` :param credentials: The OAuth2 Credentials to use for creating access tokens. :type user_agent: str :param user_agent: The user agent to be used with API requests. :type stub_class: type :param stub_class: A gRPC stub type for a given service. :type host: str :param host: The host for the service. :type extra_options: tuple :param extra_options: (Optional) Extra gRPC options passed when creating the channel. :rtype: object, instance of ``stub_class`` :returns: The stub object used to make gRPC requests to a given API.
12,051
def get_shell_history(): if in globals(): a = list(get_ipython().history_manager.input_hist_raw) a.reverse() return a elif in _os.environ: try: p = _os.path.join(_settings.path_user, ".spyder2", "history.py") a = read_lines(p) a.reverse() return a except: pass else: try: import wx for x in wx.GetTopLevelWindows(): if type(x) in [wx.py.shell.ShellFrame, wx.py.crust.CrustFrame]: a = x.shell.GetText().split(">>>") a.reverse() return a except: pass return []
This only works with some shells.
12,052
def tag_secondary_structure(self, force=False):
    for polymer in self._molecules:
        if polymer.molecule_type == 'protein':
            polymer.tag_secondary_structure(force=force)
    return
Tags each `Monomer` in the `Assembly` with it's secondary structure. Notes ----- DSSP must be available to call. Check by running `isambard.external_programs.dssp.test_dssp`. If DSSP is not available, please follow instruction here to add it: https://github.com/woolfson-group/isambard#external-programs For more information on DSSP see [1]. References ---------- .. [1] Kabsch W, Sander C (1983) "Dictionary of protein secondary structure: pattern recognition of hydrogen-bonded and geometrical features", Biopolymers, 22, 2577-637. Parameters ---------- force : bool, optional If True the tag will be run even if `Monomers` are already tagged
12,053
def update_config(configclass: type(Config)): config = configclass() def print_list(ctx, param, value): configclass.__config_path__, config_path = , configclass.__config_path__
Command line function to update and return a config.
12,054
def list_all(dev: Device):
    for name, service in dev.services.items():
        click.echo(click.style("\nService %s" % name, bold=True))
        for method in service.methods:
            click.echo(" %s" % method.name)
List all available API calls.
12,055
def generate_labels_from_classifications(classifications, timestamps):
    window_length = timestamps[1] - timestamps[0]
    combo_list = [(classifications[k], timestamps[k])
                  for k in range(0, len(classifications))]
    labels = []
    for k, g in itertools.groupby(combo_list, lambda x: x[0]):
        items = list(g)
        start_time = items[0][1]
        end_time = items[-1][1] + window_length
        label_class = items[0][0]
        labels.append(AudacityLabel(start_time, end_time, label_class))
    return labels
This is to generate continuous segments out of classified small windows :param classifications: :param timestamps: :return:
12,056
def get_client(self, initial_timeout=0.1, next_timeout=30):
    try:
        return self._test_client(self._q.get(True, initial_timeout))
    except Empty:
        try:
            self._lock.acquire()
            if self._clients_in_use >= self._max_clients:
                raise _ClientUnavailableError("Too many clients in use")
            return self._test_client(self._make_client())
        except NetworkError:
            if not self._tolerate_error:
                raise
        except _ClientUnavailableError as e:
            try:
                return self._test_client(self._q.get(True, next_timeout))
            except Empty:
                raise e
        finally:
            self._lock.release()
Wait until a client instance is available :param float initial_timeout: how long to wait initially for an existing client to complete :param float next_timeout: if the pool could not obtain a client during the initial timeout, and we have allocated the maximum available number of clients, wait this long until we can retrieve another one :return: A connection object
12,057
def one_way(data, n): term = data.astype() no_term = n - term t_exp = np.mean(term, 0) t_exp = np.array([t_exp, ] * data.shape[0]) nt_exp = n - t_exp t_mss = (term - t_exp) ** 2 / t_exp nt_mss = (no_term - nt_exp) ** 2 / nt_exp chi2 = t_mss + nt_mss return special.chdtrc(1, chi2)
One-way chi-square test of independence. Takes a 1D array as input and compares activation at each voxel to proportion expected under a uniform distribution throughout the array. Note that if you're testing activation with this, make sure that only valid voxels (e.g., in-mask gray matter voxels) are included in the array, or results won't make any sense!
12,058
def _visit_for(self, cls, node, parent):
    newnode = cls(node.lineno, node.col_offset, parent)
    type_annotation = self.check_type_comment(node)
    newnode.postinit(
        target=self.visit(node.target, newnode),
        iter=self.visit(node.iter, newnode),
        body=[self.visit(child, newnode) for child in node.body],
        orelse=[self.visit(child, newnode) for child in node.orelse],
        type_annotation=type_annotation,
    )
    return newnode
visit a For node by returning a fresh instance of it
12,059
def multi_constructor_pkl(loader, tag_suffix, node): mapping = loader.construct_yaml_str(node) if tag_suffix != "" and tag_suffix != u"": raise AssertionError(+tag_suffix+) rval = ObjectProxy(None, {}, yaml.serialize(node)) rval.instance = serial.load(mapping) return rval
Constructor function passed to PyYAML telling it how to load objects from paths to .pkl files. See PyYAML documentation for details on the call signature.
12,060
def run_inference(examples, serving_bundle): batch_size = 64 if serving_bundle.estimator and serving_bundle.feature_spec: preds = serving_bundle.estimator.predict( lambda: tf.data.Dataset.from_tensor_slices( tf.parse_example([ex.SerializeToString() for ex in examples], serving_bundle.feature_spec)).batch(batch_size)) if serving_bundle.use_predict: preds_key = serving_bundle.predict_output_tensor elif serving_bundle.model_type == : preds_key = else: preds_key = values = [] for pred in preds: values.append(pred[preds_key]) return common_utils.convert_prediction_values(values, serving_bundle) elif serving_bundle.custom_predict_fn: values = serving_bundle.custom_predict_fn(examples) return common_utils.convert_prediction_values(values, serving_bundle) else: return platform_utils.call_servo(examples, serving_bundle)
Run inference on examples given model information Args: examples: A list of examples that matches the model spec. serving_bundle: A `ServingBundle` object that contains the information to make the inference request. Returns: A ClassificationResponse or RegressionResponse proto.
12,061
def K_separator_demister_York(P, horizontal=False):
    P = P/psi
    if P < 15:
        if P < 1:
            P = 1
        K = 0.1821 + 0.0029*P + 0.0460*log(P)
    elif P < 40:
        K = 0.35
    else:
        if P > 5500:
            P = 5500
        K = 0.430 - 0.023*log(P)
    K *= foot
    if horizontal:
        K *= 1.25
    return K
r'''Calculates the Sounders Brown `K` factor as used in determining maximum permissible gas velocity in a two-phase separator in either a horizontal or vertical orientation, *with a demister*. This function is a curve fit to [1]_ published in [2]_ and is widely used. For 1 < P < 15 psia: .. math:: K = 0.1821 + 0.0029P + 0.0460\ln P For 15 <= P <= 40 psia: .. math:: K = 0.35 For P < 5500 psia: .. math:: K = 0.430 - 0.023\ln P In the above equations, P is in units of psia. Parameters ---------- P : float Pressure of separator, [Pa] horizontal : bool, optional Whether to use the vertical or horizontal value; horizontal is 1.25 times higher, [-] Returns ------- K : float Sounders Brown Horizontal or vertical `K` factor for two-phase separator design with a demister, [m/s] Notes ----- If the input pressure is under 1 psia, 1 psia is used. If the input pressure is over 5500 psia, 5500 psia is used. Examples -------- >>> K_separator_demister_York(975*psi) 0.08281536035331669 References ---------- .. [2] Otto H. York Company, "Mist Elimination in Gas Treatment Plants and Refineries," Engineering, Parsippany, NJ. .. [1] Svrcek, W. Y., and W. D. Monnery. "Design Two-Phase Separators within the Right Limits" Chemical Engineering Progress, (October 1, 1993): 53-60.
12,062
def decree(cls, path, concrete_start=, **kwargs): try: return cls(_make_decree(path, concrete_start), **kwargs) except KeyError: raise Exception(f)
Constructor for Decree binary analysis. :param str path: Path to binary to analyze :param str concrete_start: Concrete stdin to use before symbolic input :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Decree State :rtype: Manticore
12,063
def put_stream(self, rel_path, metadata=None, cb=None): import Queue import time import threading md5 = metadata.get(, None) if metadata else None acl = ( if metadata.get(, False) else metadata.get(, )) if metadata else path = self._prefix(self._rename(rel_path)) class ThreadUploader(threading.Thread): def __init__(self, n, queue): threading.Thread.__init__(self) self.n = n self.queue = queue def run(self): while True: mp, part_number, buf = self.queue.get() if mp is None: logger.debug( "put_stream: Thread {} exiting".format( self.n)) self.queue.task_done() return logger.debug( "put_stream: Thread {}: processing part: {}".format( self.n, part_number)) t1 = time.time() try: mp.upload_part_from_file(buf, part_number) finally: self.queue.task_done() t2 = time.time() logger.debug("put_stream: Thread {}, part {}. time = {} rate = {} b/s" .format( self.n, part_number, round(t2 - t1, 3), round((float(buf.tell()) / (t2 - t1)), 2))) if metadata is None: metadata = {} if md5: metadata[] = md5 write() or writeline() calls on Schedules a buffer to be sent in a thread by queuing it' logger.debug( "_send_buffer: sending part {} to thread pool size: {}, total_size = {}" .format( self.part_number, self.buffer.tell(), self.total_size)) self.buffer.seek(0) thread_upload_queue.put( (self.mp, self.part_number, self.buffer)) def write(self, d): import io self.buffer.write(d) self.total_size += len(d) if self.buffer.tell() > buffer_size: self._send_buffer() self.part_number += 1 self.buffer = io.BytesIO() def writelines(self, lines): raise NotImplemented() def close(self): if self.buffer.tell() > 0: self._send_buffer() thread_upload_queue.join() for i in range(num_threads): thread_upload_queue.put( (None, None, None)) thread_upload_queue.join() if self.total_size > 0: self.mp.complete_upload() this.bucket.set_acl(acl, path) this.put_metadata(self.rel_path, metadata) def __enter__(self): return self def __exit__(self, type_, value, traceback): if type_: return False self.close() return flo(rel_path)
Return a Flo object that can be written to to send data to S3. This will result in a multi-part upload, possibly with each part being sent in its own thread
12,064
def get_next(self):
    while self.stack:
        if self.stack[-1].wkids:
            node = self.stack[-1].wkids.pop(0)
            if not self.stack[-1].wkids:
                self.stack[-1].wkids = None
            if node in self.history:
                self.cycle_func(node, self.stack)
            else:
                node.wkids = copy.copy(self.kids_func(node, self.stack[-1]))
                self.stack.append(node)
                self.history[node] = None
        else:
            node = self.stack.pop()
            del self.history[node]
            if node:
                if self.stack:
                    parent = self.stack[-1]
                else:
                    parent = None
                self.eval_func(node, parent)
                return node
    return None
Return the next node for this walk of the tree. This function is intentionally iterative, not recursive, to sidestep any issues of stack size limitations.
12,065
def get_matrix_index(graph: BELGraph) -> Set[str]:
    return {
        node.name
        for node in graph
        if isinstance(node, CentralDogma) and node.namespace.upper() == 'HGNC'
    }
Return set of HGNC names from Proteins/Rnas/Genes/miRNA, nodes that can be used by SPIA.
12,066
def update_commands(self, commands_str):
    commands = dict(parse_qsl(commands_str, keep_blank_values=True))
    _if = commands.get("if", self._if)
    if _if:
        self._if = Condition(_if)
    self._set_int(commands, "max_length")
    self._set_int(commands, "min_length")
    self.color = self._check_color(commands.get("color"))
    self.not_zero = "not_zero" in commands or self.not_zero
    self.show = "show" in commands or self.show
    self.soft = "soft" in commands or self.soft
update with commands from the block
12,067
def check_trytes_codec(encoding): if encoding == AsciiTrytesCodec.name: return AsciiTrytesCodec.get_codec_info() elif encoding == AsciiTrytesCodec.compat_name: warn( .format( new_codec=AsciiTrytesCodec.name, old_codec=AsciiTrytesCodec.compat_name, ), DeprecationWarning, ) return AsciiTrytesCodec.get_codec_info() return None
Determines which codec to use for the specified encoding. References: - https://docs.python.org/3/library/codecs.html#codecs.register
12,068
def merge_intervals(intervals):
    if intervals is None:
        return None
    intervals.sort(key=lambda i: i[0])
    out = [intervals.pop(0)]
    for i in intervals:
        if out[-1][-1] >= i[0]:
            out[-1][-1] = max(out[-1][-1], i[-1])
        else:
            out.append(i)
    return out
Merge intervals in the form of a list.
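A doctest-style sketch of the merging behaviour; note that merge_intervals sorts and mutates the list it is given:

>>> merge_intervals([[1, 3], [2, 6], [8, 10], [15, 18]])
[[1, 6], [8, 10], [15, 18]]
>>> merge_intervals(None) is None
True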
12,069
def serialize_to_xml_str(obj_pyxb, pretty=True, strip_prolog=False, xslt_url=None): return serialize_gen(obj_pyxb, None, pretty, strip_prolog, xslt_url)
Serialize PyXB object to pretty printed XML ``str`` for display. Args: obj_pyxb: PyXB object PyXB object to serialize. pretty: bool False: Disable pretty print formatting. XML will not have line breaks. strip_prolog: True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``), from the resulting XML doc. xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: str: Pretty printed XML document
12,070
def get_optimized_molecule(self):
    opt_coor = self.get_optimization_coordinates()
    if len(opt_coor) == 0:
        return None
    else:
        return Molecule(
            self.molecule.numbers,
            opt_coor[-1],
        )
Return a molecule object of the optimal geometry
12,071
def sample_from_distribution(self, distribution, k, proportions=False): dist = self._get_column(distribution) total = sum(dist) assert total > 0 and np.all(dist >= 0), dist = dist/sum(dist) sample = np.random.multinomial(k, dist) if proportions: sample = sample / sum(sample) label = self._unused_label(self._as_label(distribution) + ) return self.with_column(label, sample)
Return a new table with the same number of rows and a new column. The values in the distribution column are define a multinomial. They are replaced by sample counts/proportions in the output. >>> sizes = Table(['size', 'count']).with_rows([ ... ['small', 50], ... ['medium', 100], ... ['big', 50], ... ]) >>> sizes.sample_from_distribution('count', 1000) # doctest: +SKIP size | count | count sample small | 50 | 239 medium | 100 | 496 big | 50 | 265 >>> sizes.sample_from_distribution('count', 1000, True) # doctest: +SKIP size | count | count sample small | 50 | 0.24 medium | 100 | 0.51 big | 50 | 0.25
12,072
def _drop_duplicate_ij(self): self.network[] = list(map(lambda x: tuple(sorted(x)), list( zip(*[self.network[].values, self.network[].values])))) self.network.drop_duplicates([, ], inplace=True) self.network.reset_index(inplace=True, drop=True) self.network.drop(, inplace=True, axis=1)
Drops duplicate entries from the network dataframe.
12,073
def create_many(self, statements):
    create_statements = []
    for statement in statements:
        statement_data = statement.serialize()
        tag_data = list(set(statement_data.pop('tags', [])))
        statement_data['tags'] = tag_data
        if not statement.search_text:
            statement_data['search_text'] = self.tagger.get_bigram_pair_string(statement.text)
        if not statement.search_in_response_to and statement.in_response_to:
            statement_data['search_in_response_to'] = self.tagger.get_bigram_pair_string(statement.in_response_to)
        create_statements.append(statement_data)
    self.statements.insert_many(create_statements)
Creates multiple statement entries.
12,074
async def execute(self, query: str, *args, timeout: float=None) -> str:
    async with self.acquire() as con:
        return await con.execute(query, *args, timeout=timeout)
Execute an SQL command (or commands). Pool performs this operation using one of its connections. Other than that, it behaves identically to :meth:`Connection.execute() <connection.Connection.execute>`. .. versionadded:: 0.10.0
12,075
def setBaudrate(self, baudrate):
    from . import mavutil
    if self.baudrate == baudrate:
        return
    self.baudrate = baudrate
    self.mav.mav.serial_control_send(self.port,
                                     mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE,
                                     0,
                                     self.baudrate,
                                     0, [0]*70)
    self.flushInput()
    self.debug("Changed baudrate %u" % self.baudrate)
set baudrate
12,076
def PopEvent(self): try: macb_group_identifier, content_identifier, event = heapq.heappop( self._heap) if macb_group_identifier == : macb_group_identifier = None return macb_group_identifier, content_identifier, event except IndexError: return None
Pops an event from the heap. Returns: tuple: containing: str: identifier of the event MACB group or None if the event cannot be grouped. str: identifier of the event content. EventObject: event.
12,077
def op( name, labels, predictions, num_thresholds=None, weights=None, display_name=None, description=None, collections=None): import tensorflow.compat.v1 as tf if num_thresholds is None: num_thresholds = _DEFAULT_NUM_THRESHOLDS if weights is None: weights = 1.0 dtype = predictions.dtype with tf.name_scope(name, values=[labels, predictions, weights]): tf.assert_type(labels, tf.bool) f_labels = tf.cast(labels, dtype) predictions = tf.minimum(1.0, tf.maximum(0.0, predictions)) true_labels = f_labels * weights false_labels = (1.0 - f_labels) * weights predictions = tf.reshape(predictions, [-1]) true_labels = tf.reshape(true_labels, [-1, 1]) false_labels = tf.reshape(false_labels, [-1, 1]) bucket_indices = tf.cast( tf.floor(predictions * (num_thresholds - 1)), tf.int32) tp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels, axis=0) fp_buckets = tf.reduce_sum( input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels, axis=0) tp = tf.cumsum(tp_buckets, reverse=True, name=) fp = tf.cumsum(fp_buckets, reverse=True, name=) tn = fp[0] - fp fn = tp[0] - tp precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn) return _create_tensor_summary( name, tp, fp, tn, fn, precision, recall, num_thresholds, display_name, description, collections)
Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied with `weights` which can be used to reweight certain values, or more commonly used for masking values. Args: name: A tag attached to the summary. Used by TensorBoard for organization. labels: The ground truth values. A Tensor of `bool` values with arbitrary shape. predictions: A float32 `Tensor` whose values are in the range `[0, 1]`. Dimensions must match those of `labels`. num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for. Should be `>= 2`. This value should be a constant integer value, not a Tensor that stores an integer. weights: Optional float32 `Tensor`. Individual counts are multiplied by this value. This tensor must be either the same shape as or broadcastable to the `labels` tensor. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A summary operation for use in a TensorFlow graph. The float32 tensor produced by the summary operation is of dimension (6, num_thresholds). The first dimension (of length 6) is of the order: true positives, false positives, true negatives, false negatives, precision, recall.
12,078
def add_template(self, tpl): objcls = self.inner_class.my_type name = getattr(tpl, , ) sdesc = getattr(tpl, , ) hname = getattr(tpl, , ) logger.debug("Adding a %s template: host_name: %s, name: %s, service_description: %s", objcls, hname, name, sdesc) if not name and not hname: msg = "a %s template has been defined without name nor host_name. from: %s" \ % (objcls, tpl.imported_from) tpl.add_error(msg) elif not name and not sdesc: msg = "a %s template has been defined without name nor service_description. from: %s" \ % (objcls, tpl.imported_from) tpl.add_error(msg) elif not name: setattr(tpl, , "%s_%s" % (hname, sdesc)) tpl = self.index_template(tpl) elif name: tpl = self.index_template(tpl) self.templates[tpl.uuid] = tpl logger.debug(, len(self.templates), tpl)
Adds and index a template into the `templates` container. This implementation takes into account that a service has two naming attribute: `host_name` and `service_description`. :param tpl: The template to add :type tpl: :return: None
12,079
def _send(self, data):
    try:
        self._sock.sendto(data.encode(), self._addr)
    except (socket.error, RuntimeError):
        pass
Send data to statsd.
12,080
def display_name(self):
    for k in self._NAME_KEYS:
        if self._raw.get(k):
            return self._raw[k]
        if "profile" in self._raw and self._raw["profile"].get(k):
            return self._raw["profile"][k]
    return self._raw["name"]
Find the most appropriate display name for a user: look for a "display_name", then a "real_name", and finally fall back to the always-present "name".
12,081
def hash(self):
    hashed = super(Compare, self).hash()
    return khash(hashed, self._comp_value, self._comp_type)
:rtype: int :return: hash of the condition
12,082
def _get_bq_service(credentials=None, service_url=None): assert credentials, http = credentials.authorize(Http()) service = build( , , http=http, discoveryServiceUrl=service_url, cache_discovery=False ) return service
Construct an authorized BigQuery service object.
12,083
def list_(): * ret = [] states_path = _states_path() if not os.path.isdir(states_path): return ret for state in os.listdir(states_path): if state.endswith((, )): ret.append(state[:-9]) return sorted(set(ret))
Return the list of frozen states. CLI Example: .. code-block:: bash salt '*' freezer.list
12,084
def numberOfXTilesAtZoom(self, zoom):
    "Returns the number of tiles over x at a given zoom level"
    [minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
    return maxCol - minCol + 1
Returns the number of tiles over x at a given zoom level
12,085
def get_last_weeks(number_of_weeks):
    time_now = datetime.now()
    year = time_now.isocalendar()[0]
    week = time_now.isocalendar()[1]
    weeks = []
    for i in range(0, number_of_weeks):
        start = get_week_dates(year, week - i, as_timestamp=True)[0]
        n_year, n_week = get_year_week(start)
        weeks.append((n_year, n_week))
    return weeks
Get the last weeks.
12,086
def from_pubkey(cls, pubkey, compressed=True, version=56, prefix=None):
    pubkey = PublicKey(pubkey, prefix=prefix or Prefix.prefix)
    if compressed:
        pubkey_plain = pubkey.compressed()
    else:
        pubkey_plain = pubkey.uncompressed()
    addressbin = ripemd160(hashlib.sha512(unhexlify(pubkey_plain)).hexdigest())
    result = Base58(hexlify(addressbin).decode("ascii"))
    return cls(result, prefix=pubkey.prefix)
Derive address using ``RIPEMD160(SHA512(x))``
12,087
def save_model(model, output_file=None, output_dir=None, output_prefix=): if output_file: mzn_file = output_file output_file = open(output_file, , buffering=1) else: output_prefix += output_file = NamedTemporaryFile( dir=output_dir, prefix=output_prefix, suffix=, delete=False, mode=, buffering=1 ) mzn_file = output_file.name output_file.write(model) output_file.close() logger.info(.format(mzn_file)) return mzn_file
Save a model to file. Parameters ---------- model : str The minizinc model (i.e. the content of a ``.mzn`` file). output_file : str The path to the output file. If this parameter is ``None`` (default), a temporary file is created with the given model in the specified output directory, using the specified prefix. output_dir : str The directory where to create the file in case ``output_file`` is None. Default is ``None``, which creates a file in the system temporary directory. output_prefix : str The prefix for the output file if created. Default is ``'pymzn'``. Returns ------- str The path to the newly created ``.mzn`` file.
12,088
def get_point(self, *position):
    array = _ffi.new(self._arrayType, position)
    if self._useOctaves:
        return (self._noiseFunc(self._noise, array, self._octaves) + 1) * 0.5
    return (self._noiseFunc(self._noise, array) + 1) * 0.5
Return the noise value of a specific position. Example usage: value = noise.getPoint(x, y, z) Args: position (Tuple[float, ...]): The point to sample at. Returns: float: The noise value at position. This will be a floating point in the 0.0-1.0 range.
12,089
def set_boolean(self, option, value):
    if not isinstance(value, bool):
        raise TypeError("%s must be a boolean" % option)
    self.options[option] = str(value).lower()
Set a boolean option. Args: option (str): name of option. value (bool): value of the option. Raises: TypeError: Value must be a boolean.
12,090
def plot(self, format=, bits=None, **kwargs): if format == : return super(StateVector, self).plot(**kwargs) if format == : from ..plot import Plot kwargs.setdefault(, ) return Plot(*self.to_dqflags(bits=bits).values(), projection=, **kwargs) raise ValueError(" argument must be one of: or " "")
Plot the data for this `StateVector` Parameters ---------- format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag.
12,091
def idxmax(self, axis=0, skipna=True, *args, **kwargs): skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan
12,092
def from_df(cls, df, **kwargs): tree = cls(**kwargs) for (n,b), g in df.groupby(['name','band']): sources = [Source(**s[['mag','e_mag','separation','pa','relative']]) for _,s in g.iterrows()] obs = Observation(n, b, g.resolution.mean(), sources=sources, relative=g.relative.any()) tree.add_observation(obs) return tree
DataFrame must have the right columns. These are: name, band, resolution, mag, e_mag, separation, pa, relative
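A sketch of the expected input, assuming the owning class is named ObservationTree (hypothetical here) and using placeholder values for the documented columns:
import pandas as pd
df = pd.DataFrame([
    {'name': 'A', 'band': 'K', 'resolution': 0.1, 'mag': 10.0, 'e_mag': 0.05,
     'separation': 0.0, 'pa': 0.0, 'relative': False},
    {'name': 'A', 'band': 'K', 'resolution': 0.1, 'mag': 12.5, 'e_mag': 0.08,
     'separation': 0.5, 'pa': 100.0, 'relative': True},
])
tree = ObservationTree.from_df(df)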
12,093
def to_json(self): json_dict = self.to_json_basic() json_dict['channel'] = self.channel json_dict['disable_inhibit_forced'] = self.disable_inhibit_forced json_dict['status'] = self.status json_dict['led_status'] = self.led_status json_dict['delay_time'] = self.delay_time return json.dumps(json_dict)
:return: str -- JSON string containing the channel, disable_inhibit_forced, status, led_status and delay_time fields
12,094
def _doBottomUpCompute(self, rfInput, resetSignal): self._conditionalBreak() self._spatialPoolerInput = rfInput.reshape(-1) assert(rfInput.shape[0] == 1) inputVector = numpy.array(rfInput[0]).astype('uint32') outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32') self._sfdr.compute(inputVector, self.learningMode, outputVector) self._spatialPoolerOutput[:] = outputVector[:] if self._fpLogSP: output = self._spatialPoolerOutput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSP, output.size, outStr if self._fpLogSPInput: output = rfInput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSPInput, output.size, outStr return self._spatialPoolerOutput
Do one iteration of inference and/or learning and return the result Parameters: -------------------------------------------- rfInput: Input vector. Shape is: (1, inputVectorLen). resetSignal: True if reset is asserted
12,095
def get_value_from_handle(self, handle, key, handlerecord_json=None): LOGGER.debug('get_value_from_handle...') handlerecord_json = self.__get_handle_record_if_necessary(handle, handlerecord_json) if handlerecord_json is None: raise HandleNotFoundException(handle=handle) list_of_entries = handlerecord_json['values'] indices = [] for i in xrange(len(list_of_entries)): if list_of_entries[i]['type'] == key: indices.append(i) if len(indices) == 0: return None else: if len(indices) > 1: LOGGER.debug('get_value_from_handle: The handle ' + handle + ' contains several entries of type ' + key + '. Only the first one is returned.') return list_of_entries[indices[0]]['data']['value']
Retrieve a single value from a single Handle. If several entries with this key exist, the methods returns the first one. If the handle does not exist, the method will raise a HandleNotFoundException. :param handle: The handle to take the value from. :param key: The key. :return: A string containing the value or None if the Handle record does not contain the key. :raises: :exc:`~b2handle.handleexceptions.HandleSyntaxError` :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
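A usage sketch; ``client`` is an instantiated handle client, and the handle and key values are placeholders:
url = client.get_value_from_handle('21.T12345/MY-TEST-HANDLE', 'URL')
if url is None:
    print('The handle record has no URL entry')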
12,096
def assign_valence(mol): for u, v, bond in mol.bonds_iter(): if bond.order == 2: mol.atom(u).pi = 1 mol.atom(v).pi = 1 if mol.atom(u).symbol == "O" and not mol.atom(u).charge: mol.atom(v).carbonyl_C = 1 if mol.atom(v).symbol == "O" and not mol.atom(v).charge: mol.atom(u).carbonyl_C = 1 elif bond.order == 3: mol.atom(u).pi = mol.atom(v).pi = 2 max_nbr = {"C": 4, "Si": 4, "N": 3, "P": 3, "As": 3, "O": 2, "S": 2, "Se": 2, "F": 1, "Cl": 1, "Br": 1, "I": 1} for i, nbrs in mol.neighbors_iter(): atom = mol.atom(i) if len(nbrs) == 2 and all(bond.order == 2 for bond in nbrs.values()): atom.pi = 2 if atom.symbol in max_nbr: h_cnt = max_nbr[atom.symbol] - len(nbrs) - atom.pi + atom.charge if h_cnt > 0: mol.atom(i).add_hydrogen(h_cnt) mol.descriptors.add("Valence")
Assign pi electron counts and implicit hydrogens to each atom.
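As a worked example of the hydrogen rule in the final loop, using placeholder values for a neutral sp2 carbon:
max_nbr_C = 4        # maximum neighbour count for carbon
explicit_nbrs = 2    # two explicit bonds
pi = 1               # one double bond
charge = 0
h_cnt = max_nbr_C - explicit_nbrs - pi + charge   # 4 - 2 - 1 + 0 = 1 implicit hydrogen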
12,097
def get_curline(): if Frame: frame = Frame.get_selected_python_frame() if frame: line = '' f = frame.get_pyop() if f and not f.is_optimized_out(): cwd = os.path.join(os.getcwd(), '') fname = f.filename() if cwd in fname: fname = fname[len(cwd):] try: line = f.current_line() except IOError: pass if line: line = repr(line).strip("'") return '\n-> %s(%s): %s' % (fname, f.current_line_num(), line)
Return the current python source line.
12,098
def setUp(self, mfd_conf): self.mfd_type = mfd_conf['Type'] self.mfd_model = + self.mfd_type self.mfd_weight = mfd_conf['Model_Weight'] self.bin_width = mfd_conf['MFD_spacing'] self.mmin = mfd_conf['Minimum_Magnitude'] self.mmax = None self.mmax_sigma = None self.b_value = mfd_conf['b_value'][0] self.b_value_sigma = mfd_conf['b_value'][1] self.occurrence_rate = None
Input core configuration parameters as specified in the configuration file :param dict mfd_conf: Configuration file containing the following attributes: * 'Type' - Choose between the 1st, 2nd or 3rd type of recurrence model {'First' | 'Second' | 'Third'} * 'Model_Weight' - Logic tree weight of model type (float) * 'MFD_spacing' - Width of MFD bin (float) * 'Minimum_Magnitude' - Minimum magnitude of activity rates (float) * 'b_value' - Tuple of (b-value, b-value uncertainty) * 'Maximum_Magnitude' - Maximum magnitude on fault (if not defined will use scaling relation) * 'Maximum_Magnitude_Uncertainty' - Uncertainty on maximum magnitude (If not defined and the MSR has a sigma term then this will be taken from sigma)
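A sketch of a configuration dictionary matching the keys documented above; the numeric values are placeholders and ``model`` stands for an instance of the recurrence class:
mfd_conf = {
    'Type': 'First',
    'Model_Weight': 1.0,
    'MFD_spacing': 0.1,
    'Minimum_Magnitude': 5.0,
    'b_value': (1.0, 0.1),              # (b-value, uncertainty)
    'Maximum_Magnitude': None,          # fall back to the scaling relation
    'Maximum_Magnitude_Uncertainty': None,
}
model.setUp(mfd_conf)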
12,099
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file): config = _load_config(config_file) if stage is None: stage = config[] s3 = boto3.client('s3') cfn = boto3.client('cloudformation') region = _get_aws_region() previous_deployment = None try: previous_deployment = cfn.describe_stacks( StackName=config[])['Stacks'][0] except botocore.exceptions.ClientError: pass built_package = False new_package = True if lambda_package is None and not no_lambda: print("Building lambda package...") lambda_package = _build(config, rebuild_deps=rebuild_deps) built_package = True elif lambda_package is None: new_package = False lambda_package = _get_from_stack(previous_deployment, , ) s3.delete_object(Bucket=bucket, Key=old_pkg) _print_status(config)
Deploy the project to the development stage.
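The stack lookup at the top of the function follows a standard boto3 pattern, treating a missing stack as "no previous deployment". A standalone sketch of that step (the stack name is a placeholder):
import boto3
import botocore.exceptions
cfn = boto3.client('cloudformation')
previous_deployment = None
try:
    previous_deployment = cfn.describe_stacks(StackName='my-project-stack')['Stacks'][0]
except botocore.exceptions.ClientError:
    pass   # stack does not exist yet, i.e. first deployment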