def datetime_to_timestamp(dt):
"""Convert timezone-aware `datetime` to POSIX timestamp and
return seconds since UNIX epoch.
Note: similar to `datetime.timestamp()` in Python 3.3+.
"""
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
return (dt - epoch).total_seconds()
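# A minimal usage sketch (not part of the original snippet); it assumes `UTC`
# is a tzinfo object such as datetime.timezone.utc.
from datetime import datetime, timezone
UTC = timezone.utc
print(datetime_to_timestamp(datetime(2020, 1, 1, tzinfo=UTC)))  # 1577836800.0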
def intersects(self, geometry, crs=None):
"""Select geometries that intersect with a GeoJSON geometry.
Geospatial operator: {$geoIntersects: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/geoIntersects/#op._S_geoIntersects
{
$geoIntersects: { $geometry: <geometry; a GeoJSON object> }
}
"""
if crs:
geometry = dict(geometry)
geometry['crs'] = {'type': 'name', 'properties': {'name': crs}}
return Filter({self._name: {'$geoIntersects': {'$geometry': geometry}}})
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
)
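# A hypothetical usage sketch with stand-in validators that raise ValueError on
# failure and return the argument on success (the real rules come from ibis).
def _positive(x):
    if x <= 0:
        raise ValueError('not positive')
    return x

def _even(x):
    if x % 2:
        raise ValueError('not even')
    return x

one_of([_positive, _even], 4)    # returns 4 (first rule passes)
one_of([_positive, _even], -4)   # returns -4 (second rule passes)
one_of([_positive, _even], -3)   # raises com.IbisTypeError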
def _param_grad_helper(self,dL_dK,X,X2,target):
"""derivative of the covariance matrix with respect to the parameters."""
AX = np.dot(X,self.transform)
if X2 is None:
X2 = X
AX2 = AX
else:
AX2 = np.dot(X2, self.transform)
self.k._param_grad_helper(dL_dK,X,X2,target)
self.k._param_grad_helper(dL_dK,AX,X2,target)
self.k._param_grad_helper(dL_dK,X,AX2,target)
self.k._param_grad_helper(dL_dK,AX,AX2,target)
def clean_up_dangling_images(self):
"""
Clean up all dangling images.
"""
cargoes = Image.all(client=self._client_session, filters={'dangling': True})
for id, cargo in six.iteritems(cargoes):
logger.info("Removing dangling image: {0}".format(id))
cargo.delete()
def addBiosample(self):
"""
Adds a new biosample into this repo
"""
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosample = bio_metadata.Biosample(
dataset, self._args.biosampleName)
biosample.populateFromJson(self._args.biosample)
self._updateRepo(self._repo.insertBiosample, biosample)
def dict_match(d, key, default=None):
"""Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match.
"""
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
if fnmatchcase(key, pattern):
return value
return default
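# A small usage sketch with hypothetical keys; it assumes fnmatchcase and
# iteritems are imported at module level as in the original code.
patterns = {'*.mp3': 'lossy', '*.flac': 'lossless'}
dict_match(patterns, 'song.mp3')               # 'lossy'
dict_match(patterns, 'song.ogg', default='?')  # '?' (no pattern matched)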
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even
more clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
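# A hedged usage sketch; it assumes the module's generate_tokens and
# check_syntax helpers are available, as in autopep8.
source = 'x = 1\n# x = 2\n# just a comment\n'
commented_out_code_lines(source)  # [2] -- '# x = 2' parses as code, the plain comment does not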
def _items_to_rela_paths(self, items):
"""Returns a list of repo-relative paths from the given items which
may be absolute or relative paths, entries or blobs"""
paths = []
for item in items:
if isinstance(item, (BaseIndexEntry, (Blob, Submodule))):
paths.append(self._to_relative_path(item.path))
elif isinstance(item, string_types):
paths.append(self._to_relative_path(item))
else:
raise TypeError("Invalid item type: %r" % item)
# END for each item
return paths
def autoscan():
"""autoscan will check all of the serial ports to see if they have
a matching VID:PID for a MicroPython board.
"""
for port in serial.tools.list_ports.comports():
if is_micropython_usb_device(port):
connect_serial(port[0])
def handle(cls, value, provider=None, **kwargs):
"""Fetch an output from the designated stack.
Args:
value (str): string with the following format:
<stack_name>::<output_name>, ie. some-stack::SomeOutput
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
Returns:
str: output from the specified stack
"""
if provider is None:
raise ValueError('Provider is required')
d = deconstruct(value)
stack_fqn = d.stack_name
output = provider.get_output(stack_fqn, d.output_name)
return output
def pipe(self, target):
"""
Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2)
"""
if callable(target):
sender, recver = self.hub.pipe()
# link the two ends in the closure with a strong reference to
# prevent them from being garbage collected if this piped section
# is used in a chain
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self)
def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
""":func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`.
This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
This function pushes the logic further by adding a `leaky` behaviour both below zero and above six.
The function returns the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha_low : float
Slope for x < 0: ``f(x) = alpha_low * x``.
alpha_high : float
Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
"""
if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
raise ValueError("`alpha_high` value must be in [0, 1]`")
if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
raise ValueError("`alpha_low` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
x = tf.convert_to_tensor(x, name="features")
x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
x_is_below_0 = tf.minimum(alpha_low * x, 0)
return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)
def approx_min_num_components(nodes, negative_edges):
"""
Find approximate minimum number of connected components possible
Each edge represents that two nodes must be separated
This code doesn't solve the problem. The problem is NP-complete and
reduces to minimum clique cover (MCC). This is only an approximate
solution. Not sure what the approximation ratio is.
CommandLine:
python -m utool.util_graph approx_min_num_components
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> edges = [(1, 2), (2, 3), (3, 1),
>>> (4, 5), (5, 6), (6, 4),
>>> (7, 8), (8, 9), (9, 7),
>>> (1, 4), (4, 7), (7, 1),
>>> ]
>>> g_pos = nx.Graph()
>>> g_pos.add_edges_from(edges)
>>> g_neg = nx.complement(g_pos)
>>> #import plottool as pt
>>> #pt.qt4ensure()
>>> #pt.show_nx(g_pos)
>>> #pt.show_nx(g_neg)
>>> negative_edges = g_neg.edges()
>>> nodes = [1, 2, 3, 4, 5, 6, 7]
>>> negative_edges = [(1, 2), (2, 3), (4, 5)]
>>> result = approx_min_num_components(nodes, negative_edges)
>>> print(result)
2
"""
import utool as ut
num = 0
g_neg = nx.Graph()
g_neg.add_nodes_from(nodes)
g_neg.add_edges_from(negative_edges)
# Collapse all nodes with degree 0
if nx.__version__.startswith('2'):
deg0_nodes = [n for n, d in g_neg.degree() if d == 0]
else:
deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
for u, v in ut.itertwo(deg0_nodes):
nx_contracted_nodes(g_neg, v, u, inplace=True)
# g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False)
# Initialize unused nodes to be everything
unused = list(g_neg.nodes())
# complement of the graph contains all possible positive edges
g_pos = nx.complement(g_neg)
if False:
from networkx.algorithms.approximation import clique
maxiset, cliques = clique.clique_removal(g_pos)
num = len(cliques)
return num
# Iterate until we have used all nodes
while len(unused) > 0:
# Seed a new "minimum component"
num += 1
# Grab a random unused node n1
#idx1 = np.random.randint(0, len(unused))
idx1 = 0
n1 = unused[idx1]
unused.remove(n1)
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
while len(neigbs) > 0:
# Find node n2, that n1 could be connected to
#idx2 = np.random.randint(0, len(neigbs))
idx2 = 0
n2 = neigbs[idx2]
unused.remove(n2)
# Collapse negative information of n1 and n2
g_neg = nx.contracted_nodes(g_neg, n1, n2)
# Compute new possible positive edges
g_pos = nx.complement(g_neg)
# Iterate until n1 has no more possible connections
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
print('num = %r' % (num,))
return num
def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt
def translate_features_to_letter_annotations(protein, more_sites=None):
"""Store select uniprot features (sites) as letter annotations with the key as the
type of site and the values as a list of booleans"""
from ssbio.databases.uniprot import longname_sites
from collections import defaultdict
sites = longname_sites ## longname_sites = ["active site", "binding site", "metal ion-binding site", "site"]
sites.append('nucleotide phosphate-binding region')
sites.append('DNA-binding region')
sites.append('intramembrane region')
sites.append("transmembrane region")
sites.append("catalyticResidue")
## ADD MORE IF YOU WANT
if more_sites:
more_sites = ssbio.utils.force_list(more_sites)
sites.extend(more_sites)
sites = list(set(sites))
for site in sites:
protein.representative_sequence.letter_annotations[site] = [False] * protein.representative_sequence.seq_len
to_store = defaultdict(list)
for f in protein.representative_sequence.features:
if f.type in sites:
to_store[f.type].append(f)
for site, feature in to_store.items():
try:
positions = [int(f.location.start) for f in feature]
except TypeError:
log.error('Protein {}, SeqProp {}: unable to translate feature {} into letter annotation'.format(protein.id, protein.representative_sequence.id, site))
continue
feat_letter_anno = []
for x in range(protein.representative_sequence.seq_len):
if x in positions:
idx = positions.index(x)
if 'description' in feature[idx].qualifiers:
feat_letter_anno.append(feature[idx].qualifiers['description'])
else:
feat_letter_anno.append(True)
else:
feat_letter_anno.append(False)
protein.representative_sequence.letter_annotations[site] = feat_letter_anno
def check_file_encoding(self, input_file_path):
"""
Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log([u"Checking encoding of file '%s'", input_file_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_file_encoding"):
return self.result
if not gf.file_can_be_read(input_file_path):
self._failed(u"File '%s' cannot be read." % (input_file_path))
return self.result
with io.open(input_file_path, "rb") as file_object:
bstring = file_object.read()
self._check_utf8_encoding(bstring)
return self.result
def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the abs function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the abs function.
'''
return cls._unary_op(x, tf.abs, tf.float32)
def clear(self):
'''
Method which resets any variables held by this class, so that the parser can be used again
:return: Nothing
'''
self.tags = []
'''the current list of tags which have been opened in the XML file'''
self.chars = {}
'''the chars held by each tag, indexed by their tag name'''
self.attribs = {}
'''the attributes of each tag, indexed by their tag name'''
self.handler = None
''' the method which will handle the current tag, and the data currently in the class '''
self.piece = PieceTree.PieceTree()
'''the class tree top'''
self.isDynamic = False
'''Indicator of whether the current thing being processed is a dynamic'''
self.data["note"] = None
self.data["direction"] = None
self.data["expression"] = None
self.data["degree"] = None
self.data["frame_note"] = None
self.data["staff_id"] = 1
self.data["voice"] = 1
self.data["handleType"] = ""
def instantiate_by_name(self, object_name):
""" Instantiate object from the environment, possibly giving some extra arguments """
if object_name not in self.instances:
instance = self.instantiate_from_data(self.environment[object_name])
self.instances[object_name] = instance
return instance
else:
return self.instances[object_name]
def serialize(self, pid, record, links_factory=None):
"""Serialize a single record and persistent identifier.
:param pid: Persistent identifier instance.
:param record: Record instance.
:param links_factory: Factory function for record links.
"""
return self.schema.tostring(
self.transform_record(pid, record, links_factory))
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
def inflate_bbox(self):
"""
Realign the left and right edges of the bounding box such that they are
inflated to align modulo 4.
This method is optional, and used mainly to accommodate devices with
COM/SEG GDDRAM structures that store pixels in 4-bit nibbles.
"""
left, top, right, bottom = self.bounding_box
self.bounding_box = (
left & 0xFFFC,
top,
right if right % 4 == 0 else (right & 0xFFFC) + 0x04,
bottom)
return self.bounding_box
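# Worked example of the alignment arithmetic (hypothetical coordinates): a
# bounding box of (5, 0, 9, 7) becomes (4, 0, 12, 7), because 5 & 0xFFFC == 4
# and 9 is not a multiple of 4, so the right edge becomes (9 & 0xFFFC) + 0x04 == 12.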
def reload(name=DEFAULT, all_names=False):
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times, and a dictionary that contains information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
#scale variables in x because PSO works with velocities to visit different configurations
tuning_options["scaling"] = True
#using this instead of get_bounds because scaling is used
bounds, _, _ = get_bounds_x0_eps(tuning_options)
args = (kernel_options, tuning_options, runner, results, cache)
num_particles = 20
maxiter = 100
best_time_global = 1e20
best_position_global = []
# init particle swarm
swarm = []
for i in range(0, num_particles):
swarm.append(Particle(bounds, args))
for i in range(maxiter):
if tuning_options.verbose:
print("start iteration ", i, "best time global", best_time_global)
# evaluate particle positions
for j in range(num_particles):
swarm[j].evaluate(_cost_func)
# update global best if needed
if swarm[j].time <= best_time_global:
best_position_global = swarm[j].position
best_time_global = swarm[j].time
# update particle velocities and positions
for j in range(0, num_particles):
swarm[j].update_velocity(best_position_global)
swarm[j].update_position(bounds)
if tuning_options.verbose:
print('Final result:')
print(best_position_global)
print(best_time_global)
return results, runner.dev.get_environment()
def add_transcript(self, transcript):
"""Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary
"""
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript)
def stops(self):
"""Return stops served by this route."""
serves = set()
for trip in self.trips():
for stop_time in trip.stop_times():
serves |= stop_time.stops()
return serves
def chunker(f, n):
"""
Utility function to split iterable `f` into `n` chunks
"""
f = iter(f)
x = []
while 1:
if len(x) < n:
try:
x.append(f.next())
except StopIteration:
if len(x) > 0:
yield tuple(x)
break
else:
yield tuple(x)
x = []
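# A hedged usage sketch; the body calls f.next(), so this assumes Python 2
# (under Python 3 the call would need to be next(f)).
list(chunker(range(7), 3))  # [(0, 1, 2), (3, 4, 5), (6,)]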
async def write_register(self, address, value, skip_encode=False):
"""Write a modbus register."""
await self._request('write_registers', address, value, skip_encode=skip_encode)
def getViews(self, path, year=None, month=None, day=None, hour=None):
"""Use this method to get the number of views for a Telegraph article.
:param path: Required. Path to the Telegraph page
(in the format Title-12-31, where 12 is the month and 31 the day the article was first published).
:type path: str
:param year: Required if month is passed.
If passed, the number of page views for the requested year will be returned.
:type year: int
:param month: Required if day is passed.
If passed, the number of page views for the requested month will be returned.
:type month: int
:param day: Required if hour is passed.
If passed, the number of page views for the requested day will be returned.
:type day: int
:param hour: If passed, the number of page views for the requested hour will be returned.
:type hour: int
:return:
"""
if path is None:
raise TelegraphAPIException("Error while executing getViews: "
"PAGE_NOT_FOUND")
r = requests.post(BASE_URL + "getViews/" + path, data={
"year": year,
"month": month,
"day": day,
"hour": hour,
})
if r.json()['ok'] is not True:
raise TelegraphAPIException("Error while executing getViews: " +
r.json()['error'])
return r.json()['result']
def wait_until_element_visible(self, element, timeout=None):
"""Search element and wait until it is visible
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is visible
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is still not visible after the timeout
"""
return self._wait_until(self._expected_condition_find_element_visible, element, timeout)
async def _into_id_set(client, chats):
"""Helper util to turn the input chat or chats into a set of IDs."""
if chats is None:
return None
if not utils.is_list_like(chats):
chats = (chats,)
result = set()
for chat in chats:
if isinstance(chat, int):
if chat < 0:
result.add(chat) # Explicitly marked IDs are negative
else:
result.update({ # Support all valid types of peers
utils.get_peer_id(types.PeerUser(chat)),
utils.get_peer_id(types.PeerChat(chat)),
utils.get_peer_id(types.PeerChannel(chat)),
})
elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2d45687:
# 0x2d45687 == crc32(b'Peer')
result.add(utils.get_peer_id(chat))
else:
chat = await client.get_input_entity(chat)
if isinstance(chat, types.InputPeerSelf):
chat = await client.get_me(input_peer=True)
result.add(utils.get_peer_id(chat))
return result
def guess_headers(self):
"""
Attempt to guess what headers may be required in order to use this
type. Returns `guess_headers` of all children recursively.
* If the typename is in the :const:`KNOWN_TYPES` dictionary, use the
header specified there
* If it's an STL type, include <{type}>
* If it exists in the ROOT namespace and begins with T,
include <{type}.h>
"""
name = self.name.replace("*", "")
headers = []
if name in KNOWN_TYPES:
headers.append(KNOWN_TYPES[name])
elif name in STL:
headers.append('<{0}>'.format(name))
elif hasattr(ROOT, name) and name.startswith("T"):
headers.append('<{0}.h>'.format(name))
elif '::' in name:
headers.append('<{0}.h>'.format(name.replace('::', '/')))
elif name == 'allocator':
headers.append('<memory>')
else:
try:
# is this just a basic type?
CPPGrammar.BASIC_TYPE.parseString(name, parseAll=True)
except ParseException as e:
# nope... I don't know what it is
log.warning(
"unable to guess headers required for {0}".format(name))
if self.params:
for child in self.params:
headers.extend(child.guess_headers)
# remove duplicates
return list(set(headers))
def stream_sample(self, md5, kwargs=None):
""" Stream the sample by giving back a generator, typically used on 'logs'.
Args:
md5: the md5 of the sample
kwargs: a way of specifying subsets of samples (None for all)
max_rows: the maximum number of rows to return
Returns:
A generator that yields rows of the file/log
"""
# Get the max_rows if specified
max_rows = kwargs.get('max_rows', None) if kwargs else None
# Grab the sample and its raw bytes
sample = self.get_sample(md5)['sample']
raw_bytes = sample['raw_bytes']
# Figure out the type of file to be streamed
type_tag = sample['type_tag']
if type_tag == 'bro':
bro_log = bro_log_reader.BroLogReader(convert_datetimes=False)
mem_file = StringIO(raw_bytes)
generator = bro_log.read_log(mem_file)
return generator
elif type_tag == 'els_query':
els_log = json.loads(raw_bytes)
# Try to determine a couple of different types of ELS query results
if 'fields' in els_log['hits']['hits'][0]:
generator = (row['fields'] for row in els_log['hits']['hits'][:max_rows])
else:
generator = (row['_source'] for row in els_log['hits']['hits'][:max_rows])
return generator
elif type_tag == 'log':
generator = ({'row':row} for row in raw_bytes.split('\n')[:max_rows])
return generator
elif type_tag == 'json':
generator = (row for row in json.loads(raw_bytes)[:max_rows])
return generator
else:
raise RuntimeError('Cannot stream file %s with type_tag:%s' % (md5, type_tag))
def _encode_char(char, charmap, defaultchar):
""" Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page
"""
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar)
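# A small usage sketch with a hypothetical code-page mapping.
charmap = {u'\xe9': 0x82}            # e.g. an 'e-acute' entry in a CP437-style map
_encode_char(u'A', charmap, '?')     # 65 (ASCII passes through unchanged)
_encode_char(u'\xe9', charmap, '?')  # 130 (looked up in the map)
_encode_char(u'\xdf', charmap, '?')  # 63 (falls back to the default char '?')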
def get_printable(iterable):
"""
Get printable characters from the specified string.
Note that str.isprintable() is not available in Python 2.
"""
if iterable:
return ''.join(i for i in iterable if i in string.printable)
return ''
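# A quick usage sketch; unprintable bytes are simply dropped.
get_printable('abc\x00\x07def')  # 'abcdef'
get_printable('')                # ''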
def _replace_bm(self):
"""Replace ``_block_matcher`` with current values."""
self._block_matcher = cv2.StereoBM(preset=self._bm_preset,
ndisparities=self._search_range,
SADWindowSize=self._window_size)
def print_licences(params, metadata):
"""Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata
"""
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0)
def _RawGlobPathSpecWithNumericSchema(
file_system, parent_path_spec, segment_format, location, segment_number):
"""Globs for path specifications according to a numeric naming schema.
Args:
file_system (FileSystem): file system.
parent_path_spec (PathSpec): parent path specification.
segment_format (str): naming schema of the segment file location.
location (str): the base segment file location string.
segment_number (int): first segment number.
Returns:
list[PathSpec]: path specifications that match the glob.
"""
segment_files = []
while True:
segment_location = segment_format.format(location, segment_number)
# Note that we don't want to set the keyword arguments when not used
# because the path specification base class will check for unused
# keyword arguments and raise.
kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)
kwargs['location'] = segment_location
if parent_path_spec.parent is not None:
kwargs['parent'] = parent_path_spec.parent
segment_path_spec = path_spec_factory.Factory.NewPathSpec(
parent_path_spec.type_indicator, **kwargs)
if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
break
segment_files.append(segment_path_spec)
segment_number += 1
return segment_files
def socket_recv(self):
"""
Called by TelnetServer when recv data is ready.
"""
try:
data = self.sock.recv(2048)
except socket.error, ex:
print ("?? socket.recv() error '%d:%s' from %s" %
(ex[0], ex[1], self.addrport()))
raise BogConnectionLost()
## Did they close the connection?
size = len(data)
if size == 0:
raise BogConnectionLost()
## Update some trackers
self.last_input_time = time.time()
self.bytes_received += size
## Test for telnet commands
for byte in data:
self._iac_sniffer(byte)
## Look for newline characters to get whole lines from the buffer
while True:
mark = self.recv_buffer.find('\n')
if mark == -1:
break
cmd = self.recv_buffer[:mark].strip()
self.command_list.append(cmd)
self.cmd_ready = True
self.recv_buffer = self.recv_buffer[mark+1:]
def get_hdrgos_g_usrgos(self, usrgos):
"""Return hdrgos which contain the usrgos."""
hdrgos_for_usrgos = set()
hdrgos_all = self.get_hdrgos()
usrgo2hdrgo = self.get_usrgo2hdrgo()
for usrgo in usrgos:
if usrgo in hdrgos_all:
hdrgos_for_usrgos.add(usrgo)
continue
hdrgo_cur = usrgo2hdrgo.get(usrgo, None)
if hdrgo_cur is not None:
hdrgos_for_usrgos.add(hdrgo_cur)
return hdrgos_for_usrgos
def point_in_polygon(points, x, y):
""" Ray casting algorithm.
Determines how many times a horizontal ray starting from the point
intersects with the sides of the polygon.
If it is an even number of times, the point is outside, if odd, inside.
The algorithm does not always report correctly when the point is very close to the boundary.
The polygon is passed as a list of (x,y)-tuples.
"""
odd = False
n = len(points)
for i in range(n):
j = i < n - 1 and i + 1 or 0
x0, y0 = points[i][0], points[i][1]
x1, y1 = points[j][0], points[j][1]
if (y0 < y and y1 >= y) or (y1 < y and y0 >= y):
if x0 + (y - y0) / (y1 - y0) * (x1 - x0) < x:
odd = not odd
return odd
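# A short usage sketch with a hypothetical 4x4 square polygon.
square = [(0, 0), (4, 0), (4, 4), (0, 4)]
point_in_polygon(square, 2, 2)  # True  (inside)
point_in_polygon(square, 5, 2)  # False (outside)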
def get_supported_file_loaders_2(force=False):
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
if force or (2, 7) <= sys.version_info < (3, 4): # valid until which py3 version ?
import imp
loaders = []
for suffix, mode, type in imp.get_suffixes():
if type == imp.PY_SOURCE:
loaders.append((SourceFileLoader2, [suffix]))
else:
loaders.append((ImpFileLoader2, [suffix]))
return loaders
elif sys.version_info >= (3, 4): # valid from which py3 version ?
from importlib.machinery import (
SOURCE_SUFFIXES, SourceFileLoader,
BYTECODE_SUFFIXES, SourcelessFileLoader,
EXTENSION_SUFFIXES, ExtensionFileLoader,
)
# This is already defined in importlib._bootstrap_external
# but is not exposed.
extensions = ExtensionFileLoader, EXTENSION_SUFFIXES
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
def pad(self, pad_length):
"""
Pad the pianoroll with zeros at the end along the time axis.
Parameters
----------
pad_length : int
The length to pad with zeros along the time axis.
"""
self.pianoroll = np.pad(
self.pianoroll, ((0, pad_length), (0, 0)), 'constant')
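# Hypothetical usage sketch, assuming `track` is an instance exposing this
# method over a (time, pitch) pianoroll array:
# track.pianoroll.shape  -> (96, 128)
# track.pad(32)
# track.pianoroll.shape  -> (128, 128); zeros are appended along the time axis only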
def post_create_app(cls, app, **settings):
"""Register the errorhandler for the AppException to the passed in
App.
Args:
app (fleaker.base.BaseApplication): A Flask application that
extends the Fleaker Base Application, such that the hooks are
implemented.
Kwargs:
register_errorhandler (bool): A boolean indicating if we want to
automatically register an errorhandler for the
:class:`AppException` exception class after we create this App.
Pass ``False`` to prevent registration. Default is ``True``.
Returns:
fleaker.base.BaseApplication: Returns the app it was given.
"""
register_errorhandler = settings.pop('register_errorhandler', True)
if register_errorhandler:
AppException.register_errorhandler(app)
return app
def probe_plugins():
"""Runs uWSGI to determine what plugins are available and prints them out.
Generic plugins come first then after blank line follow request plugins.
"""
plugins = UwsgiRunner().get_plugins()
for plugin in sorted(plugins.generic):
click.secho(plugin)
click.secho('')
for plugin in sorted(plugins.request):
click.secho(plugin)
def service_upsert(path, service_name, definition):
'''
Create or update the definition of a docker-compose service
This does not pull or up the service
This will re-write your yaml file. Comments will be lost. Indentation is set to 2 spaces
path
Path where the docker-compose file is stored on the server
service_name
Name of the service to create
definition
Service definition as yaml or json string
CLI Example:
.. code-block:: bash
salt myminion dockercompose.service_upsert /path/where/docker-compose/stored service_name definition
'''
compose_result, loaded_definition, err = __load_compose_definitions(path, definition)
if err:
return err
services = compose_result['compose_content']['services']
if service_name in services:
msg = 'Service {0} already exists'.format(service_name)
return __standardize_result(False, msg, None, None)
services[service_name] = loaded_definition
return __dump_compose_file(path, compose_result,
'Service definition for {0} is set'.format(service_name),
already_existed=True)
def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
"""
Leave one out error as found in
"Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
Vehtari et al. 2014.
"""
g = posterior.woodbury_vector
c = posterior.woodbury_inv
c_diag = np.diag(c)[:, None]
neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
#believe from Predictive Approaches for Choosing Hyperparameters in Gaussian Processes
#this is the negative marginal LOO
return -neg_log_marginal_LOO
def new_driver(self, testname=None):
'''
Used at a start of a test to get a new instance of WebDriver. If the
'reusebrowser' setting is true, it will use a recycled WebDriver instance
with delete_all_cookies() called.
Kwargs:
testname (str) - Optional test name to pass to Selenium Grid. Helpful for
labeling tests on 3rd party WebDriver cloud providers.
Returns:
Webdriver - Selenium Webdriver instance.
Usage::
driver = WTF_WEBDRIVER_MANAGER.new_driver()
driver.get("http://the-internet.herokuapp.com")
'''
channel = self.__get_channel()
# Get reference for the current driver.
driver = self.__get_driver_for_channel(channel)
if self.__config.get(WebDriverManager.REUSE_BROWSER, True):
if driver is None:
driver = self._webdriver_factory.create_webdriver(
testname=testname)
# Register webdriver so it can be retrieved by the manager and
# cleaned up after exit.
self.__register_driver(channel, driver)
else:
try:
# Attempt to get the browser to a pristine state as possible when we are
# reusing this for another test.
driver.delete_all_cookies()
# check to see if webdriver is still responding
driver.get("about:blank")
except:
# In the case the browser is unhealthy, we should kill it
# and serve a new one.
try:
if driver.is_online():
driver.quit()
except:
pass
driver = self._webdriver_factory.create_webdriver(
testname=testname)
self.__register_driver(channel, driver)
else:
# Attempt to tear down any existing webdriver.
if driver is not None:
try:
driver.quit()
except:
pass
self.__unregister_driver(channel)
driver = self._webdriver_factory.create_webdriver(
testname=testname)
self.__register_driver(channel, driver)
return driver
def images(self):
"""Instance depends on the API version:
* 2016-04-30-preview: :class:`ImagesOperations<azure.mgmt.compute.v2016_04_30_preview.operations.ImagesOperations>`
* 2017-03-30: :class:`ImagesOperations<azure.mgmt.compute.v2017_03_30.operations.ImagesOperations>`
* 2017-12-01: :class:`ImagesOperations<azure.mgmt.compute.v2017_12_01.operations.ImagesOperations>`
* 2018-04-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_04_01.operations.ImagesOperations>`
* 2018-06-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_06_01.operations.ImagesOperations>`
* 2018-10-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_10_01.operations.ImagesOperations>`
* 2019-03-01: :class:`ImagesOperations<azure.mgmt.compute.v2019_03_01.operations.ImagesOperations>`
"""
api_version = self._get_api_version('images')
if api_version == '2016-04-30-preview':
from .v2016_04_30_preview.operations import ImagesOperations as OperationClass
elif api_version == '2017-03-30':
from .v2017_03_30.operations import ImagesOperations as OperationClass
elif api_version == '2017-12-01':
from .v2017_12_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-10-01':
from .v2018_10_01.operations import ImagesOperations as OperationClass
elif api_version == '2019-03-01':
from .v2019_03_01.operations import ImagesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'utmp': None,
})
return config
def get_metric_by_name(name: str) -> Callable[..., Any]:
"""Returns a metric callable with a corresponding name."""
if name not in _REGISTRY:
raise ConfigError(f'"{name}" is not registered as a metric')
return fn_from_str(_REGISTRY[name])
def name(self):
"""
The UI name of this style.
"""
name = self._element.name_val
if name is None:
return None
return BabelFish.internal2ui(name)
def _load_key(key_object):
"""
Common code to load public and private keys into PublicKey and PrivateKey
objects
:param key_object:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PublicKey or PrivateKey object
"""
if key_object.algorithm == 'ec':
curve_type, details = key_object.curve
if curve_type != 'named':
raise AsymmetricKeyError('OS X only supports EC keys using named curves')
if details not in set(['secp256r1', 'secp384r1', 'secp521r1']):
raise AsymmetricKeyError(pretty_message(
'''
OS X only supports EC keys using the named curves secp256r1,
secp384r1 and secp521r1
'''
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo == 'sha2':
raise AsymmetricKeyError(pretty_message(
'''
OS X only supports DSA keys based on SHA1 (2048 bits or less) - this
key is based on SHA2 and is %s bits
''',
key_object.bit_size
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo is None:
raise IncompleteAsymmetricKeyError(pretty_message(
'''
The DSA key does not contain the necessary p, q and g parameters
and can not be used
'''
))
if isinstance(key_object, keys.PublicKeyInfo):
source = key_object.dump()
key_class = Security.kSecAttrKeyClassPublic
else:
source = key_object.unwrap().dump()
key_class = Security.kSecAttrKeyClassPrivate
cf_source = None
cf_dict = None
cf_output = None
try:
cf_source = CFHelpers.cf_data_from_bytes(source)
key_type = {
'dsa': Security.kSecAttrKeyTypeDSA,
'ec': Security.kSecAttrKeyTypeECDSA,
'rsa': Security.kSecAttrKeyTypeRSA,
}[key_object.algorithm]
cf_dict = CFHelpers.cf_dictionary_from_pairs([
(Security.kSecAttrKeyType, key_type),
(Security.kSecAttrKeyClass, key_class),
(Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue),
(Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue),
])
error_pointer = new(CoreFoundation, 'CFErrorRef *')
sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer)
handle_cf_error(error_pointer)
if key_class == Security.kSecAttrKeyClassPublic:
return PublicKey(sec_key_ref, key_object)
if key_class == Security.kSecAttrKeyClassPrivate:
return PrivateKey(sec_key_ref, key_object)
finally:
if cf_source:
CoreFoundation.CFRelease(cf_source)
if cf_dict:
CoreFoundation.CFRelease(cf_dict)
if cf_output:
CoreFoundation.CFRelease(cf_output)
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def is_condition_met(self, hand, *args):
"""
The hand contains four sets of winds
:param hand: list of hand's sets
:return: boolean
"""
pon_sets = [x for x in hand if is_pon(x)]
if len(pon_sets) != 4:
return False
count_wind_sets = 0
winds = [EAST, SOUTH, WEST, NORTH]
for item in pon_sets:
if is_pon(item) and item[0] in winds:
count_wind_sets += 1
return count_wind_sets == 4
def auth_request_url(self, client_id=None, redirect_uris="urn:ietf:wg:oauth:2.0:oob",
scopes=__DEFAULT_SCOPES, force_login=False):
"""
Returns the url that a client needs to request an oauth grant from the server.
To log in with oauth, send your user to this URL. The user will then log in and
get a code which you can pass to log_in.
scopes are as in `log_in()`_, redirect_uris is where the user should be redirected to
after authentication. Note that redirect_uris must be one of the URLs given during
app registration. When using urn:ietf:wg:oauth:2.0:oob, the code is simply displayed,
otherwise it is added to the given URL as the "code" request parameter.
Pass force_login if you want the user to always log in even when already logged
into web mastodon (i.e. when registering multiple different accounts in an app).
"""
if client_id is None:
client_id = self.client_id
else:
if os.path.isfile(client_id):
with open(client_id, 'r') as secret_file:
client_id = secret_file.readline().rstrip()
params = dict()
params['client_id'] = client_id
params['response_type'] = "code"
params['redirect_uri'] = redirect_uris
params['scope'] = " ".join(scopes)
params['force_login'] = force_login
formatted_params = urlencode(params)
return "".join([self.api_base_url, "/oauth/authorize?", formatted_params]) | Returns the url that a client needs to request an oauth grant from the server.
To log in with oauth, send your user to this URL. The user will then log in and
get a code which you can pass to log_in.
scopes are as in `log_in()`_, redirect_uris is where the user should be redirected to
after authentication. Note that redirect_uris must be one of the URLs given during
app registration. When using urn:ietf:wg:oauth:2.0:oob, the code is simply displayed,
otherwise it is added to the given URL as the "code" request parameter.
Pass force_login if you want the user to always log in even when already logged
into web mastodon (i.e. when registering multiple different accounts in an app). |
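A minimal standalone sketch of the same URL construction; the instance URL, client id and scopes are hypothetical example values.
from urllib.parse import urlencode

api_base_url = "https://mastodon.example"
params = {
    "client_id": "abc123",
    "response_type": "code",
    "redirect_uri": "urn:ietf:wg:oauth:2.0:oob",
    "scope": "read write follow",
    "force_login": False,
}
print(api_base_url + "/oauth/authorize?" + urlencode(params))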
def collect_analysis(self):
'''
:return: a dictionary which is used to get the serialized analyzer definition from the analyzer class.
'''
analysis = {}
for field in self.fields.values():
for analyzer_name in ('analyzer', 'index_analyzer', 'search_analyzer'):
if not hasattr(field, analyzer_name):
continue
analyzer = getattr(field, analyzer_name)
if not isinstance(analyzer, Analyzer):
continue
definition = analyzer.get_analysis_definition()
if definition is None:
continue
for key in definition:
analysis.setdefault(key, {}).update(definition[key])
return analysis | :return: a dictionary which is used to get the serialized analyzer definition from the analyzer class. |
def database_list_folder(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /database-xxxx/listFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2FlistFolder
"""
return DXHTTPRequest('/%s/listFolder' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /database-xxxx/listFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2FlistFolder |
def kl_divergence(self, logits_q, logits_p):
"""
Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When Q_i and P_i are given as log-probabilities (the "logits" here), this is:
sum exp(Q_i) * (Q_i - P_i)
"""
return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) | Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When Q_i and P_i are given as log-probabilities (the "logits" here), this is:
sum exp(Q_i) * (Q_i - P_i) |
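A minimal sketch cross-checking the formula against torch's built-in KL divergence, assuming both inputs are log-probabilities (e.g. outputs of log_softmax).
import torch
import torch.nn.functional as F

logits_q = torch.log_softmax(torch.randn(4, 6), dim=1)  # log Q
logits_p = torch.log_softmax(torch.randn(4, 6), dim=1)  # log P

kl = (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)

# F.kl_div(input=log P, target=Q) computes Q * (log Q - log P) elementwise
reference = F.kl_div(logits_p, torch.exp(logits_q), reduction='none').sum(1, keepdim=True)
print(torch.allclose(kl, reference, atol=1e-6))  # True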
def assign_types_to_resources(resource_types,**kwargs):
"""
Assign new types to a list of resources.
This function checks if the necessary
attributes are present and adds them if needed. Non-existing attributes
are also added when the type is already assigned. This means that this
function can also be used to update resources when a resource type has
changed.
"""
#Remove duplicate values from types by turning it into a set
type_ids = list(set([rt.type_id for rt in resource_types]))
db_types = db.DBSession.query(TemplateType).filter(TemplateType.id.in_(type_ids)).options(joinedload_all('typeattrs')).all()
types = {}
for db_type in db_types:
if types.get(db_type.id) is None:
types[db_type.id] = db_type
log.debug("Retrieved all the appropriate template types")
res_types = []
res_attrs = []
res_scenarios = []
net_id = None
node_ids = []
link_ids = []
grp_ids = []
for resource_type in resource_types:
ref_id = resource_type.ref_id
ref_key = resource_type.ref_key
if resource_type.ref_key == 'NETWORK':
net_id = ref_id
elif resource_type.ref_key == 'NODE':
node_ids.append(ref_id)
elif resource_type.ref_key == 'LINK':
link_ids.append(ref_id)
elif resource_type.ref_key == 'GROUP':
grp_ids.append(ref_id)
if net_id:
net = db.DBSession.query(Network).filter(Network.id==net_id).one()
nodes = _get_nodes(node_ids)
links = _get_links(link_ids)
groups = _get_groups(grp_ids)
for resource_type in resource_types:
ref_id = resource_type.ref_id
ref_key = resource_type.ref_key
type_id = resource_type.type_id
if ref_key == 'NETWORK':
resource = net
elif ref_key == 'NODE':
resource = nodes[ref_id]
elif ref_key == 'LINK':
resource = links[ref_id]
elif ref_key == 'GROUP':
resource = groups[ref_id]
ra, rt, rs= set_resource_type(resource, type_id, types)
if rt is not None:
res_types.append(rt)
if len(ra) > 0:
res_attrs.extend(ra)
if len(rs) > 0:
res_scenarios.extend(rs)
log.debug("Retrieved all the appropriate resources")
if len(res_types) > 0:
new_types = db.DBSession.execute(ResourceType.__table__.insert(), res_types)
if len(res_attrs) > 0:
new_res_attrs = db.DBSession.execute(ResourceAttr.__table__.insert(), res_attrs)
new_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.id>=new_res_attrs.lastrowid, ResourceAttr.id<(new_res_attrs.lastrowid+len(res_attrs)))).all()
ra_map = {}
for ra in new_ras:
ra_map[(ra.ref_key, ra.attr_id, ra.node_id, ra.link_id, ra.group_id, ra.network_id)] = ra.id
for rs in res_scenarios:
rs['resource_attr_id'] = ra_map[(rs['ref_key'], rs['attr_id'], rs['node_id'], rs['link_id'], rs['group_id'], rs['network_id'])]
if len(res_scenarios) > 0:
new_scenarios = db.DBSession.execute(ResourceScenario.__table__.insert(), res_scenarios)
#Make DBsession 'dirty' to pick up the inserts by doing a fake delete.
db.DBSession.query(ResourceAttr).filter(ResourceAttr.attr_id==None).delete()
ret_val = [t for t in types.values()]
return ret_val | Assign new types to a list of resources.
This function checks if the necessary
attributes are present and adds them if needed. Non-existing attributes
are also added when the type is already assigned. This means that this
function can also be used to update resources when a resource type has
changed. |
def run(self, cell, is_full_fc=False, parse_fc=True):
"""Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
The force constants file of QE is not parsed when this is False.
False may be used when only epsilon and born are expected to be parsed.
"""
with open(self._filename) as f:
fc_dct = self._parse_q2r(f)
self.dimension = fc_dct['dimension']
self.epsilon = fc_dct['dielectric']
self.borns = fc_dct['born']
if parse_fc:
(self.fc,
self.primitive,
self.supercell) = self._arrange_supercell_fc(
cell, fc_dct['fc'], is_full_fc=is_full_fc) | Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
The force constants file of QE is not parsed when this is False.
False may be used when only epsilon and born are expected to be parsed.
def wait_turns(self, turns, cb=None):
"""Call ``self.app.engine.next_turn()`` ``n`` times, waiting ``self.app.turn_length`` in between
Disables input for the duration.
:param turns: number of turns to wait
:param cb: function to call when done waiting, optional
:return: ``None``
"""
self.disable_input()
self.app.wait_turns(turns, cb=partial(self.enable_input, cb)) | Call ``self.app.engine.next_turn()`` ``turns`` times, waiting ``self.app.turn_length`` in between
Disables input for the duration.
:param turns: number of turns to wait
:param cb: function to call when done waiting, optional
:return: ``None`` |
def _get_notmuch_message(self, mid):
"""returns :class:`notmuch.database.Message` with given id"""
mode = Database.MODE.READ_ONLY
db = Database(path=self.path, mode=mode)
try:
return db.find_message(mid)
except:
errmsg = 'no message with id %s exists!' % mid
raise NonexistantObjectError(errmsg) | returns :class:`notmuch.database.Message` with given id |
def delete_table_records(self, table, query_column, ids_to_delete):
""" Responsys.deleteTableRecords call
Accepts:
InteractObject table
string query_column
possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
list ids_to_delete
Returns a list of DeleteResult instances
"""
table = table.get_soap_object(self.client)
result = self.call('deleteTableRecords', table, query_column, ids_to_delete)
if hasattr(result, '__iter__'):
return [DeleteResult(delete_result) for delete_result in result]
return [DeleteResult(result)] | Responsys.deleteTableRecords call
Accepts:
InteractObject table
string query_column
possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
list ids_to_delete
Returns a list of DeleteResult instances |
def create_signed_pair(self, name, ca_name, cert_type=crypto.TYPE_RSA,
bits=2048, years=5, alt_names=None, serial=0,
overwrite=False):
"""
Create a key-cert pair
Arguments: name - The name of the key-cert pair
ca_name - The name of the CA to sign this cert
cert_type - The type of the cert. TYPE_RSA or TYPE_DSA
bits - The number of bits to use
alt_names - An array of alternative names in the format:
IP:address, DNS:address
Returns: KeyCertPair for the new signed pair
"""
key = self.create_key_pair(cert_type, bits)
req = self.create_request(key, CN=name)
extensions = [
crypto.X509Extension(
b"extendedKeyUsage", True, b"serverAuth, clientAuth"),
]
if alt_names:
extensions.append(
crypto.X509Extension(b"subjectAltName",
False, ",".join(alt_names).encode())
)
ca_bundle = self.store.get_files(ca_name)
cacert = ca_bundle.cert.load()
cakey = ca_bundle.key.load()
cert = self.sign(req, (cacert, cakey), (0, 60*60*24*365*years),
extensions=extensions)
x509s = {'key': key, 'cert': cert, 'ca': None}
self.store.add_files(name, x509s, parent_ca=ca_name,
overwrite=overwrite)
# Relate these certs as being parent and child
self.store.add_sign_link(ca_name, name)
return self.store.get_record(name) | Create a key-cert pair
Arguments: name - The name of the key-cert pair
ca_name - The name of the CA to sign this cert
cert_type - The type of the cert. TYPE_RSA or TYPE_DSA
bits - The number of bits to use
alt_names - An array of alternative names in the format:
IP:address, DNS:address
Returns: KeyCertPair for the new signed pair |
def transform_literals(rdf, literalmap):
"""Transform literal properties of Concepts, as defined by config file."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, Literal) \
and (p in literalmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, literalmap):
newval = mapping_get(p, literalmap)
newuris = [v[0] for v in newval]
logging.debug("transform literal %s -> %s", p, str(newuris))
replace_predicate(
rdf, p, newuris, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with literal %s", p) | Transform literal properties of Concepts, as defined by config file. |
def netdevs():
''' RX and TX bytes for each of the network devices '''
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data={}
data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
float(line[1].split()[8])/(1024.0*1024.0))
return device_data | RX and TX bytes for each of the network devices |
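A minimal usage sketch; it assumes a Linux host where /proc/net/dev is readable.
for dev, stats in netdevs().items():
    print("%s: RX %.2f MiB, TX %.2f MiB" % (dev, stats.rx, stats.tx))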
def drop_words(text, threshold=2, to_lower=True, delimiters=DEFAULT_DELIMITERS,
stop_words=None):
'''
Remove words that occur below a certain number of times in an SArray.
This is a common method of cleaning text before it is used, and can increase the
quality and explainability of the models learned on the transformed data.
RareWordTrimmer can be applied to all the string-, dictionary-, and list-typed
columns in an SArray.
* **string** : The string is first tokenized. By default, all letters are
first converted to lower case, then tokenized by space characters. Each
token is taken to be a word, and the words occurring below a threshold
number of times across the entire column are removed, then the remaining
tokens are concatenated back into a string.
* **list** : Each element of the list must be a string, where each element
is assumed to be a token. The remaining tokens are then filtered
by count occurrences and a threshold value.
* **dict** : The method first obtains the list of keys in the dictionary.
This list is then processed as a standard list, except the value of each
key must be of integer type and is considered to be the count of that key.
Parameters
----------
text : SArray[str | dict | list]
The input text data.
threshold : int, optional
The count below which words are removed from the input.
stop_words: list[str], optional
A manually specified list of stop words, which are removed regardless
of count.
to_lower : bool, optional
Indicates whether to map the input strings to lower case before counting.
delimiters: list[string], optional
A list of delimiter characters for tokenization. By default, the list
is defined to be the list of space characters. The user can define
any custom list of single-character delimiters. Alternatively, setting
`delimiters=None` will use a Penn treebank type tokenization, which
is better at handling punctuations. (See reference below for details.)
Returns
-------
out : SArray.
An SArray with words below a threshold removed.
See Also
--------
count_ngrams, tf_idf, tokenize,
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps in a fox like way.",
"Word word WORD, word!!!word"])
# Run drop_words
>>> turicreate.text_analytics.drop_words(sa)
dtype: str
Rows: 2
['fox fox', 'word word']
# Run drop_words with Penn treebank style tokenization to handle
# punctuations
>>> turicreate.text_analytics.drop_words(sa, delimiters=None)
dtype: str
Rows: 2
['fox fox', 'word word word']
# Run drop_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 2},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.drop_words(sa)
dtype: dict
Rows: 2
[{'bob alice': 2}, {'a dog cat': 5}]
# Run drop_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah', 'One'],
['a dog', 'a dog cat', 'A DOG']])
>>> turicreate.text_analytics.drop_words(sa)
dtype: list
Rows: 2
[['one', 'one'], ['a dog', 'a dog']]
'''
_raise_error_if_not_sarray(text, "text")
## Compute word counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.RareWordTrimmer(features='docs',
threshold=threshold,
to_lower=to_lower,
delimiters=delimiters,
stopwords=stop_words,
output_column_prefix=None)
tokens = fe.fit_transform(sf)
return tokens['docs'] | Remove words that occur below a certain number of times in an SArray.
This is a common method of cleaning text before it is used, and can increase the
quality and explainability of the models learned on the transformed data.
RareWordTrimmer can be applied to all the string-, dictionary-, and list-typed
columns in an SArray.
* **string** : The string is first tokenized. By default, all letters are
first converted to lower case, then tokenized by space characters. Each
token is taken to be a word, and the words occurring below a threshold
number of times across the entire column are removed, then the remaining
tokens are concatenated back into a string.
* **list** : Each element of the list must be a string, where each element
is assumed to be a token. The remaining tokens are then filtered
by count occurrences and a threshold value.
* **dict** : The method first obtains the list of keys in the dictionary.
This list is then processed as a standard list, except the value of each
key must be of integer type and is considered to be the count of that key.
Parameters
----------
text : SArray[str | dict | list]
The input text data.
threshold : int, optional
The count below which words are removed from the input.
stop_words: list[str], optional
A manually specified list of stop words, which are removed regardless
of count.
to_lower : bool, optional
Indicates whether to map the input strings to lower case before counting.
delimiters: list[string], optional
A list of delimiter characters for tokenization. By default, the list
is defined to be the list of space characters. The user can define
any custom list of single-character delimiters. Alternatively, setting
`delimiters=None` will use a Penn treebank type tokenization, which
is better at handling punctuations. (See reference below for details.)
Returns
-------
out : SArray.
An SArray with words below a threshold removed.
See Also
--------
count_ngrams, tf_idf, tokenize,
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps in a fox like way.",
"Word word WORD, word!!!word"])
# Run drop_words
>>> turicreate.text_analytics.drop_words(sa)
dtype: str
Rows: 2
['fox fox', 'word word']
# Run drop_words with Penn treebank style tokenization to handle
# punctuations
>>> turicreate.text_analytics.drop_words(sa, delimiters=None)
dtype: str
Rows: 2
['fox fox', 'word word word']
# Run drop_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 2},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.drop_words(sa)
dtype: dict
Rows: 2
[{'bob alice': 2}, {'a dog cat': 5}]
# Run drop_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah', 'One'],
['a dog', 'a dog cat', 'A DOG']])
>>> turicreate.text_analytics.drop_words(sa)
dtype: list
Rows: 2
[['one', 'one'], ['a dog', 'a dog']] |
def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None, kp_inst=None):
"""Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place
"""
if kp_inst is not None:
kp_inst.load(date=sat.date, verifyPad=True)
kpData = kp_inst
elif kpData is None:
kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
kp.load(date=sat.date, verifyPad=True)
kpData = kp
if maxKp is None:
maxKp = 3+ 1./3.
if filterTime is None:
filterTime = 24
# now the defaults are ensured, let's do some filtering
# date of satellite data
date = sat.date
selData = kpData[date-pds.DateOffset(days=1):date+pds.DateOffset(days=1)]
ind, = np.where(selData['kp'] >= maxKp)
for lind in ind:
sat.data[selData.index[lind]:(selData.index[lind]+pds.DateOffset(hours=filterTime) )] = np.NaN
sat.data = sat.data.dropna(axis=0, how='all')
return | Filters pysat.Instrument data for a given time after Kp drops below the gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place |
def creationTime(item):
"""
Returns the creation time of the given item.
"""
forThisItem = _CreationTime.createdItem == item
return item.store.findUnique(_CreationTime, forThisItem).timestamp | Returns the creation time of the given item. |
def apply_all_rules(self, *args, **kwargs):
"""cycle through all rules and apply them all without regard to
success or failure
returns:
True - since success or failure is ignored"""
for x in self.rules:
self._quit_check()
if self.config.chatty_rules:
self.config.logger.debug(
'apply_all_rules: %s',
to_str(x.__class__)
)
predicate_result, action_result = x.act(*args, **kwargs)
if self.config.chatty_rules:
self.config.logger.debug(
' : pred - %s; act - %s',
predicate_result,
action_result
)
return True | cycle through all rules and apply them all without regard to
success or failure
returns:
True - since success or failure is ignored |
def completion():
"""Output completion (to be eval'd).
For bash or zsh, add the following to your .bashrc or .zshrc:
eval "$(doitlive completion)"
For fish, add the following to ~/.config/fish/completions/doitlive.fish:
eval (doitlive completion)
"""
shell = env.get("SHELL", None)
if env.get("SHELL", None):
echo(
click_completion.get_code(
shell=shell.split(os.sep)[-1], prog_name="doitlive"
)
)
else:
echo(
"Please ensure that the {SHELL} environment "
"variable is set.".format(SHELL=style("SHELL", bold=True))
)
sys.exit(1) | Output completion (to be eval'd).
For bash or zsh, add the following to your .bashrc or .zshrc:
eval "$(doitlive completion)"
For fish, add the following to ~/.config/fish/completions/doitlive.fish:
eval (doitlive completion) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'feedback_id') and self.feedback_id is not None:
_dict['feedback_id'] = self.feedback_id
if hasattr(self, 'user_id') and self.user_id is not None:
_dict['user_id'] = self.user_id
if hasattr(self, 'comment') and self.comment is not None:
_dict['comment'] = self.comment
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'feedback_data') and self.feedback_data is not None:
_dict['feedback_data'] = self.feedback_data._to_dict()
return _dict | Return a json dictionary representing this model. |
def GetTemplateID(alias,location,name):
"""Given a template name return the unique OperatingSystem ID.
:param alias: short code for a particular account. If none will use account's default alias
:param location: datacenter where group resides
:param name: template name
"""
if alias is None: alias = clc.v1.Account.GetAlias()
if location is None: location = clc.v1.Account.GetLocation()
r = Server.GetTemplates(alias,location)
for row in r:
if row['Name'].lower() == name.lower(): return(row['OperatingSystem'])
else:
if clc.args: clc.v1.output.Status("ERROR",3,"Template %s not found in account %s datacenter %s" % (name,alias,location))
raise Exception("Template not found") | Given a template name return the unique OperatingSystem ID.
:param alias: short code for a particular account. If none will use account's default alias
:param location: datacenter where group resides
:param name: template name |
def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False):
''' returns a string referencing the given subbrick within a dset
This method reads the header of the dataset ``dset``, finds the subbrick whose
label matches ``label`` and returns a string of type ``dataset[X]``, which can
be used by most AFNI programs to refer to a subbrick within a file
The options coef, tstat, fstat, and rstat will add the suffix that is
appended to the label by 3dDeconvolve
:coef: "#0_Coef"
:tstat: "#0_Tstat"
:fstat: "_Fstat"
:rstat: "_R^2"
If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
(instead of 0), for models that use multiple parameters (e.g., "TENT").
If ``number_only`` is set to ``True``, only the subbrick number is returned instead of a string
'''
if coef is not False:
if coef is True:
coef = 0
label += "#%d_Coef" % coef
elif tstat != False:
if tstat==True:
tstat = 0
label += "#%d_Tstat" % tstat
elif fstat:
label += "_Fstat"
elif rstat:
label += "_R^2"
info = nl.dset_info(dset)
if info==None:
nl.notify('Error: Couldn\'t get info from dset "%s"'%dset,level=nl.level.error)
return None
i = info.subbrick_labeled(label)
if number_only:
return i
return '%s[%d]' % (dset,i) | returns a string referencing the given subbrick within a dset
This method reads the header of the dataset ``dset``, finds the subbrick whose
label matches ``label`` and returns a string of type ``dataset[X]``, which can
be used by most AFNI programs to refer to a subbrick within a file
The options coef, tstat, fstat, and rstat will add the suffix that is
appended to the label by 3dDeconvolve
:coef: "#0_Coef"
:tstat: "#0_Tstat"
:fstat: "_Fstat"
:rstat: "_R^2"
If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
(instead of 0), for models that use multiple parameters (e.g., "TENT").
If ``number_only`` is set to ``True``, only the subbrick number is returned instead of a string
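A minimal sketch of just the label-suffix construction performed above; it needs no AFNI dataset or call to nl.dset_info, and the label and parameter number are hypothetical.
label = 'stimA'              # hypothetical regressor label
tstat = 2                    # hypothetical parameter number
label += "#%d_Tstat" % tstat
print(label)                 # stimA#2_Tstat, the 3dDeconvolve-style sub-brick label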
def _translate_dst_register_oprnd(self, operand):
"""Translate destination register operand to SMT expr.
"""
reg_info = self._arch_alias_mapper.get(operand.name, None)
parent_reg_constrs = []
if reg_info:
var_base_name, offset = reg_info
var_name_old = self._get_var_name(var_base_name, fresh=False)
var_name_new = self._get_var_name(var_base_name, fresh=True)
var_size = self._arch_regs_size[var_base_name]
ret_val_old = self.make_bitvec(var_size, var_name_old)
ret_val_new = self.make_bitvec(var_size, var_name_new)
ret_val = smtfunction.extract(ret_val_new, offset, operand.size)
if 0 < offset < var_size - 1:
lower_expr_1 = smtfunction.extract(ret_val_new, 0, offset)
lower_expr_2 = smtfunction.extract(ret_val_old, 0, offset)
parent_reg_constrs += [lower_expr_1 == lower_expr_2]
upper_expr_1 = smtfunction.extract(ret_val_new, offset + operand.size, var_size - offset - operand.size)
upper_expr_2 = smtfunction.extract(ret_val_old, offset + operand.size, var_size - offset - operand.size)
parent_reg_constrs += [upper_expr_1 == upper_expr_2]
elif offset == 0:
upper_expr_1 = smtfunction.extract(ret_val_new, offset + operand.size, var_size - offset - operand.size)
upper_expr_2 = smtfunction.extract(ret_val_old, offset + operand.size, var_size - offset - operand.size)
parent_reg_constrs += [upper_expr_1 == upper_expr_2]
elif offset == var_size-1:
lower_expr_1 = smtfunction.extract(ret_val_new, 0, offset)
lower_expr_2 = smtfunction.extract(ret_val_old, 0, offset)
parent_reg_constrs += [lower_expr_1 == lower_expr_2]
else:
var_name_new = self._get_var_name(operand.name, fresh=True)
ret_val = self.make_bitvec(operand.size, var_name_new)
return ret_val, parent_reg_constrs | Translate destination register operand to SMT expr. |
def p_ident_parts(self, p):
""" ident_parts : ident_part
| selector
| filter_group
"""
if not isinstance(p[1], list):
p[1] = [p[1]]
p[0] = p[1] | ident_parts : ident_part
| selector
| filter_group |
def dropEvent(self, event: QDropEvent):
items = [item for item in self.items(event.scenePos()) if isinstance(item, GraphicsItem) and item.acceptDrops()]
item = None if len(items) == 0 else items[0]
if len(event.mimeData().urls()) > 0:
self.files_dropped.emit(event.mimeData().urls())
indexes = list(event.mimeData().text().split("/")[:-1])
group_nodes = []
file_nodes = []
for index in indexes:
try:
row, column, parent = map(int, index.split(","))
if parent == -1:
parent = self.tree_root_item
else:
parent = self.tree_root_item.child(parent)
node = parent.child(row)
if node.is_group:
group_nodes.append(node)
else:
file_nodes.append(node)
except ValueError:
continue
# Which Nodes to add?
nodes_to_add = []
""":type: list of ProtocolTreeItem """
for group_node in group_nodes:
nodes_to_add.extend(group_node.children)
nodes_to_add.extend([file_node for file_node in file_nodes if file_node not in nodes_to_add])
protocols_to_add = [node.protocol for node in nodes_to_add]
ref_item = item
position = None if ref_item is None else item.drop_indicator_position
self.add_protocols(ref_item, position, protocols_to_add)
super().dropEvent(event) | :type: list of ProtocolTreeItem |
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
unit_size, quick_key=None, action_on_duplicate=None,
mtime=None, version_control=None, folder_key=None,
filedrop_key=None, path=None, previous_hash=None):
"""upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
"""
action = 'upload/resumable'
headers = {
'x-filesize': str(filesize),
'x-filehash': filehash,
'x-unit-hash': unit_hash,
'x-unit-id': str(unit_id),
'x-unit-size': str(unit_size)
}
params = QueryParams({
'quick_key': quick_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'previous_hash': previous_hash
})
upload_info = {
"fd": fd,
"filename": "chunk"
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers) | upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable |
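A minimal sketch of preparing per-unit values for a resumable upload; the file name, unit size and the choice of SHA-256 are assumptions, not values taken from the API documentation above.
import hashlib
import os

path = "backup.zip"                 # hypothetical file
unit_size = 4 * 1024 * 1024         # hypothetical 4 MiB units

filesize = os.path.getsize(path)
with open(path, "rb") as whole:
    filehash = hashlib.sha256(whole.read()).hexdigest()

with open(path, "rb") as fd:
    unit_id = 0
    while True:
        chunk = fd.read(unit_size)
        if not chunk:
            break
        unit_hash = hashlib.sha256(chunk).hexdigest()
        # each chunk and its metadata would be handed to upload_resumable()
        print(unit_id, len(chunk), unit_hash[:12])
        unit_id += 1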
def attend_fight(self, mappings, node_ip, predictions, ntp):
"""
This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function.
"""
# Bind listen server socket.
mappings = self.add_listen_sock(mappings)
log.debug(mappings)
# Walk to fight.
self.simultaneous_cons = []
predictions = predictions.split(" ")
self.simultaneous_fight(mappings, node_ip, predictions, ntp)
# Return hole made in opponent.
if len(self.simultaneous_cons):
"""
There may be a problem here. I noticed during testing that
when these lines were removed, connections tended to
succeed more. There may be a lack of synchronization between
the timing for connections to succeed so that a close on
one side of the fight ends up ruining valid connections on
this side. Will need to test more.
Notes: the UNL synchronization code could actually fix
this (potential) problem as a cool unintended side-effect.
"""
# Close unneeded holes.
"""
for i in range(1, len(self.simultaneous_cons)):
try:
print("Closing unneeded hole")
#self.simultaneous_cons[i].s.close()
except:
pass
"""
try:
# Return open hole.
return self.simultaneous_cons[0]
except:
# Try accept a connection.
log.debug("No holes found")
for mapping in mappings:
# Check if there's a new con.
s = mapping["listen"]
r, w, e = select.select(
[s],
[],
[],
0
)
# Find socket.
for found_sock in r:
# Not us.
if found_sock != s:
continue
# Accept a new con from the listen queue.
log.debug("Accept logic works!")
client, address = s.accept()
con = Sock(blocking=0)
con.set_sock(client)
return con
return None | This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function. |
def analyze(self, mode=None, timesteps=None):
"""Analyzes the grid by power flow analysis
Analyze the grid for violations of hosting capacity. This means: perform a
power flow analysis and obtain voltages at nodes (load, generator,
stations/transformers and branch tees) and active/reactive power at
lines.
The power flow analysis can currently only be performed for both grid
levels MV and LV. See ToDos section for more information.
A static `non-linear power flow analysis is performed using PyPSA
<https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_.
The high-voltage to medium-voltage transformers are not included in the
analysis. The slack bus is defined at the secondary side of these
transformers assuming an ideal tap changer. Hence, potential
overloading of the transformers is not studied here.
Parameters
----------
mode : str
Allows to toggle between power flow analysis (PFA) on the whole
grid topology (MV + LV), only MV or only LV. Defaults to None which
equals power flow analysis for MV + LV which is the only
implemented option at the moment. See ToDos section for
more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies for which time steps to conduct the power flow
analysis. It defaults to None in which case the time steps in
timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are
used.
Notes
-----
The current implementation always translates the grid topology
representation to the PyPSA format and stores it to
:attr:`self.network.pypsa`.
ToDos
------
The option to export only the edisgo MV grid (mode = 'mv') to conduct
a power flow analysis is implemented in
:func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised
since the rest of edisgo does not handle this option yet. The analyze
function will throw an error since
:func:`~.tools.pypsa_io.process_pfa_results`
does not handle aggregated loads and generators in the LV grids. Also,
grid reinforcement, pypsa update of time series, and probably other
functionalities do not work when only the MV grid is analysed.
Further ToDos are:
* explain how power plants are modeled, if possible use a link
* explain where to find and adjust power flow analysis defining
parameters
See Also
--------
:func:`~.tools.pypsa_io.to_pypsa`
Translator to PyPSA data format
"""
if timesteps is None:
timesteps = self.network.timeseries.timeindex
# check if timesteps is array-like, otherwise convert to list
if not hasattr(timesteps, "__len__"):
timesteps = [timesteps]
if self.network.pypsa is None:
# Translate eDisGo grid topology representation to PyPSA format
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode, timesteps)
else:
if self.network.pypsa.edisgo_mode is not mode:
# Translate eDisGo grid topology representation to PyPSA format
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode, timesteps)
# check if all timesteps are in pypsa.snapshots, if not update time
# series
if False in [True if _ in self.network.pypsa.snapshots else False
for _ in timesteps]:
pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps)
# run power flow analysis
pf_results = self.network.pypsa.pf(timesteps)
if all(pf_results['converged']['0'].tolist()):
pypsa_io.process_pfa_results(
self.network, self.network.pypsa, timesteps)
else:
raise ValueError("Power flow analysis did not converge.") | Analyzes the grid by power flow analysis
Analyze the grid for violations of hosting capacity. This means: perform a
power flow analysis and obtain voltages at nodes (load, generator,
stations/transformers and branch tees) and active/reactive power at
lines.
The power flow analysis can currently only be performed for both grid
levels MV and LV. See ToDos section for more information.
A static `non-linear power flow analysis is performed using PyPSA
<https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_.
The high-voltage to medium-voltage transformers are not included in the
analysis. The slack bus is defined at the secondary side of these
transformers assuming an ideal tap changer. Hence, potential
overloading of the transformers is not studied here.
Parameters
----------
mode : str
Allows to toggle between power flow analysis (PFA) on the whole
grid topology (MV + LV), only MV or only LV. Defaults to None which
equals power flow analysis for MV + LV which is the only
implemented option at the moment. See ToDos section for
more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies for which time steps to conduct the power flow
analysis. It defaults to None in which case the time steps in
timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are
used.
Notes
-----
The current implementation always translates the grid topology
representation to the PyPSA format and stores it to
:attr:`self.network.pypsa`.
ToDos
------
The option to export only the edisgo MV grid (mode = 'mv') to conduct
a power flow analysis is implemented in
:func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised
since the rest of edisgo does not handle this option yet. The analyze
function will throw an error since
:func:`~.tools.pypsa_io.process_pfa_results`
does not handle aggregated loads and generators in the LV grids. Also,
grid reinforcement, pypsa update of time series, and probably other
functionalities do not work when only the MV grid is analysed.
Further ToDos are:
* explain how power plants are modeled, if possible use a link
* explain where to find and adjust power flow analysis defining
parameters
See Also
--------
:func:`~.tools.pypsa_io.to_pypsa`
Translator to PyPSA data format |
def reload(self, **kwargs):
"""Reload the document"""
frame = self.one({'_id': self._id}, **kwargs)
self._document = frame._document | Reload the document |
def _decrypt_data_key(self, encrypted_data_key, algorithm, encryption_context=None):
"""Decrypts an encrypted data key and returns the plaintext.
:param encrypted_data_key: Encrypted data key
:type encrypted_data_key: aws_encryption_sdk.structures.EncryptedDataKey
:type algorithm: `aws_encryption_sdk.identifiers.Algorithm` (not used for KMS)
:param dict encryption_context: Encryption context to use in decryption
:returns: Decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key
"""
kms_params = {"CiphertextBlob": encrypted_data_key.encrypted_data_key}
if encryption_context:
kms_params["EncryptionContext"] = encryption_context
if self.config.grant_tokens:
kms_params["GrantTokens"] = self.config.grant_tokens
# Catch any boto3 errors and normalize to expected DecryptKeyError
try:
response = self.config.client.decrypt(**kms_params)
plaintext = response["Plaintext"]
except (ClientError, KeyError):
error_message = "Master Key {key_id} unable to decrypt data key".format(key_id=self._key_id)
_LOGGER.exception(error_message)
raise DecryptKeyError(error_message)
return DataKey(
key_provider=self.key_provider, data_key=plaintext, encrypted_data_key=encrypted_data_key.encrypted_data_key
) | Decrypts an encrypted data key and returns the plaintext.
:param encrypted_data_key: Encrypted data key
:type encrypted_data_key: aws_encryption_sdk.structures.EncryptedDataKey
:type algorithm: `aws_encryption_sdk.identifiers.Algorithm` (not used for KMS)
:param dict encryption_context: Encryption context to use in decryption
:returns: Decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key |
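A minimal sketch of the underlying boto3 call, assuming a configured KMS client; the ciphertext and encryption context values are hypothetical placeholders.
import boto3

kms = boto3.client("kms")
ciphertext = b"..."                               # hypothetical EncryptedDataKey bytes
response = kms.decrypt(
    CiphertextBlob=ciphertext,
    EncryptionContext={"purpose": "example"},     # must match the context used at encryption
)
plaintext_data_key = response["Plaintext"]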
def saveNetworkToFile(self, filename, makeWrapper = 1, mode = "pickle", counter = None):
"""
Deprecated.
"""
if "?" in filename: # replace ? pattern in filename with epoch number
import re
char = "?"
match = re.search(re.escape(char) + "+", filename)
if match:
num = self.epoch
if counter != None:
num = counter
elif self.totalEpoch != 0: # use a total epoch, if one:
num = self.totalEpoch
fstring = "%%0%dd" % len(match.group())
filename = filename[:match.start()] + \
fstring % num + \
filename[match.end():]
self.lastAutoSaveNetworkFilename = filename
if mode == "pickle":
# dump network via pickle:
import pickle
basename = filename.split('.')[0]
filename += ".pickle"
fp = open(filename, 'w')
pickle.dump(self, fp)
fp.close()
# make wrapper python file:
if makeWrapper:
fp = open(basename + ".py", "w")
fp.write("from pyrobot.brain.conx import *\n")
fp.write("import pickle\n")
fp.write("fp = open('%s', 'r')\n" % filename)
fp.write("network = pickle.load(fp)")
fp.close()
# give some help:
print("To load network:")
print(" %% python -i %s " % (basename + ".py"))
print(" >>> network.train() # for example")
print("--- OR ---")
print(" % python")
print(" >>> from pyrobot.brain.conx import *")
print(" >>> network = loadNetwork(%s)" % filename)
print(" >>> network.train() # for example")
elif mode in ["plain", "conx"]:
fp = open(filename, "w")
fp.write("network, %s\n" % (self.__class__.__name__))
for layer in self.layers:
fp.write("layer, %s, %s\n" % (layer.name, layer.size))
# biases:
for i in range(layer.size):
fp.write("%f " % layer.weight[i])
fp.write("\n")
for connection in self.connections:
fp.write("connection, %s, %s\n" %(connection.fromLayer.name, connection.toLayer.name))
# weights:
for i in range(connection.fromLayer.size):
for j in range(connection.toLayer.size):
fp.write("%f " % connection.weight[i][j])
fp.write("\n")
fp.close() | Deprecated. |
def StyleFactory(style_elm):
"""
Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*.
"""
style_cls = {
WD_STYLE_TYPE.PARAGRAPH: _ParagraphStyle,
WD_STYLE_TYPE.CHARACTER: _CharacterStyle,
WD_STYLE_TYPE.TABLE: _TableStyle,
WD_STYLE_TYPE.LIST: _NumberingStyle
}[style_elm.type]
return style_cls(style_elm) | Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*. |
def __QueryFeed(self,
path,
type,
id,
result_fn,
create_fn,
query,
options=None,
partition_key_range_id=None):
"""Query for more than one Azure Cosmos resources.
:param str path:
:param str type:
:param str id:
:param function result_fn:
:param function create_fn:
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:rtype:
list
:raises SystemError: If the query compatibility mode is undefined.
"""
if options is None:
options = {}
if query:
__GetBodiesFromQueryResult = result_fn
else:
def __GetBodiesFromQueryResult(result):
if result is not None:
return [create_fn(self, body) for body in result_fn(result)]
else:
# If there is no change feed, the result data is empty and result is None.
# This case should be interpreted as an empty array.
return []
initial_headers = self.default_headers.copy()
# Copy to make sure that default_headers won't be changed.
if query is None:
# Query operations will use ReadEndpoint even though it uses GET(for feed requests)
request = request_object._RequestObject(type, documents._OperationType.ReadFeed)
headers = base.GetHeaders(self,
initial_headers,
'get',
path,
id,
type,
options,
partition_key_range_id)
result, self.last_response_headers = self.__Get(path,
request,
headers)
return __GetBodiesFromQueryResult(result)
else:
query = self.__CheckAndUnifyQueryFormat(query)
initial_headers[http_constants.HttpHeaders.IsQuery] = 'true'
if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.QueryJson
elif self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery:
initial_headers[http_constants.HttpHeaders.ContentType] = runtime_constants.MediaTypes.SQL
else:
raise SystemError('Unexpected query compatibility mode.')
# Query operations will use ReadEndpoint even though it uses POST(for regular query operations)
request = request_object._RequestObject(type, documents._OperationType.SqlQuery)
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options,
partition_key_range_id)
result, self.last_response_headers = self.__Post(path,
request,
query,
headers)
return __GetBodiesFromQueryResult(result) | Query for more than one Azure Cosmos resources.
:param str path:
:param str type:
:param str id:
:param function result_fn:
:param function create_fn:
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:rtype:
list
:raises SystemError: If the query compatibility mode is undefined. |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<Constant' +\
(' name = "{0}"'.format(self.name) if self.name else '') +\
(' symbol = "{0}"'.format(self.symbol) if self.symbol else '') +\
(' value = "{0}"'.format(self.value) if self.value else '') +\
(' dimension = "{0}"'.format(self.dimension) if self.dimension else '') +\
(' description = "{0}"'.format(self.description) if self.description else '') +\
'/>' | Exports this object into a LEMS XML object |
def authenticate_direct_credentials(self, username, password):
"""
Performs a direct bind, but using direct credentials. Can be used
if interfacing with an Active Directory domain controller which
authenticates using [email protected] directly.
Performing this kind of lookup limits the information we can get from
LDAP. Instead we can only deduce whether or not their bind was
successful. Do not use this method if you require more user info.
Args:
username (str): Username for the user to bind with.
LDAP_BIND_DIRECT_PREFIX will be prepended and
LDAP_BIND_DIRECT_SUFFIX will be appended.
password (str): User's password to bind with.
Returns:
AuthenticationResponse
"""
bind_user = '{}{}{}'.format(
self.config.get('LDAP_BIND_DIRECT_PREFIX'),
username,
self.config.get('LDAP_BIND_DIRECT_SUFFIX')
)
connection = self._make_connection(
bind_user=bind_user,
bind_password=password,
)
response = AuthenticationResponse()
try:
connection.bind()
response.status = AuthenticationResponseStatus.success
response.user_id = username
log.debug(
"Authentication was successful for user '{0}'".format(username))
if self.config.get('LDAP_BIND_DIRECT_GET_USER_INFO'):
# User wants extra info about the bind
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{0}{1})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'),
)
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
# Don't allow them to log in.
log.error(
"Could not gather extra info for user '{0}'".format(username))
else:
user = connection.response[0]
user['attributes']['dn'] = user['dn']
response.user_info = user['attributes']
response.user_dn = user['dn']
except ldap3.core.exceptions.LDAPInvalidCredentialsResult:
log.debug(
"Authentication was not successful for user '{0}'".format(username))
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response | Performs a direct bind, but using direct credentials. Can be used
if interfacing with an Active Directory domain controller which
authenticates using [email protected] directly.
Performing this kind of lookup limits the information we can get from
LDAP. Instead we can only deduce whether or not their bind was
successful. Do not use this method if you require more user info.
Args:
username (str): Username for the user to bind with.
LDAP_BIND_DIRECT_PREFIX will be prepended and
LDAP_BIND_DIRECT_SUFFIX will be appended.
password (str): User's password to bind with.
Returns:
AuthenticationResponse |
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to a Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance IDs of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)]) | Get current state of all Instances registered to a Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance IDs of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer. |
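A minimal usage sketch with boto 2; the region and load balancer name are hypothetical.
import boto.ec2.elb

conn = boto.ec2.elb.connect_to_region("us-east-1")
for state in conn.describe_instance_health("my-load-balancer"):
    print(state.instance_id, state.state, state.description)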
def decr(name, value=1, rate=1, tags=None):
"""Decrement a metric by value.
>>> import statsdecor
>>> statsdecor.decr('my.metric')
"""
client().decr(name, value, rate, tags) | Decrement a metric by value.
>>> import statsdecor
>>> statsdecor.decr('my.metric') |
def available(name):
'''
.. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
path = '/etc/rc.d/{0}'.format(name)
return os.path.isfile(path) and os.access(path, os.X_OK) | .. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd |
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"):
"""
Starts a packet capture.
:param port_number: allocated port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
"""
if port_number not in self._mappings:
raise DynamipsError("Port {} is not allocated".format(port_number))
nio = self._mappings[port_number]
data_link_type = data_link_type.lower()
if data_link_type.startswith("dlt_"):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
raise DynamipsError("Port {} has already a filter applied".format(port_number))
yield from nio.bind_filter("both", "capture")
yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file))
log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,
id=self._id,
port=port_number)) | Starts a packet capture.
:param port_number: allocated port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB |
def _Execute(self, options):
"""Handles security groups operations."""
whitelist = dict(
name=options["name"],
description=options.get("description", "<empty>"))
return self._agent.client.compute.security_groups.create(**whitelist) | Handles security group operations.
def union(self, other):
"""
Makes a striplog of all unions.
Args:
Striplog. The striplog instance to union with.
Returns:
Striplog. The result of the union.
"""
if not isinstance(other, self.__class__):
m = "You can only union striplogs with each other."
raise StriplogError(m)
result = []
for iv in deepcopy(self):
for jv in other:
if iv.any_overlaps(jv):
iv = iv.union(jv)
result.append(iv)
return Striplog(result) | Makes a striplog of all unions.
Args:
Striplog. The striplog instance to union with.
Returns:
Striplog. The result of the union. |
def createSystem(self, topology, nonbondedMethod=NoCutoff,
nonbondedCutoff=1.0 * u.nanometer, constraints=None,
rigidWater=True, removeCMMotion=True, hydrogenMass=None,
**args):
"""Construct an OpenMM System representing a Topology with this force field.
Parameters
----------
topology : Topology
The Topology for which to create a System
nonbondedMethod : object=NoCutoff
The method to use for nonbonded interactions. Allowed values are
NoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, or PME.
nonbondedCutoff : distance=1*nanometer
The cutoff distance to use for nonbonded interactions
constraints : object=None
Specifies which bonds and angles should be implemented with constraints.
Allowed values are None, HBonds, AllBonds, or HAngles.
rigidWater : boolean=True
If true, water molecules will be fully rigid regardless of the value
passed for the constraints argument
removeCMMotion : boolean=True
If true, a CMMotionRemover will be added to the System
hydrogenMass : mass=None
The mass to use for hydrogen atoms bound to heavy atoms. Any mass
added to a hydrogen is subtracted from the heavy atom to keep
their total mass the same.
args
Arbitrary additional keyword arguments may also be specified.
This allows extra parameters to be specified that are specific to
particular force fields.
Returns
-------
system
the newly created System
"""
# Overwrite previous _SystemData object
self._SystemData = app.ForceField._SystemData()
data = self._SystemData
data.atoms = list(topology.atoms())
for atom in data.atoms:
data.excludeAtomWith.append([])
# Make a list of all bonds
for bond in topology.bonds():
data.bonds.append(app.ForceField._BondData(bond[0].index, bond[1].index))
# Record which atoms are bonded to each other atom
bonded_to_atom = []
for i in range(len(data.atoms)):
bonded_to_atom.append(set())
data.atomBonds.append([])
for i in range(len(data.bonds)):
bond = data.bonds[i]
bonded_to_atom[bond.atom1].add(bond.atom2)
bonded_to_atom[bond.atom2].add(bond.atom1)
data.atomBonds[bond.atom1].append(i)
data.atomBonds[bond.atom2].append(i)
# TODO: Better way to lookup nonbonded parameters...?
nonbonded_params = None
for generator in self.getGenerators():
if isinstance(generator, NonbondedGenerator):
nonbonded_params = generator.params.paramsForType
break
for chain in topology.chains():
for res in chain.residues():
for atom in res.atoms():
data.atomType[atom] = atom.id
if nonbonded_params:
params = nonbonded_params[atom.id]
data.atomParameters[atom] = params
# Create the System and add atoms
sys = mm.System()
for atom in topology.atoms():
# Look up the atom type name, returning a helpful error message if it cannot be found.
if atom not in data.atomType:
raise Exception("Could not identify atom type for atom '%s'." % str(atom))
typename = data.atomType[atom]
# Look up the type name in the list of registered atom types, returning a helpful error message if it cannot be found.
if typename not in self._atomTypes:
msg = "Could not find typename '%s' for atom '%s' in list of known atom types.\n" % (typename, str(atom))
msg += "Known atom types are: %s" % str(self._atomTypes.keys())
raise Exception(msg)
# Add the particle to the OpenMM system.
mass = self._atomTypes[typename].mass
sys.addParticle(mass)
# Adjust hydrogen masses if requested.
if hydrogenMass is not None:
if not u.is_quantity(hydrogenMass):
hydrogenMass *= u.dalton
for atom1, atom2 in topology.bonds():
if atom1.element == elem.hydrogen:
(atom1, atom2) = (atom2, atom1)
if atom2.element == elem.hydrogen and atom1.element not in (elem.hydrogen, None):
transfer_mass = hydrogenMass - sys.getParticleMass(atom2.index)
sys.setParticleMass(atom2.index, hydrogenMass)
mass = sys.getParticleMass(atom1.index) - transfer_mass
sys.setParticleMass(atom1.index, mass)
# Set periodic boundary conditions.
box_vectors = topology.getPeriodicBoxVectors()
if box_vectors is not None:
sys.setDefaultPeriodicBoxVectors(box_vectors[0],
box_vectors[1],
box_vectors[2])
elif nonbondedMethod not in [NoCutoff, CutoffNonPeriodic]:
raise ValueError('Requested periodic boundary conditions for a '
'Topology that does not specify periodic box '
'dimensions')
# Make a list of all unique angles
unique_angles = set()
for bond in data.bonds:
for atom in bonded_to_atom[bond.atom1]:
if atom != bond.atom2:
if atom < bond.atom2:
unique_angles.add((atom, bond.atom1, bond.atom2))
else:
unique_angles.add((bond.atom2, bond.atom1, atom))
for atom in bonded_to_atom[bond.atom2]:
if atom != bond.atom1:
if atom > bond.atom1:
unique_angles.add((bond.atom1, bond.atom2, atom))
else:
unique_angles.add((atom, bond.atom2, bond.atom1))
data.angles = sorted(list(unique_angles))
# Make a list of all unique proper torsions
unique_propers = set()
for angle in data.angles:
for atom in bonded_to_atom[angle[0]]:
if atom not in angle:
if atom < angle[2]:
unique_propers.add((atom, angle[0], angle[1], angle[2]))
else:
unique_propers.add((angle[2], angle[1], angle[0], atom))
for atom in bonded_to_atom[angle[2]]:
if atom not in angle:
if atom > angle[0]:
unique_propers.add((angle[0], angle[1], angle[2], atom))
else:
unique_propers.add((atom, angle[2], angle[1], angle[0]))
data.propers = sorted(list(unique_propers))
# Make a list of all unique improper torsions
for atom in range(len(bonded_to_atom)):
bonded_to = bonded_to_atom[atom]
if len(bonded_to) > 2:
for subset in itertools.combinations(bonded_to, 3):
data.impropers.append((atom, subset[0], subset[1], subset[2]))
# Identify bonds that should be implemented with constraints
if constraints == AllBonds or constraints == HAngles:
for bond in data.bonds:
bond.isConstrained = True
elif constraints == HBonds:
for bond in data.bonds:
atom1 = data.atoms[bond.atom1]
atom2 = data.atoms[bond.atom2]
bond.isConstrained = atom1.name.startswith('H') or atom2.name.startswith('H')
if rigidWater:
for bond in data.bonds:
atom1 = data.atoms[bond.atom1]
atom2 = data.atoms[bond.atom2]
if atom1.residue.name == 'HOH' and atom2.residue.name == 'HOH':
bond.isConstrained = True
# Identify angles that should be implemented with constraints
if constraints == HAngles:
for angle in data.angles:
atom1 = data.atoms[angle[0]]
atom2 = data.atoms[angle[1]]
atom3 = data.atoms[angle[2]]
numH = 0
if atom1.name.startswith('H'):
numH += 1
if atom3.name.startswith('H'):
numH += 1
data.isAngleConstrained.append(numH == 2 or (numH == 1 and atom2.name.startswith('O')))
else:
data.isAngleConstrained = len(data.angles)*[False]
if rigidWater:
for i in range(len(data.angles)):
angle = data.angles[i]
atom1 = data.atoms[angle[0]]
atom2 = data.atoms[angle[1]]
atom3 = data.atoms[angle[2]]
if atom1.residue.name == 'HOH' and atom2.residue.name == 'HOH' and atom3.residue.name == 'HOH':
data.isAngleConstrained[i] = True
# Add virtual sites
for atom in data.virtualSites:
(site, atoms, excludeWith) = data.virtualSites[atom]
index = atom.index
data.excludeAtomWith[excludeWith].append(index)
if site.type == 'average2':
sys.setVirtualSite(index, mm.TwoParticleAverageSite(
atoms[0], atoms[1], site.weights[0], site.weights[1]))
elif site.type == 'average3':
sys.setVirtualSite(index, mm.ThreeParticleAverageSite(
atoms[0], atoms[1], atoms[2],
site.weights[0], site.weights[1], site.weights[2]))
elif site.type == 'outOfPlane':
sys.setVirtualSite(index, mm.OutOfPlaneSite(
atoms[0], atoms[1], atoms[2],
site.weights[0], site.weights[1], site.weights[2]))
elif site.type == 'localCoords':
local_coord_site = mm.LocalCoordinatesSite(
atoms[0], atoms[1], atoms[2],
mm.Vec3(site.originWeights[0], site.originWeights[1], site.originWeights[2]),
mm.Vec3(site.xWeights[0], site.xWeights[1], site.xWeights[2]),
mm.Vec3(site.yWeights[0], site.yWeights[1], site.yWeights[2]),
mm.Vec3(site.localPos[0], site.localPos[1], site.localPos[2]))
sys.setVirtualSite(index, local_coord_site)
# Add forces to the System
for force in self._forces:
force.createForce(sys, data, nonbondedMethod, nonbondedCutoff, args)
if removeCMMotion:
sys.addForce(mm.CMMotionRemover())
# Let force generators do postprocessing
for force in self._forces:
if 'postprocessSystem' in dir(force):
force.postprocessSystem(sys, data, args)
# Execute scripts found in the XML files.
for script in self._scripts:
exec(script, locals())
return sys | Construct an OpenMM System representing a Topology with this force field.
Parameters
----------
topology : Topology
The Topology for which to create a System
nonbondedMethod : object=NoCutoff
The method to use for nonbonded interactions. Allowed values are
NoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, or PME.
nonbondedCutoff : distance=1*nanometer
The cutoff distance to use for nonbonded interactions
constraints : object=None
Specifies which bonds and angles should be implemented with constraints.
Allowed values are None, HBonds, AllBonds, or HAngles.
rigidWater : boolean=True
If true, water molecules will be fully rigid regardless of the value
passed for the constraints argument
removeCMMotion : boolean=True
If true, a CMMotionRemover will be added to the System
hydrogenMass : mass=None
The mass to use for hydrogen atoms bound to heavy atoms. Any mass
added to a hydrogen is subtracted from the heavy atom to keep
their total mass the same.
args
Arbitrary additional keyword arguments may also be specified.
This allows extra parameters to be specified that are specific to
particular force fields.
Returns
-------
system
the newly created System |
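A minimal usage sketch of the method documented above, assuming the standard OpenMM app.ForceField API whose signature this docstring mirrors; the PDB file name, the force field XML files, and the 4 amu hydrogen mass are illustrative assumptions, not part of the dataset row.

# Hedged sketch: building a System with ForceField.createSystem, assuming the
# simtk-era OpenMM imports used elsewhere in this code. 'input.pdb', the force
# field XML files, and the hydrogen mass value are illustrative choices.
from simtk.openmm import app
from simtk import unit

pdb = app.PDBFile('input.pdb')
forcefield = app.ForceField('amber99sb.xml', 'tip3p.xml')
system = forcefield.createSystem(pdb.topology,
                                 nonbondedMethod=app.PME,
                                 nonbondedCutoff=1.0 * unit.nanometer,
                                 constraints=app.HBonds,
                                 rigidWater=True,
                                 removeCMMotion=True,
                                 hydrogenMass=4 * unit.amu)
print(system.getNumParticles())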
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event) | Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content |
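A hedged usage sketch of select_key through the Airflow S3Hook it belongs to; the connection id, the s3:// URL, and the CSV header option are illustrative assumptions.

# Hedged sketch: running an S3 Select query through the hook's select_key.
# Because bucket_name is not passed, the bucket is parsed out of the URL.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='my_aws_conn')          # illustrative connection id
subset = hook.select_key(
    key='s3://my-bucket/data/users.csv',          # illustrative bucket/key
    expression="SELECT s.name FROM S3Object s",
    input_serialization={'CSV': {'FileHeaderInfo': 'USE'}},
    output_serialization={'CSV': {}},
)
print(subset)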
def paste_to_current_cell(self, tl_key, data, freq=None):
"""Pastes data into grid from top left cell tl_key
Parameters
----------
        tl_key: Tuple
        \tKey of top left cell of paste area
data: iterable of iterables where inner iterable returns string
\tThe outer iterable represents rows
freq: Integer, defaults to None
\tStatus message frequency
"""
self.pasting = True
grid_rows, grid_cols, __ = self.grid.code_array.shape
self.need_abort = False
tl_row, tl_col, tl_tab = self._get_full_key(tl_key)
row_overflow = False
col_overflow = False
no_pasted_cells = 0
for src_row, row_data in enumerate(data):
target_row = tl_row + src_row
if self.grid.actions._is_aborted(src_row, _("Pasting cells... "),
freq=freq):
self._abort_paste()
return False
# Check if rows fit into grid
if target_row >= grid_rows:
row_overflow = True
break
for src_col, cell_data in enumerate(row_data):
target_col = tl_col + src_col
if target_col >= grid_cols:
col_overflow = True
break
if cell_data is not None:
# Is only None if pasting into selection
key = target_row, target_col, tl_tab
try:
CellActions.set_code(self, key, cell_data)
no_pasted_cells += 1
except KeyError:
pass
if row_overflow or col_overflow:
self._show_final_overflow_message(row_overflow, col_overflow)
else:
self._show_final_paste_message(tl_key, no_pasted_cells)
self.pasting = False | Pastes data into grid from top left cell tl_key
Parameters
----------
    tl_key: Tuple
    \tKey of top left cell of paste area
data: iterable of iterables where inner iterable returns string
\tThe outer iterable represents rows
freq: Integer, defaults to None
\tStatus message frequency |
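A standalone sketch of the index arithmetic the paste loop above performs, showing how rows and columns of `data` map onto grid keys starting at `tl_key`; the grid shape and the sample 2x2 block are illustrative assumptions, and the pyspread-specific bookkeeping (abort dialog, status messages, overflow dialogs) is left out.

# Standalone sketch of the paste loop's coordinate mapping; the 1000x100 grid
# shape and the sample data are illustrative assumptions.
def paste_targets(tl_key, data, grid_shape=(1000, 100)):
    tl_row, tl_col, tl_tab = tl_key
    grid_rows, grid_cols = grid_shape
    for src_row, row_data in enumerate(data):
        target_row = tl_row + src_row
        if target_row >= grid_rows:
            break                        # row overflow: stop pasting
        for src_col, cell_data in enumerate(row_data):
            target_col = tl_col + src_col
            if target_col >= grid_cols:
                break                    # column overflow: skip rest of this row
            if cell_data is not None:    # None only appears when pasting into a selection
                yield (target_row, target_col, tl_tab), cell_data

# Paste a 2x2 block with its top left cell at row 3, column 1, table 0
print(list(paste_targets((3, 1, 0), [["a", "b"], ["c", "d"]])))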
def cli(env, sortby, columns, datacenter, username, storage_type):
"""List file storage."""
file_manager = SoftLayer.FileStorageManager(env.client)
file_volumes = file_manager.list_file_volumes(datacenter=datacenter,
username=username,
storage_type=storage_type,
mask=columns.mask())
table = formatting.Table(columns.columns)
table.sortby = sortby
for file_volume in file_volumes:
table.add_row([value or formatting.blank()
for value in columns.row(file_volume)])
env.fout(table) | List file storage. |
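A hedged sketch of the same listing done directly against the FileStorageManager this command delegates to, without the CLI layer; the environment-based client, the datacenter value, and the printed volume fields are illustrative assumptions.

# Hedged sketch: listing file volumes with SoftLayer.FileStorageManager directly.
# create_client_from_env() reads credentials from the environment or ~/.softlayer;
# 'dal13' and the printed keys are illustrative assumptions.
import SoftLayer

client = SoftLayer.create_client_from_env()
file_manager = SoftLayer.FileStorageManager(client)
for volume in file_manager.list_file_volumes(datacenter='dal13'):
    print(volume.get('username'), volume.get('capacityGb'))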