Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
1,600 | def datetime_to_timestamp(dt):
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
return (dt - epoch).total_seconds() | Convert timezone-aware `datetime` to POSIX timestamp and
return seconds since UNIX epoch.
Note: similar to `datetime.timestamp()` in Python 3.3+. |
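A quick usage sketch for the row above (assumes `pytz` supplies the UTC tzinfo; the arithmetic mirrors the function body):
import pytz
from datetime import datetime
# Hypothetical usage of the datetime_to_timestamp helper shown above.
dt = datetime(2020, 1, 1, tzinfo=pytz.utc)
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
print((dt - epoch).total_seconds())  # 1577836800.0, same as dt.timestamp() on Python 3.3+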
1,601 | def intersects(self, geometry, crs=None):
if crs:
geometry = dict(geometry)
geometry['crs'] = {'type': 'name', 'properties': {'name': crs}}
return Filter({self._name: {'$geoIntersects': {'$geometry': geometry}}}) | Select geometries that intersect with a GeoJSON geometry.
Geospatial operator: {$geoIntersects: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/geoIntersects/#op._S_geoIntersects
{
$geoIntersects: { $geometry: <geometry; a GeoJSON object> }
} |
1,602 | def one_of(inners, arg):
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'argument passes none of the following rules: {}'.format(rules_formatted)
) | At least one of the inner validators must pass |
1,603 | def _param_grad_helper(self,dL_dK,X,X2,target):
AX = np.dot(X,self.transform)
if X2 is None:
X2 = X
AX2 = AX
else:
AX2 = np.dot(X2, self.transform)
self.k._param_grad_helper(dL_dK,X,X2,target)
self.k._param_grad_helper(dL_dK,AX,X2,target)
self.k._param_grad_helper(dL_dK,X,AX2,target)
self.k._param_grad_helper(dL_dK,AX,AX2,target) | derivative of the covariance matrix with respect to the parameters. |
1,604 | def clean_up_dangling_images(self):
cargoes = Image.all(client=self._client_session, filters={'dangling': True})
for id, cargo in six.iteritems(cargoes):
logger.info("Removing dangling image: {0}".format(id))
cargo.delete() | Clean up all dangling images. |
1,605 | def addBiosample(self):
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
biosample = bio_metadata.Biosample(
dataset, self._args.biosampleName)
biosample.populateFromJson(self._args.biosample)
self._updateRepo(self._repo.insertBiosample, biosample) | Adds a new biosample into this repo |
1,606 | def dict_match(d, key, default=None):
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
if fnmatchcase(key, pattern):
return value
return default | Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
Args:
d (dict): A dict with filename patterns as keys
key (str): A key potentially matching any of the keys
default (object): The object to return if no pattern matched the
passed in key
Returns:
object: The dict value where the dict key matched the passed in key.
Or default if there was no match. |
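A minimal standalone sketch of how such a pattern-keyed lookup behaves (re-implemented here for illustration, not the library's own code):
from fnmatch import fnmatchcase

def dict_match_demo(d, key, default=None):
    # Exact hit first (only when the key itself contains no pattern bracket).
    if key in d and "[" not in key:
        return d[key]
    # Otherwise treat every dict key as an fnmatch-style filename pattern.
    for pattern, value in d.items():
        if fnmatchcase(key, pattern):
            return value
    return default

rules = {"*.flac": "lossless", "*.mp3": "lossy"}
print(dict_match_demo(rules, "song.mp3"))      # 'lossy'
print(dict_match_demo(rules, "cover.jpg", 0))  # 0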
1,607 | def commented_out_code_lines(source):
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers | Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even
more clutter. |
1,608 | def _items_to_rela_paths(self, items):
paths = []
for item in items:
if isinstance(item, (BaseIndexEntry, (Blob, Submodule))):
paths.append(self._to_relative_path(item.path))
elif isinstance(item, string_types):
paths.append(self._to_relative_path(item))
else:
raise TypeError("Invalid item type: %r" % item)
return paths | Returns a list of repo-relative paths from the given items which
may be absolute or relative paths, entries or blobs |
1,609 | def autoscan():
for port in serial.tools.list_ports.comports():
if is_micropython_usb_device(port):
connect_serial(port[0]) | autoscan will check all of the serial ports to see if they have
a matching VID:PID for a MicroPython board. |
1,610 | def handle(cls, value, provider=None, **kwargs):
if provider is None:
raise ValueError("provider is required")
d = deconstruct(value)
stack_fqn = d.stack_name
output = provider.get_output(stack_fqn, d.output_name)
return output | Fetch an output from the designated stack.
Args:
value (str): string with the following format:
<stack_name>::<output_name>, ie. some-stack::SomeOutput
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
Returns:
str: output from the specified stack |
1,611 | def pipe(self, target):
if callable(target):
sender, recver = self.hub.pipe()
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self) | Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2) |
1,612 | def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
raise ValueError("`alpha_high` value must be in [0, 1]")
if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
raise ValueError("`alpha_low` value must be in [0, 1]")
with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
x = tf.convert_to_tensor(x, name="features")
x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
x_is_below_0 = tf.minimum(alpha_low * x, 0)
return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope) | :func:`leaky_twice_relu6` can be used through its shortcut: :func:`tl.act.ltrelu6`.
This activation function is a modified version of :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
This function pushes the logic further by adding `leaky` behaviour both below zero and above six.
The function returns the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha_low : float
Slope for x < 0: ``f(x) = alpha_low * x``.
alpha_high : float
Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__ |
1,613 | def approx_min_num_components(nodes, negative_edges):
import utool as ut
num = 0
g_neg = nx.Graph()
g_neg.add_nodes_from(nodes)
g_neg.add_edges_from(negative_edges)
if nx.__version__.startswith('2'):
deg0_nodes = [n for n, d in g_neg.degree() if d == 0]
else:
deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0]
for u, v in ut.itertwo(deg0_nodes):
nx_contracted_nodes(g_neg, v, u, inplace=True)
unused = list(g_neg.nodes())
g_pos = nx.complement(g_neg)
if False:
from networkx.algorithms.approximation import clique
maxiset, cliques = clique.clique_removal(g_pos)
num = len(cliques)
return num
while len(unused) > 0:
num += 1
idx1 = 0
n1 = unused[idx1]
unused.remove(n1)
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
while len(neigbs) > 0:
idx2 = 0
n2 = neigbs[idx2]
unused.remove(n2)
g_neg = nx.contracted_nodes(g_neg, n1, n2)
g_pos = nx.complement(g_neg)
neigbs = list(g_pos.neighbors(n1))
neigbs = ut.isect(neigbs, unused)
print('num_components = %r' % (num,))
return num | Find approximate minimum number of connected components possible
Each edge represents that two nodes must be separated
This code doesn't solve the problem. The problem is NP-complete and
reduces to minimum clique cover (MCC). This is only an approximate
solution. Not sure what the approximation ratio is.
CommandLine:
python -m utool.util_graph approx_min_num_components
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> edges = [(1, 2), (2, 3), (3, 1),
>>> (4, 5), (5, 6), (6, 4),
>>> (7, 8), (8, 9), (9, 7),
>>> (1, 4), (4, 7), (7, 1),
>>> ]
>>> g_pos = nx.Graph()
>>> g_pos.add_edges_from(edges)
>>> g_neg = nx.complement(g_pos)
>>> #import plottool as pt
>>> #pt.qt4ensure()
>>> #pt.show_nx(g_pos)
>>> #pt.show_nx(g_neg)
>>> negative_edges = g_neg.edges()
>>> nodes = [1, 2, 3, 4, 5, 6, 7]
>>> negative_edges = [(1, 2), (2, 3), (4, 5)]
>>> result = approx_min_num_components(nodes, negative_edges)
>>> print(result)
2 |
1,614 | def _sync_last_sale_prices(self, dt=None):
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt | Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap. |
1,615 | def translate_features_to_letter_annotations(protein, more_sites=None):
from ssbio.databases.uniprot import longname_sites
from collections import defaultdict
sites = longname_sites
sites.append()
sites.append()
sites.append()
sites.append("transmembrane region")
sites.append("catalyticResidue")
if more_sites:
more_sites = ssbio.utils.force_list(more_sites)
sites.extend(more_sites)
sites = list(set(sites))
for site in sites:
protein.representative_sequence.letter_annotations[site] = [False] * protein.representative_sequence.seq_len
to_store = defaultdict(list)
for f in protein.representative_sequence.features:
if f.type in sites:
to_store[f.type].append(f)
for site, feature in to_store.items():
try:
positions = [int(f.location.start) for f in feature]
except TypeError:
log.error('{}: sequence {} - unable to parse positions for site "{}"'.format(protein.id, protein.representative_sequence.id, site))
continue
feat_letter_anno = []
for x in range(protein.representative_sequence.seq_len):
if x in positions:
idx = positions.index(x)
if in feature[idx].qualifiers:
feat_letter_anno.append(feature[idx].qualifiers[])
else:
feat_letter_anno.append(True)
else:
feat_letter_anno.append(False)
protein.representative_sequence.letter_annotations[site] = feat_letter_anno | Store select uniprot features (sites) as letter annotations with the key as the
type of site and the values as a list of booleans |
1,616 | def check_file_encoding(self, input_file_path):
self.log([u"Checking encoding of file ", input_file_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_file_encoding"):
return self.result
if not gf.file_can_be_read(input_file_path):
self._failed(u"File '%s' cannot be read." % (input_file_path))
return self.result
with io.open(input_file_path, "rb") as file_object:
bstring = file_object.read()
self._check_utf8_encoding(bstring)
return self.result | Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult` |
def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.abs, tf.float32) | Returns a TensorFluent for the abs function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the abs function. |
1,618 | def clear(self):
self.tags = []
self.chars = {}
self.attribs = {}
self.handler = None
self.piece = PieceTree.PieceTree()
self.isDynamic = False
self.data["note"] = None
self.data["direction"] = None
self.data["expression"] = None
self.data["degree"] = None
self.data["frame_note"] = None
self.data["staff_id"] = 1
self.data["voice"] = 1
self.data["handleType"] = "" | Method which resets any variables held by this class, so that the parser can be used again
:return: Nothing |
1,619 | def instantiate_by_name(self, object_name):
if object_name not in self.instances:
instance = self.instantiate_from_data(self.environment[object_name])
self.instances[object_name] = instance
return instance
else:
return self.instances[object_name] | Instantiate object from the environment, possibly giving some extra arguments |
1,620 | def serialize(self, pid, record, links_factory=None):
return self.schema.tostring(
self.transform_record(pid, record, links_factory)) | Serialize a single record and persistent identifier.
:param pid: Persistent identifier instance.
:param record: Record instance.
:param links_factory: Factory function for record links. |
1,621 | def total_surface_energy(self):
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy | Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl) |
1,622 | def inflate_bbox(self):
left, top, right, bottom = self.bounding_box
self.bounding_box = (
left & 0xFFFC,
top,
right if right % 4 == 0 else (right & 0xFFFC) + 0x04,
bottom)
return self.bounding_box | Realign the left and right edges of the bounding box such that they are
inflated to align modulo 4.
This method is optional, and used mainly to accommodate devices with
COM/SEG GDDRAM structures that store pixels in 4-bit nibbles. |
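A small worked example of the alignment above (plain integers, no device involved; the masks are the same as in the row):
# left is clamped down to a multiple of 4, right is rounded up to one.
left, top, right, bottom = 7, 0, 13, 10
new_left = left & 0xFFFC                                          # 4
new_right = right if right % 4 == 0 else (right & 0xFFFC) + 0x04  # 16
print((new_left, top, new_right, bottom))  # (4, 0, 16, 10)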
1,623 | def reload(name=DEFAULT, all_names=False):
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset() | Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name` |
1,624 | def tune(runner, kernel_options, device_options, tuning_options):
results = []
cache = {}
tuning_options["scaling"] = True
bounds, _, _ = get_bounds_x0_eps(tuning_options)
args = (kernel_options, tuning_options, runner, results, cache)
num_particles = 20
maxiter = 100
best_time_global = 1e20
best_position_global = []
swarm = []
for i in range(0, num_particles):
swarm.append(Particle(bounds, args))
for i in range(maxiter):
if tuning_options.verbose:
print("start iteration ", i, "best time global", best_time_global)
for j in range(num_particles):
swarm[j].evaluate(_cost_func)
if swarm[j].time <= best_time_global:
best_position_global = swarm[j].position
best_time_global = swarm[j].time
for j in range(0, num_particles):
swarm[j].update_velocity(best_position_global)
swarm[j].update_position(bounds)
if tuning_options.verbose:
print()
print(best_position_global)
print(best_time_global)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() |
1,625 | def add_transcript(self, transcript):
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript) | Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary |
1,626 | def stops(self):
serves = set()
for trip in self.trips():
for stop_time in trip.stop_times():
serves |= stop_time.stops()
return serves | Return stops served by this route. |
1,627 | def chunker(f, n):
f = iter(f)
x = []
while 1:
if len(x) < n:
try:
x.append(f.next())
except StopIteration:
if len(x) > 0:
yield tuple(x)
break
else:
yield tuple(x)
x = [] | Utility function to split iterable `f` into `n` chunks |
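An illustrative Python 3 equivalent of the chunker above (the row uses the Python 2 `f.next()` spelling):
def chunker3(iterable, n):
    it = iter(iterable)
    chunk = []
    for item in it:
        chunk.append(item)
        if len(chunk) == n:
            yield tuple(chunk)
            chunk = []
    if chunk:
        yield tuple(chunk)  # emit the final, possibly shorter, chunk

print(list(chunker3(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]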
1,628 | async def write_register(self, address, value, skip_encode=False):
await self._request(, address, value, skip_encode=skip_encode) | Write a modbus register. |
1,629 | def getViews(self, path, year=None, month=None, day=None, hour=None):
if path is None:
raise TelegraphAPIException("Error while executing getViews: "
"PAGE_NOT_FOUND")
r = requests.post(BASE_URL + "getViews/" + path, data={
"year": year,
"month": month,
"day": day,
"hour": hour,
})
if r.json()['ok'] is not True:
raise TelegraphAPIException("Error while executing getViews: " +
r.json()['error'])
return r.json()['result'] | Use this method to get the number of views for a Telegraph article.
:param path: Required. Path to the Telegraph page
(in the format Title-12-31, where 12 is the month and 31 the day the article was first published).
:type path: str
:param year: Required if month is passed.
If passed, the number of page views for the requested year will be returned.
:type year: int
:param month: Required if day is passed.
If passed, the number of page views for the requested month will be returned.
:type month: int
:param day: Required if hour is passed.
If passed, the number of page views for the requested day will be returned.
:type day: int
:param hour: If passed, the number of page views for the requested hour will be returned.
:type hour: int
:return: |
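For context, a hypothetical direct call against the Telegraph endpoint used above (the base URL is assumed; the page path and dates are made up):
import requests

BASE_URL = "https://api.telegra.ph/"  # assumed base URL for the Telegraph API
resp = requests.post(BASE_URL + "getViews/" + "Sample-Page-12-15",
                     data={"year": 2019, "month": 12, "day": 15})
payload = resp.json()
if payload.get("ok"):
    print(payload["result"])  # e.g. {'views': 42}
else:
    print("error:", payload.get("error"))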
1,630 | def wait_until_element_visible(self, element, timeout=None):
return self._wait_until(self._expected_condition_find_element_visible, element, timeout) | Search element and wait until it is visible
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is visible
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is still not visible after the timeout |
1,631 | async def _into_id_set(client, chats):
if chats is None:
return None
if not utils.is_list_like(chats):
chats = (chats,)
result = set()
for chat in chats:
if isinstance(chat, int):
if chat < 0:
result.add(chat)
else:
result.update({
utils.get_peer_id(types.PeerUser(chat)),
utils.get_peer_id(types.PeerChat(chat)),
utils.get_peer_id(types.PeerChannel(chat)),
})
elif isinstance(chat, TLObject) and chat.SUBCLASS_OF_ID == 0x2d45687:
result.add(utils.get_peer_id(chat))
else:
chat = await client.get_input_entity(chat)
if isinstance(chat, types.InputPeerSelf):
chat = await client.get_me(input_peer=True)
result.add(utils.get_peer_id(chat))
return result | Helper util to turn the input chat or chats into a set of IDs. |
1,632 | def guess_headers(self):
name = self.name.replace("*", "")
headers = []
if name in KNOWN_TYPES:
headers.append(KNOWN_TYPES[name])
elif name in STL:
headers.append('<{0}>'.format(name))
elif hasattr(ROOT, name) and name.startswith("T"):
headers.append('<{0}.h>'.format(name))
elif in name:
headers.append(.format(name.replace(, )))
elif name == :
headers.append()
else:
try:
CPPGrammar.BASIC_TYPE.parseString(name, parseAll=True)
except ParseException as e:
log.warning(
"unable to guess headers required for {0}".format(name))
if self.params:
for child in self.params:
headers.extend(child.guess_headers)
return list(set(headers)) | Attempt to guess what headers may be required in order to use this
type. Returns `guess_headers` of all children recursively.
* If the typename is in the :const:`KNOWN_TYPES` dictionary, use the
header specified there
* If it's an STL type, include <{type}>
* If it exists in the ROOT namespace and begins with T,
include <{type}.h> |
1,633 | def stream_sample(self, md5, kwargs=None):
max_rows = kwargs.get('max_rows', None) if kwargs else None
| Stream the sample by giving back a generator, typically used on 'logs'.
Args:
md5: the md5 of the sample
kwargs: a way of specifying subsets of samples (None for all)
max_rows: the maximum number of rows to return
Returns:
A generator that yields rows of the file/log |
1,634 | def _encode_char(char, charmap, defaultchar):
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar) | Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page
:param defaultchar: fallback character to use when `char` has no mapping |
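A small illustration of the mapping logic above (toy CP437-style code-page dict; values are the byte to emit):
def encode_char_demo(char, charmap, defaultchar='?'):
    if ord(char) < 128:
        return ord(char)          # plain ASCII passes through
    if char in charmap:
        return charmap[char]      # code-page specific mapping
    return ord(defaultchar)       # fallback byte

codepage = {'é': 0x82, 'ü': 0x81}        # illustrative entries
print(encode_char_demo('A', codepage))   # 65
print(encode_char_demo('é', codepage))   # 130
print(encode_char_demo('Ω', codepage))   # 63 ('?')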
1,635 | def get_printable(iterable):
if iterable:
return ''.join(i for i in iterable if i in string.printable)
return '' | Get printable characters from the specified string.
Note that str.isprintable() is not available in Python 2. |
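For illustration, the same filtering done inline (only characters in `string.printable` survive):
import string

raw = "abc\x00\x07def\xff"
print(''.join(c for c in raw if c in string.printable))  # 'abcdef'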
1,636 | def _replace_bm(self):
self._block_matcher = cv2.StereoBM(preset=self._bm_preset,
ndisparities=self._search_range,
SADWindowSize=self._window_size) | Replace ``_block_matcher`` with current values. |
1,637 | def print_licences(params, metadata):
if hasattr(params, 'licenses'):
if params.licenses:
_pp(metadata.licenses_desc())
sys.exit(0) | Print licenses.
:param argparse.Namespace params: parameter
:param bootstrap_py.classifier.Classifiers metadata: package metadata |
1,638 | def _RawGlobPathSpecWithNumericSchema(
file_system, parent_path_spec, segment_format, location, segment_number):
segment_files = []
while True:
segment_location = segment_format.format(location, segment_number)
kwargs = {'location': segment_location, 'parent': parent_path_spec.parent}
segment_path_spec = path_spec_factory.Factory.NewPathSpec(
parent_path_spec.type_indicator, **kwargs)
if not file_system.FileEntryExistsByPathSpec(segment_path_spec):
break
segment_files.append(segment_path_spec)
segment_number += 1
return segment_files | Globs for path specifications according to a numeric naming schema.
Args:
file_system (FileSystem): file system.
parent_path_spec (PathSpec): parent path specification.
segment_format (str): naming schema of the segment file location.
location (str): the base segment file location string.
segment_number (int): first segment number.
Returns:
list[PathSpec]: path specifications that match the glob. |
1,639 | def socket_recv(self):
try:
data = self.sock.recv(2048)
except socket.error, ex:
print ("?? socket.recv() error '%d:%s' from %s" %
(ex[0], ex[1], self.addrport()))
raise BogConnectionLost()
size = len(data)
if size == 0:
raise BogConnectionLost()
self.last_input_time = time.time()
self.bytes_received += size
for byte in data:
self._iac_sniffer(byte)
while True:
mark = self.recv_buffer.find('\n')
if mark == -1:
break
cmd = self.recv_buffer[:mark].strip()
self.command_list.append(cmd)
self.cmd_ready = True
self.recv_buffer = self.recv_buffer[mark+1:] | Called by TelnetServer when recv data is ready. |
1,640 | def get_hdrgos_g_usrgos(self, usrgos):
hdrgos_for_usrgos = set()
hdrgos_all = self.get_hdrgos()
usrgo2hdrgo = self.get_usrgo2hdrgo()
for usrgo in usrgos:
if usrgo in hdrgos_all:
hdrgos_for_usrgos.add(usrgo)
continue
hdrgo_cur = usrgo2hdrgo.get(usrgo, None)
if hdrgo_cur is not None:
hdrgos_for_usrgos.add(hdrgo_cur)
return hdrgos_for_usrgos | Return hdrgos which contain the usrgos. |
1,641 | def point_in_polygon(points, x, y):
odd = False
n = len(points)
for i in range(n):
j = i < n - 1 and i + 1 or 0
x0, y0 = points[i][0], points[i][1]
x1, y1 = points[j][0], points[j][1]
if (y0 < y and y1 >= y) or (y1 < y and y0 >= y):
if x0 + (y - y0) / (y1 - y0) * (x1 - x0) < x:
odd = not odd
return odd | Ray casting algorithm.
Determines how many times a horizontal ray starting from the point
intersects with the sides of the polygon.
If it is an even number of times, the point is outside, if odd, inside.
The algorithm does not always report correctly when the point is very close to the boundary.
The polygon is passed as a list of (x,y)-tuples. |
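A standalone sketch of the ray-casting test above, run on a unit square (re-implemented here so the example is self-contained):
def point_in_polygon_demo(points, x, y):
    odd = False
    n = len(points)
    for i in range(n):
        j = (i + 1) % n
        x0, y0 = points[i]
        x1, y1 = points[j]
        # Count crossings of a horizontal ray cast from (x, y).
        if (y0 < y <= y1) or (y1 < y <= y0):
            if x0 + (y - y0) / (y1 - y0) * (x1 - x0) < x:
                odd = not odd
    return odd

square = [(0, 0), (1, 0), (1, 1), (0, 1)]
print(point_in_polygon_demo(square, 0.5, 0.5))  # True
print(point_in_polygon_demo(square, 1.5, 0.5))  # False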
1,642 | def get_supported_file_loaders_2(force=False):
if force or (2, 7) <= sys.version_info < (3, 4):
import imp
loaders = []
for suffix, mode, type in imp.get_suffixes():
if type == imp.PY_SOURCE:
loaders.append((SourceFileLoader2, [suffix]))
else:
loaders.append((ImpFileLoader2, [suffix]))
return loaders
elif sys.version_info >= (3, 4):
from importlib.machinery import (
SOURCE_SUFFIXES, SourceFileLoader,
BYTECODE_SUFFIXES, SourcelessFileLoader,
EXTENSION_SUFFIXES, ExtensionFileLoader,
)
extensions = ExtensionFileLoader, EXTENSION_SUFFIXES
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode] | Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes). |
1,643 | def get_service(self):
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False) | Returns a BigQuery service object.
1,644 | def pad(self, pad_length):
self.pianoroll = np.pad(
self.pianoroll, ((0, pad_length), (0, 0)), 'constant') | Pad the pianoroll with zeros at the end along the time axis.
Parameters
----------
pad_length : int
The length to pad with zeros along the time axis. |
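A tiny numpy illustration of the zero-padding along the time axis (same `np.pad` call, toy array):
import numpy as np

pianoroll = np.ones((2, 3))             # 2 time steps, 3 pitches
padded = np.pad(pianoroll, ((0, 2), (0, 0)), 'constant')
print(padded.shape)  # (4, 3) -- two extra all-zero time steps at the end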
1,645 | def post_create_app(cls, app, **settings):
register_errorhandler = settings.pop('register_errorhandler', True)
if register_errorhandler:
AppException.register_errorhandler(app)
return app | Register the errorhandler for the AppException to the passed in
App.
Args:
app (fleaker.base.BaseApplication): A Flask application that
extends the Fleaker Base Application, such that the hooks are
implemented.
Kwargs:
register_errorhandler (bool): A boolean indicating if we want to
automatically register an errorhandler for the
:class:`AppException` exception class after we create this App.
Pass ``False`` to prevent registration. Default is ``True``.
Returns:
fleaker.base.BaseApplication: Returns the app it was given. |
1,646 | def probe_plugins():
plugins = UwsgiRunner().get_plugins()
for plugin in sorted(plugins.generic):
click.secho(plugin)
click.secho()
for plugin in sorted(plugins.request):
click.secho(plugin) | Runs uWSGI to determine what plugins are available and prints them out.
Generic plugins come first then after blank line follow request plugins. |
1,647 | def service_upsert(path, service_name, definition):
compose_result, loaded_definition, err = __load_compose_definitions(path, definition)
if err:
return err
services = compose_result['compose_content']['services']
if service_name in services:
msg = 'Service {0} already exists'.format(service_name)
return __standardize_result(False, msg, None, None)
services[service_name] = loaded_definition
return __dump_compose_file(path, compose_result,
'Service definition for {0} is set'.format(service_name),
already_existed=True) | Create or update the definition of a docker-compose service
This does not pull or up the service
This wil re-write your yaml file. Comments will be lost. Indentation is set to 2 spaces
path
Path where the docker-compose file is stored on the server
service_name
Name of the service to create
definition
Service definition as yaml or json string
CLI Example:
.. code-block:: bash
salt myminion dockercompose.service_upsert /path/where/docker-compose/stored service_name definition |
1,648 | def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
g = posterior.woodbury_vector
c = posterior.woodbury_inv
c_diag = np.diag(c)[:, None]
neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
return -neg_log_marginal_LOO | Leave one out error as found in
"Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
Vehtari et al. 2014. |
1,649 | def new_driver(self, testname=None):
channel = self.__get_channel()
driver = self.__get_driver_for_channel(channel)
if self.__config.get(WebDriverManager.REUSE_BROWSER, True):
if driver is None:
driver = self._webdriver_factory.create_webdriver(
testname=testname)
self.__register_driver(channel, driver)
else:
try:
driver.delete_all_cookies()
driver.get("about:blank")
except:
try:
if driver.is_online():
driver.quit()
except:
pass
driver = self._webdriver_factory.create_webdriver(
testname=testname)
self.__register_driver(channel, driver)
else:
if driver is not None:
try:
driver.quit()
except:
pass
self.__unregister_driver(channel)
driver = self._webdriver_factory.create_webdriver(
testname=testname)
self.__register_driver(channel, driver)
return driver | Used at a start of a test to get a new instance of WebDriver. If the
'resuebrowser' setting is true, it will use a recycled WebDriver instance
with delete_all_cookies() called.
Kwargs:
testname (str) - Optional test name to pass to Selenium Grid. Helpful for
labeling tests on 3rd party WebDriver cloud providers.
Returns:
Webdriver - Selenium Webdriver instance.
Usage::
driver = WTF_WEBDRIVER_MANAGER.new_driver()
driver.get("http://the-internet.herokuapp.com") |
1,650 | def images(self):
api_version = self._get_api_version()
if api_version == '2016-04-30-preview':
from .v2016_04_30_preview.operations import ImagesOperations as OperationClass
elif api_version == '2017-03-30':
from .v2017_03_30.operations import ImagesOperations as OperationClass
elif api_version == '2017-12-01':
from .v2017_12_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import ImagesOperations as OperationClass
elif api_version == '2018-10-01':
from .v2018_10_01.operations import ImagesOperations as OperationClass
elif api_version == '2019-03-01':
from .v2019_03_01.operations import ImagesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2016-04-30-preview: :class:`ImagesOperations<azure.mgmt.compute.v2016_04_30_preview.operations.ImagesOperations>`
* 2017-03-30: :class:`ImagesOperations<azure.mgmt.compute.v2017_03_30.operations.ImagesOperations>`
* 2017-12-01: :class:`ImagesOperations<azure.mgmt.compute.v2017_12_01.operations.ImagesOperations>`
* 2018-04-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_04_01.operations.ImagesOperations>`
* 2018-06-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_06_01.operations.ImagesOperations>`
* 2018-10-01: :class:`ImagesOperations<azure.mgmt.compute.v2018_10_01.operations.ImagesOperations>`
* 2019-03-01: :class:`ImagesOperations<azure.mgmt.compute.v2019_03_01.operations.ImagesOperations>` |
1,651 | def get_default_config(self):
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'utmp': None,
})
return config | Returns the default collector settings |
1,652 | def get_metric_by_name(name: str) -> Callable[..., Any]:
if name not in _REGISTRY:
raise ConfigError(f'"{name}" is not registered as a metric')
return fn_from_str(_REGISTRY[name]) | Returns a metric callable with a corresponding name. |
1,653 | def name(self):
name = self._element.name_val
if name is None:
return None
return BabelFish.internal2ui(name) | The UI name of this style. |
1,654 | def _load_key(key_object):
if key_object.algorithm == 'ec':
curve_type, details = key_object.curve
if curve_type != 'named':
raise AsymmetricKeyError()
if details not in set(['secp256r1', 'secp384r1', 'secp521r1']):
raise AsymmetricKeyError(pretty_message(
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo == 'sha2':
raise AsymmetricKeyError(pretty_message(
,
key_object.bit_size
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo is None:
raise IncompleteAsymmetricKeyError(pretty_message(
))
if isinstance(key_object, keys.PublicKeyInfo):
source = key_object.dump()
key_class = Security.kSecAttrKeyClassPublic
else:
source = key_object.unwrap().dump()
key_class = Security.kSecAttrKeyClassPrivate
cf_source = None
cf_dict = None
cf_output = None
try:
cf_source = CFHelpers.cf_data_from_bytes(source)
key_type = {
'dsa': Security.kSecAttrKeyTypeDSA,
'ec': Security.kSecAttrKeyTypeECDSA,
'rsa': Security.kSecAttrKeyTypeRSA,
}[key_object.algorithm]
cf_dict = CFHelpers.cf_dictionary_from_pairs([
(Security.kSecAttrKeyType, key_type),
(Security.kSecAttrKeyClass, key_class),
(Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue),
(Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue),
])
error_pointer = new(CoreFoundation, 'CFErrorRef *')
sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer)
handle_cf_error(error_pointer)
if key_class == Security.kSecAttrKeyClassPublic:
return PublicKey(sec_key_ref, key_object)
if key_class == Security.kSecAttrKeyClassPrivate:
return PrivateKey(sec_key_ref, key_object)
finally:
if cf_source:
CoreFoundation.CFRelease(cf_source)
if cf_dict:
CoreFoundation.CFRelease(cf_dict)
if cf_output:
CoreFoundation.CFRelease(cf_output) | Common code to load public and private keys into PublicKey and PrivateKey
objects
:param key_object:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PublicKey or PrivateKey object |
1,655 | def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
self.command.append('highshelf')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self | highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain. |
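For context, a hedged sketch of how such a builder accumulates a SoX-style argument list (the chain class name and chained usage below are assumptions, not taken from the row):
class FxChainSketch:
    def __init__(self):
        self.command = []

    def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
        # Append the SoX-style effect name followed by its three parameters.
        self.command.extend(['highshelf', gain, frequency, slope])
        return self  # returning self makes the calls chainable

print(FxChainSketch().highshelf(gain=-10.0).highshelf(frequency=8000).command)
# ['highshelf', -10.0, 3000, 0.5, 'highshelf', -20.0, 8000, 0.5]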
1,656 | def is_condition_met(self, hand, *args):
pon_sets = [x for x in hand if is_pon(x)]
if len(pon_sets) != 4:
return False
count_wind_sets = 0
winds = [EAST, SOUTH, WEST, NORTH]
for item in pon_sets:
if is_pon(item) and item[0] in winds:
count_wind_sets += 1
return count_wind_sets == 4 | The hand contains four sets of winds
:param hand: list of hand's sets
:return: boolean |
1,657 | def auth_request_url(self, client_id=None, redirect_uris="urn:ietf:wg:oauth:2.0:oob",
scopes=__DEFAULT_SCOPES, force_login=False):
if client_id is None:
client_id = self.client_id
else:
if os.path.isfile(client_id):
with open(client_id, 'r') as secret_file:
client_id = secret_file.readline().rstrip()
params = dict()
params['client_id'] = client_id
params['response_type'] = "code"
params['redirect_uri'] = redirect_uris
params['scope'] = " ".join(scopes)
params['force_login'] = force_login
formatted_params = urlencode(params)
return "".join([self.api_base_url, "/oauth/authorize?", formatted_params]) | Returns the url that a client needs to request an oauth grant from the server.
To log in with oauth, send your user to this URL. The user will then log in and
get a code which you can pass to log_in.
scopes are as in `log_in()`_, redirect_uris is where the user should be redirected to
after authentication. Note that redirect_uris must be one of the URLs given during
app registration. When using urn:ietf:wg:oauth:2.0:oob, the code is simply displayed,
otherwise it is added to the given URL as the "code" request parameter.
Pass force_login if you want the user to always log in even when already logged
into web mastodon (i.e. when registering multiple different accounts in an app). |
1,658 | def collect_analysis(self):
analysis = {}
for field in self.fields.values():
for analyzer_name in ('analyzer', 'normalizer', 'search_analyzer'):
if not hasattr(field, analyzer_name):
continue
analyzer = getattr(field, analyzer_name)
if not isinstance(analyzer, Analyzer):
continue
definition = analyzer.get_analysis_definition()
if definition is None:
continue
for key in definition:
analysis.setdefault(key, {}).update(definition[key])
return analysis | :return: a dictionary which is used to get the serialized analyzer definition from the analyzer class. |
1,659 | def database_list_folder(object_id, input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest('/%s/listFolder' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /database-xxxx/listFolder API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Databases#API-method%3A-%2Fdatabase-xxxx%2FlistFolder |
1,660 | def kl_divergence(self, logits_q, logits_p):
return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) | Categorical distribution KL divergence calculation
KL(Q || P) = sum Q_i log (Q_i / P_i)
When talking about logits this is:
sum exp(Q_i) * (Q_i - P_i) |
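A quick numeric check of the logits-based KL formula above (assumes PyTorch; log-probabilities are built with `log_softmax` so `exp(logits)` are valid probabilities):
import torch
import torch.nn.functional as F

logits_q = F.log_softmax(torch.tensor([[1.0, 2.0, 3.0]]), dim=1)
logits_p = F.log_softmax(torch.tensor([[3.0, 2.0, 1.0]]), dim=1)

# KL(Q || P) = sum_i exp(Q_i) * (Q_i - P_i), exactly as in the row above.
kl = (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True)
print(kl)  # matches F.kl_div(logits_p, logits_q.exp(), reduction='none').sum(1, keepdim=True)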
1,661 | def assign_types_to_resources(resource_types,**kwargs):
type_ids = list(set([rt.type_id for rt in resource_types]))
db_types = db.DBSession.query(TemplateType).filter(TemplateType.id.in_(type_ids)).options(joinedload_all()).all()
types = {}
for db_type in db_types:
if types.get(db_type.id) is None:
types[db_type.id] = db_type
log.debug("Retrieved all the appropriate template types")
res_types = []
res_attrs = []
res_scenarios = []
net_id = None
node_ids = []
link_ids = []
grp_ids = []
for resource_type in resource_types:
ref_id = resource_type.ref_id
ref_key = resource_type.ref_key
if resource_type.ref_key == 'NETWORK':
net_id = ref_id
elif resource_type.ref_key == 'NODE':
node_ids.append(ref_id)
elif resource_type.ref_key == 'LINK':
link_ids.append(ref_id)
elif resource_type.ref_key == 'GROUP':
grp_ids.append(ref_id)
if net_id:
net = db.DBSession.query(Network).filter(Network.id==net_id).one()
nodes = _get_nodes(node_ids)
links = _get_links(link_ids)
groups = _get_groups(grp_ids)
for resource_type in resource_types:
ref_id = resource_type.ref_id
ref_key = resource_type.ref_key
type_id = resource_type.type_id
if ref_key == 'NETWORK':
resource = net
elif ref_key == 'NODE':
resource = nodes[ref_id]
elif ref_key == 'LINK':
resource = links[ref_id]
elif ref_key == 'GROUP':
resource = groups[ref_id]
ra, rt, rs= set_resource_type(resource, type_id, types)
if rt is not None:
res_types.append(rt)
if len(ra) > 0:
res_attrs.extend(ra)
if len(rs) > 0:
res_scenarios.extend(rs)
log.debug("Retrieved all the appropriate resources")
if len(res_types) > 0:
new_types = db.DBSession.execute(ResourceType.__table__.insert(), res_types)
if len(res_attrs) > 0:
new_res_attrs = db.DBSession.execute(ResourceAttr.__table__.insert(), res_attrs)
new_ras = db.DBSession.query(ResourceAttr).filter(and_(ResourceAttr.id>=new_res_attrs.lastrowid, ResourceAttr.id<(new_res_attrs.lastrowid+len(res_attrs)))).all()
ra_map = {}
for ra in new_ras:
ra_map[(ra.ref_key, ra.attr_id, ra.node_id, ra.link_id, ra.group_id, ra.network_id)] = ra.id
for rs in res_scenarios:
rs['resource_attr_id'] = ra_map[(rs['ref_key'], rs['attr_id'], rs['node_id'], rs['link_id'], rs['group_id'], rs['network_id'])]
if len(res_scenarios) > 0:
new_scenarios = db.DBSession.execute(ResourceScenario.__table__.insert(), res_scenarios)
db.DBSession.query(ResourceAttr).filter(ResourceAttr.attr_id==None).delete()
ret_val = [t for t in types.values()]
return ret_val | Assign new types to list of resources.
This function checks if the necessary
attributes are present and adds them if needed. Non existing attributes
are also added when the type is already assigned. This means that this
function can also be used to update resources, when a resource type has
changed. |
1,662 | def run(self, cell, is_full_fc=False, parse_fc=True):
with open(self._filename) as f:
fc_dct = self._parse_q2r(f)
self.dimension = fc_dct['dimension']
self.epsilon = fc_dct['dielectric']
self.borns = fc_dct['born']
if parse_fc:
(self.fc,
self.primitive,
self.supercell) = self._arrange_supercell_fc(
cell, fc_dct['fc'], is_full_fc=is_full_fc) | Make supercell force constants readable for phonopy
Note
----
Born effective charges and dielectric constant tensor are read
from QE output file if they exist. But this means
dipole-dipole contributions are removed from force constants
and this force constants matrix is not usable in phonopy.
Arguments
---------
cell : PhonopyAtoms
Primitive cell used for QE/PH calculation.
is_full_fc : Bool, optional, default=False
Whether to create full or compact force constants.
parse_fc : Bool, optional, default=True
Force constants file of QE is not parsed when this is False.
False may be used when expected to parse only epsilon and born. |
1,663 | def wait_turns(self, turns, cb=None):
self.disable_input()
self.app.wait_turns(turns, cb=partial(self.enable_input, cb)) | Call ``self.app.engine.next_turn()`` ``turns`` times, waiting ``self.app.turn_length`` in between
Disables input for the duration.
:param turns: number of turns to wait
:param cb: function to call when done waiting, optional
:return: ``None`` |
1,664 | def _get_notmuch_message(self, mid):
mode = Database.MODE.READ_ONLY
db = Database(path=self.path, mode=mode)
try:
return db.find_message(mid)
except:
errmsg = 'no message with id %s exists!' % mid
raise NonexistantObjectError(errmsg) | returns :class:`notmuch.database.Message` with given id |
1,665 | def delete_table_records(self, table, query_column, ids_to_delete):
table = table.get_soap_object(self.client)
result = self.call('deleteTableRecords', table, query_column, ids_to_delete)
if hasattr(result, '__iter__'):
return [DeleteResult(delete_result) for delete_result in result]
return [DeleteResult(result)] | Responsys.deleteTableRecords call
Accepts:
InteractObject table
string query_column
possible values: 'RIID'|'EMAIL_ADDRESS'|'CUSTOMER_ID'|'MOBILE_NUMBER'
list ids_to_delete
Returns a list of DeleteResult instances |
1,666 | def create_signed_pair(self, name, ca_name, cert_type=crypto.TYPE_RSA,
bits=2048, years=5, alt_names=None, serial=0,
overwrite=False):
key = self.create_key_pair(cert_type, bits)
req = self.create_request(key, CN=name)
extensions = [
crypto.X509Extension(
b"extendedKeyUsage", True, b"serverAuth, clientAuth"),
]
if alt_names:
extensions.append(
crypto.X509Extension(b"subjectAltName",
False, ",".join(alt_names).encode())
)
ca_bundle = self.store.get_files(ca_name)
cacert = ca_bundle.cert.load()
cakey = ca_bundle.key.load()
cert = self.sign(req, (cacert, cakey), (0, 60*60*24*365*years),
extensions=extensions)
x509s = {'key': key, 'cert': cert, 'ca': None}
self.store.add_files(name, x509s, parent_ca=ca_name,
overwrite=overwrite)
self.store.add_sign_link(ca_name, name)
return self.store.get_record(name) | Create a key-cert pair
Arguments: name - The name of the key-cert pair
ca_name - The name of the CA to sign this cert
cert_type - The type of the cert. TYPE_RSA or TYPE_DSA
bits - The number of bits to use
alt_names - An array of alternative names in the format:
IP:address, DNS:address
Returns: KeyCertPair for the new signed pair |
1,667 | def transform_literals(rdf, literalmap):
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, Literal) \
and (p in literalmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, literalmap):
newval = mapping_get(p, literalmap)
newuris = [v[0] for v in newval]
logging.debug("transform literal %s -> %s", p, str(newuris))
replace_predicate(
rdf, p, newuris, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with literal %s", p) | Transform literal properties of Concepts, as defined by config file. |
1,668 | def netdevs():
with open('/proc/net/dev') as f:
net_dump = f.readlines()
device_data={}
data = namedtuple('data',['rx','tx'])
for line in net_dump[2:]:
line = line.split(':')
if line[0].strip() != 'lo':
device_data[line[0].strip()] = data(float(line[1].split()[0])/(1024.0*1024.0),
float(line[1].split()[8])/(1024.0*1024.0))
return device_data | RX and TX bytes for each of the network devices |
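Example of reading the structure returned by the helper above (assumes a Linux /proc filesystem; the `rx`/`tx` field names follow the namedtuple as reconstructed above):
# Hypothetical usage: print MiB received/transmitted per interface.
for iface, stats in sorted(netdevs().items()):
    print('{}: received {:.2f} MiB, sent {:.2f} MiB'.format(iface, stats.rx, stats.tx))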
1,669 | def drop_words(text, threshold=2, to_lower=True, delimiters=DEFAULT_DELIMITERS,
stop_words=None):
_raise_error_if_not_sarray(text, "text")
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.RareWordTrimmer(features='docs',
threshold=threshold,
to_lower=to_lower,
delimiters=delimiters,
stopwords=stop_words,
output_column_prefix=None)
tokens = fe.fit_transform(sf)
return tokens['docs'] | Remove words that occur below a certain number of times in an SArray.
This is a common method of cleaning text before it is used, and can increase the
quality and explainability of the models learned on the transformed data.
RareWordTrimmer can be applied to all the string-, dictionary-, and list-typed
columns in an SArray.
* **string** : The string is first tokenized. By default, all letters are
first converted to lower case, then tokenized by space characters. Each
token is taken to be a word, and the words occurring below a threshold
number of times across the entire column are removed, then the remaining
tokens are concatenated back into a string.
* **list** : Each element of the list must be a string, where each element
is assumed to be a token. The remaining tokens are then filtered
by count occurrences and a threshold value.
* **dict** : The method first obtains the list of keys in the dictionary.
This list is then processed as a standard list, except the value of each
key must be of integer type and is considered to be the count of that key.
Parameters
----------
text : SArray[str | dict | list]
The input text data.
threshold : int, optional
The count below which words are removed from the input.
stop_words: list[str], optional
A manually specified list of stop words, which are removed regardless
of count.
to_lower : bool, optional
Indicates whether to map the input strings to lower case before counting.
delimiters: list[string], optional
A list of delimiter characters for tokenization. By default, the list
is defined to be the list of space characters. The user can define
any custom list of single-character delimiters. Alternatively, setting
`delimiters=None` will use a Penn treebank type tokenization, which
is better at handling punctuations. (See reference below for details.)
Returns
-------
out : SArray.
An SArray with words below a threshold removed.
See Also
--------
count_ngrams, tf_idf, tokenize,
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps in a fox like way.",
"Word word WORD, word!!!word"])
# Run drop_words
>>> turicreate.text_analytics.drop_words(sa)
dtype: str
Rows: 2
['fox fox', 'word word']
# Run drop_words with Penn treebank style tokenization to handle
# punctuations
>>> turicreate.text_analytics.drop_words(sa, delimiters=None)
dtype: str
Rows: 2
['fox fox', 'word word word']
# Run drop_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 2},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.drop_words(sa)
dtype: dict
Rows: 2
[{'bob alice': 2}, {'a dog cat': 5}]
# Run drop_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah', 'One'],
['a dog', 'a dog cat', 'A DOG']])
>>> turicreate.text_analytics.drop_words(sa)
dtype: list
Rows: 2
[['one', 'one'], ['a dog', 'a dog']] |
1,670 | def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None, kp_inst=None):
if kp_inst is not None:
kp_inst.load(date=sat.date, verifyPad=True)
kpData = kp_inst
elif kpData is None:
kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
kp.load(date=sat.date, verifyPad=True)
kpData = kp
if maxKp is None:
maxKp = 3+ 1./3.
if filterTime is None:
filterTime = 24
return | Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data.Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place |
1,671 | def creationTime(item):
forThisItem = _CreationTime.createdItem == item
return item.store.findUnique(_CreationTime, forThisItem).timestamp | Returns the creation time of the given item. |
1,672 | def apply_all_rules(self, *args, **kwargs):
for x in self.rules:
self._quit_check()
if self.config.chatty_rules:
self.config.logger.debug(
'apply_all_rules: applying %s',
to_str(x.__class__)
)
predicate_result, action_result = x.act(*args, **kwargs)
if self.config.chatty_rules:
self.config.logger.debug(
'apply_all_rules: predicate result %s, action result %s',
predicate_result,
action_result
)
return True | cycle through all rules and apply them all without regard to
success or failure
returns:
True - since success or failure is ignored |
1,673 | def completion():
shell = env.get("SHELL", None)
if env.get("SHELL", None):
echo(
click_completion.get_code(
shell=shell.split(os.sep)[-1], prog_name="doitlive"
)
)
else:
echo(
"Please ensure that the {SHELL} environment "
"variable is set.".format(SHELL=style("SHELL", bold=True))
)
sys.exit(1) | Output completion (to be eval'd).
For bash or zsh, add the following to your .bashrc or .zshrc:
eval "$(doitlive completion)"
For fish, add the following to ~/.config/fish/completions/doitlive.fish:
eval (doitlive completion) |
1,674 | def _to_dict(self):
_dict = {}
if hasattr(self, ) and self.feedback_id is not None:
_dict[] = self.feedback_id
if hasattr(self, ) and self.user_id is not None:
_dict[] = self.user_id
if hasattr(self, ) and self.comment is not None:
_dict[] = self.comment
if hasattr(self, ) and self.created is not None:
_dict[] = datetime_to_string(self.created)
if hasattr(self, ) and self.feedback_data is not None:
_dict[] = self.feedback_data._to_dict()
return _dict | Return a json dictionary representing this model. |
1,675 | def GetTemplateID(alias,location,name):
if alias is None: alias = clc.v1.Account.GetAlias()
if location is None: location = clc.v1.Account.GetLocation()
r = Server.GetTemplates(alias,location)
for row in r:
if row['Name'].lower() == name.lower(): return(row['OperatingSystem'])
else:
if clc.args: clc.v1.output.Status("ERROR",3,"Template %s not found in account %s datacenter %s" % (name,alias,location))
raise Exception("Template not found") | Given a template name return the unique OperatingSystem ID.
:param alias: short code for a particular account. If none will use account's default alias
:param location: datacenter where group resides
:param name: template name |
1,676 | def subbrick(dset,label,coef=False,tstat=False,fstat=False,rstat=False,number_only=False):
if coef is not False:
if coef is True:
coef = 0
label += "#%d_Coef" % coef
elif tstat != False:
if tstat==True:
tstat = 0
label += "#%d_Tstat" % tstat
elif fstat:
label += "_Fstat"
elif rstat:
label += "_R^2"
info = nl.dset_info(dset)
if info==None:
nl.notify('Couldn\'t get info from dset "%s"' % dset)
return None
i = info.labels.index(label)
return '%s[%d]' % (dset, i) | returns a string referencing the given subbrick within a dset
This method reads the header of the dataset ``dset``, finds the subbrick whose
label matches ``label`` and returns a string of type ``dataset[X]``, which can
be used by most AFNI programs to refer to a subbrick within a file
The options coef, tstat, fstat, and rstat will add the suffix that is
appended to the label by 3dDeconvolve
:coef: "#0_Coef"
:tstat: "#0_Tstat"
:fstat: "_Fstat"
:rstat: "_R^2"
If ``coef`` or ``tstat`` are set to a number, it will use that parameter number
(instead of 0), for models that use multiple parameters (e.g., "TENT").
if ``number_only`` is set to ``True``, will only return the subbrick number instead of a string |
1,677 | def _translate_dst_register_oprnd(self, operand):
reg_info = self._arch_alias_mapper.get(operand.name, None)
parent_reg_constrs = []
if reg_info:
var_base_name, offset = reg_info
var_name_old = self._get_var_name(var_base_name, fresh=False)
var_name_new = self._get_var_name(var_base_name, fresh=True)
var_size = self._arch_regs_size[var_base_name]
ret_val_old = self.make_bitvec(var_size, var_name_old)
ret_val_new = self.make_bitvec(var_size, var_name_new)
ret_val = smtfunction.extract(ret_val_new, offset, operand.size)
if 0 < offset < var_size - 1:
lower_expr_1 = smtfunction.extract(ret_val_new, 0, offset)
lower_expr_2 = smtfunction.extract(ret_val_old, 0, offset)
parent_reg_constrs += [lower_expr_1 == lower_expr_2]
upper_expr_1 = smtfunction.extract(ret_val_new, offset + operand.size, var_size - offset - operand.size)
upper_expr_2 = smtfunction.extract(ret_val_old, offset + operand.size, var_size - offset - operand.size)
parent_reg_constrs += [upper_expr_1 == upper_expr_2]
elif offset == 0:
upper_expr_1 = smtfunction.extract(ret_val_new, offset + operand.size, var_size - offset - operand.size)
upper_expr_2 = smtfunction.extract(ret_val_old, offset + operand.size, var_size - offset - operand.size)
parent_reg_constrs += [upper_expr_1 == upper_expr_2]
elif offset == var_size-1:
lower_expr_1 = smtfunction.extract(ret_val_new, 0, offset)
lower_expr_2 = smtfunction.extract(ret_val_old, 0, offset)
parent_reg_constrs += [lower_expr_1 == lower_expr_2]
else:
var_name_new = self._get_var_name(operand.name, fresh=True)
ret_val = self.make_bitvec(operand.size, var_name_new)
return ret_val, parent_reg_constrs | Translate destination register operand to SMT expr. |
1,678 | def p_ident_parts(self, p):
if not isinstance(p[1], list):
p[1] = [p[1]]
p[0] = p[1] | ident_parts : ident_part
| selector
| filter_group |
1,679 | def dropEvent(self, event: QDropEvent):
items = [item for item in self.items(event.scenePos()) if isinstance(item, GraphicsItem) and item.acceptDrops()]
item = None if len(items) == 0 else items[0]
if len(event.mimeData().urls()) > 0:
self.files_dropped.emit(event.mimeData().urls())
indexes = list(event.mimeData().text().split("/")[:-1])
group_nodes = []
file_nodes = []
for index in indexes:
try:
row, column, parent = map(int, index.split(","))
if parent == -1:
parent = self.tree_root_item
else:
parent = self.tree_root_item.child(parent)
node = parent.child(row)
if node.is_group:
group_nodes.append(node)
else:
file_nodes.append(node)
except ValueError:
continue
nodes_to_add = []
for group_node in group_nodes:
nodes_to_add.extend(group_node.children)
nodes_to_add.extend([file_node for file_node in file_nodes if file_node not in nodes_to_add])
protocols_to_add = [node.protocol for node in nodes_to_add]
ref_item = item
position = None if ref_item is None else item.drop_indicator_position
self.add_protocols(ref_item, position, protocols_to_add)
super().dropEvent(event) | :type: list of ProtocolTreeItem |
1,680 | def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
unit_size, quick_key=None, action_on_duplicate=None,
mtime=None, version_control=None, folder_key=None,
filedrop_key=None, path=None, previous_hash=None):
action = 'upload/resumable'
headers = {
'x-filesize': str(filesize),
'x-filehash': filehash,
'x-unit-hash': unit_hash,
'x-unit-id': str(unit_id),
'x-unit-size': str(unit_size)
}
params = QueryParams({
'quick_key': quick_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'previous_hash': previous_hash
})
upload_info = {
"fd": fd,
"filename": "chunk"
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers) | upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable |
1,681 | def attend_fight(self, mappings, node_ip, predictions, ntp):
mappings = self.add_listen_sock(mappings)
log.debug(mappings)
self.simultaneous_cons = []
predictions = predictions.split(" ")
self.simultaneous_fight(mappings, node_ip, predictions, ntp)
if len(self.simultaneous_cons):
try:
return self.simultaneous_cons[0]
except:
log.debug("No holes found")
for mapping in mappings:
s = mapping["listen"]
r, w, e = select.select(
[s],
[],
[],
0
)
for found_sock in r:
if found_sock != s:
continue
log.debug("Accept logic works!")
client, address = s.accept()
con = Sock(blocking=0)
con.set_sock(client)
return con
return None | This function is for starting and managing a fight
once the details are known. It also handles the
task of returning any valid connections (if any) that
may be returned from threads in the simultaneous_fight function. |
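The tail of attend_fight polls each listening socket with select() and only accepts when a peer has already connected. A minimal standalone sketch of that non-blocking accept pattern:
import select

def poll_accept(listen_sock, timeout=0):
    # select() tells us whether accept() would succeed without blocking.
    readable, _, _ = select.select([listen_sock], [], [], timeout)
    if listen_sock in readable:
        client, address = listen_sock.accept()
        client.setblocking(False)
        return client, address
    return None, None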
1,682 | def analyze(self, mode=None, timesteps=None):
if timesteps is None:
timesteps = self.network.timeseries.timeindex
if not hasattr(timesteps, "__len__"):
timesteps = [timesteps]
if self.network.pypsa is None:
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode, timesteps)
else:
if self.network.pypsa.edisgo_mode is not mode:
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode, timesteps)
if False in [True if _ in self.network.pypsa.snapshots else False
for _ in timesteps]:
pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps)
pf_results = self.network.pypsa.pf(timesteps)
if all(pf_results['converged']['0'].tolist()):
pypsa_io.process_pfa_results(
self.network, self.network.pypsa, timesteps)
else:
raise ValueError("Power flow analysis did not converge.") | Analyzes the grid by power flow analysis
Analyze the grid for violations of hosting capacity, i.e. perform a
power flow analysis and obtain voltages at nodes (load, generator,
stations/transformers and branch tees) and active/reactive power at
lines.
The power flow analysis can currently only be performed for both grid
levels MV and LV. See ToDos section for more information.
A static `non-linear power flow analysis is performed using PyPSA
<https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_.
The high-voltage to medium-voltage transformers are not included in the
analysis. The slack bus is defined at the secondary side of these
transformers assuming an ideal tap changer. Hence, potential
overloading of the transformers is not studied here.
Parameters
----------
mode : str
Allows to toggle between power flow analysis (PFA) on the whole
grid topology (MV + LV), only MV or only LV. Defaults to None which
equals power flow analysis for MV + LV which is the only
implemented option at the moment. See ToDos section for
more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies for which time steps to conduct the power flow
analysis. It defaults to None in which case the time steps in
timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are
used.
Notes
-----
The current implementation always translates the grid topology
representation to the PyPSA format and stores it to
:attr:`self.network.pypsa`.
ToDos
------
The option to export only the edisgo MV grid (mode = 'mv') to conduct
a power flow analysis is implemented in
:func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised
since the rest of edisgo does not handle this option yet. The analyze
function will throw an error since
:func:`~.tools.pypsa_io.process_pfa_results`
does not handle aggregated loads and generators in the LV grids. Also,
grid reinforcement, pypsa update of time series, and probably other
functionalities do not work when only the MV grid is analysed.
Further ToDos are:
* explain how power plants are modeled, if possible use a link
* explain where to find and adjust power flow analysis defining
parameters
See Also
--------
:func:`~.tools.pypsa_io.to_pypsa`
Translator to PyPSA data format |
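A hedged usage sketch: 'edisgo' stands for an already initialized EDisGo-style object whose analyze() matches the signature documented above; the two time steps are illustrative.
import pandas as pd

timesteps = pd.date_range('2011-01-01 00:00', periods=2, freq='H')
edisgo.analyze(timesteps=timesteps)   # runs the PyPSA power flow for the two hours above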
1,683 | def reload(self, **kwargs):
frame = self.one({'_id': self._id}, **kwargs)
self._document = frame._document | Reload the document |
1,684 | def _decrypt_data_key(self, encrypted_data_key, algorithm, encryption_context=None):
kms_params = {"CiphertextBlob": encrypted_data_key.encrypted_data_key}
if encryption_context:
kms_params["EncryptionContext"] = encryption_context
if self.config.grant_tokens:
kms_params["GrantTokens"] = self.config.grant_tokens
try:
response = self.config.client.decrypt(**kms_params)
plaintext = response["Plaintext"]
except (ClientError, KeyError):
error_message = "Master Key {key_id} unable to decrypt data key".format(key_id=self._key_id)
_LOGGER.exception(error_message)
raise DecryptKeyError(error_message)
return DataKey(
key_provider=self.key_provider, data_key=plaintext, encrypted_data_key=encrypted_data_key.encrypted_data_key
) | Decrypts an encrypted data key and returns the plaintext.
:param data_key: Encrypted data key
:type data_key: aws_encryption_sdk.structures.EncryptedDataKey
:type algorithm: `aws_encryption_sdk.identifiers.Algorithm` (not used for KMS)
:param dict encryption_context: Encryption context to use in decryption
:returns: Decrypted data key
:rtype: aws_encryption_sdk.structures.DataKey
:raises DecryptKeyError: if Master Key is unable to decrypt data key |
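A hedged sketch of the underlying boto3 KMS call this provider wraps: decrypt() takes the ciphertext plus an optional encryption context and returns the plaintext key; the region here is a placeholder.
import boto3

def kms_decrypt(ciphertext, encryption_context=None, region='us-east-1'):
    kms = boto3.client('kms', region_name=region)
    params = {'CiphertextBlob': ciphertext}
    if encryption_context:
        params['EncryptionContext'] = encryption_context
    # Raises botocore.exceptions.ClientError if the key cannot be decrypted.
    return kms.decrypt(**params)['Plaintext']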
1,685 | def saveNetworkToFile(self, filename, makeWrapper = 1, mode = "pickle", counter = None):
if "?" in filename:
import re
char = "?"
match = re.search(re.escape(char) + "+", filename)
if match:
num = self.epoch
if counter != None:
num = counter
elif self.totalEpoch != 0:
num = self.totalEpoch
fstring = "%%0%dd" % len(match.group())
filename = filename[:match.start()] + \
fstring % num + \
filename[match.end():]
self.lastAutoSaveNetworkFilename = filename
if mode == "pickle":
import pickle
basename = filename.split('.')[0]
filename += ".pickle"
fp = open(filename, 'wb')  # binary mode for pickle
pickle.dump(self, fp)
fp.close()
if makeWrapper:
fp = open(basename + ".py", "w")
fp.write("from pyrobot.brain.conx import *\n")
fp.write("import pickle\n")
fp.write("fp = open('%s', 'rb')\n" % filename)
fp.write("network = pickle.load(fp)")
fp.close()
print("To load network:")
print(" %% python -i %s " % (basename + ".py"))
print(" >>> network.train()")
print("--- OR ---")
print(" % python")
print(" >>> from pyrobot.brain.conx import *")
print(" >>> network = loadNetwork('%s')" % filename)
print(" >>> network.train()")
elif mode in ["plain", "conx"]:
fp = open(filename, "w")
fp.write("network, %s\n" % (self.__class__.__name__))
for layer in self.layers:
fp.write("layer, %s, %s\n" % (layer.name, layer.size))
for i in range(layer.size):
fp.write("%f " % layer.weight[i])
fp.write("\n")
for connection in self.connections:
fp.write("connection, %s, %s\n" %(connection.fromLayer.name, connection.toLayer.name))
for i in range(connection.fromLayer.size):
for j in range(connection.toLayer.size):
fp.write("%f " % connection.weight[i][j])
fp.write("\n")
fp.close() | Deprecated. |
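Although deprecated, the method above boils down to a pickle round-trip plus a small loader script. A minimal sketch of that round-trip with a stand-in object and an illustrative filename:
import pickle

net = {"weights": [0.1, 0.2]}             # stand-in for any picklable network object

with open('network.pickle', 'wb') as fp:  # binary mode, matching pickle.dump
    pickle.dump(net, fp)

with open('network.pickle', 'rb') as fp:
    restored = pickle.load(fp)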
1,686 | def StyleFactory(style_elm):
style_cls = {
WD_STYLE_TYPE.PARAGRAPH: _ParagraphStyle,
WD_STYLE_TYPE.CHARACTER: _CharacterStyle,
WD_STYLE_TYPE.TABLE: _TableStyle,
WD_STYLE_TYPE.LIST: _NumberingStyle
}[style_elm.type]
return style_cls(style_elm) | Return a style object of the appropriate |BaseStyle| subclass, according
to the type of *style_elm*. |
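The factory simply dispatches on a type tag through a dict. An illustrative, self-contained version of the same pattern (the class and tag names here are hypothetical, not part of python-docx):
class ParagraphStyle:
    pass

class CharacterStyle:
    pass

_STYLE_CLASSES = {'paragraph': ParagraphStyle, 'character': CharacterStyle}

def make_style(kind):
    # An unknown tag raises KeyError, mirroring the lookup in StyleFactory.
    return _STYLE_CLASSES[kind]()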
1,687 | def __QueryFeed(self,
path,
type,
id,
result_fn,
create_fn,
query,
options=None,
partition_key_range_id=None):
if options is None:
options = {}
if query:
__GetBodiesFromQueryResult = result_fn
else:
def __GetBodiesFromQueryResult(result):
if result is not None:
return [create_fn(self, body) for body in result_fn(result)]
else:
return []
initial_headers = self.default_headers.copy()
# Reconstructed: build POST headers and the request object for the query feed.
headers = base.GetHeaders(self,
initial_headers,
'post',
path,
id,
type,
options,
partition_key_range_id)
request = request_object._RequestObject(type, documents._OperationType.SqlQuery)
result, self.last_response_headers = self.__Post(path,
request,
query,
headers)
return __GetBodiesFromQueryResult(result) | Query for more than one Azure Cosmos resources.
:param str path:
:param str type:
:param str id:
:param function result_fn:
:param function create_fn:
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key_range_id:
Specifies partition key range id.
:rtype:
list
:raises SystemError: If the query compatibility mode is undefined. |
1,688 | def toxml(self):
# NOTE: the element name below is reconstructed; attribute names mirror the fields.
return '<Constant' +\
(' name="{0}"'.format(self.name) if self.name else '') +\
(' symbol="{0}"'.format(self.symbol) if self.symbol else '') +\
(' value="{0}"'.format(self.value) if self.value else '') +\
(' dimension="{0}"'.format(self.dimension) if self.dimension else '') +\
(' description="{0}"'.format(self.description) if self.description else '') +\
'/>' | Exports this object into a LEMS XML object |
1,689 | def authenticate_direct_credentials(self, username, password):
bind_user = '{}{}{}'.format(
self.config.get('LDAP_BIND_DIRECT_PREFIX'),
username,
self.config.get('LDAP_BIND_DIRECT_SUFFIX')
)
connection = self._make_connection(
bind_user=bind_user,
bind_password=password,
)
response = AuthenticationResponse()
try:
connection.bind()
response.status = AuthenticationResponseStatus.success
response.user_id = username
log.debug(
"Authentication was successful for user '{0}'".format(username))
if self.config.get('LDAP_BIND_DIRECT_GET_USER_INFO'):
user_filter = '({search_attr}={username})'.format(
search_attr=self.config.get('LDAP_USER_LOGIN_ATTR'),
username=username
)
search_filter = '(&{}{})'.format(
self.config.get('LDAP_USER_OBJECT_FILTER'),
user_filter,
)
connection.search(
search_base=self.full_user_search_dn,
search_filter=search_filter,
search_scope=getattr(
ldap3, self.config.get('LDAP_USER_SEARCH_SCOPE')),
attributes=self.config.get('LDAP_GET_USER_ATTRIBUTES'),
)
if len(connection.response) == 0 or \
(self.config.get('LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND') and
len(connection.response) > 1):
response.status = AuthenticationResponseStatus.fail
except Exception as e:
log.error(e)
response.status = AuthenticationResponseStatus.fail
self.destroy_connection(connection)
return response | Performs a direct bind, however using direct credentials. Can be used
if interfacing with an Active Directory domain controller which
authenticates using [email protected] directly.
Performing this kind of lookup limits the information we can get from
ldap. Instead we can only deduce whether or not their bind was
successful. Do not use this method if you require more user info.
Args:
username (str): Username for the user to bind with.
LDAP_BIND_DIRECT_PREFIX will be prepended and
LDAP_BIND_DIRECT_SUFFIX will be appended.
password (str): User's password to bind with.
Returns:
AuthenticationResponse |
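A hedged sketch of the same direct-bind idea using the ldap3 library directly: only bind success or failure is observed, as the docstring above notes; host and DN are placeholders.
import ldap3

def direct_bind(host, bind_dn, password):
    server = ldap3.Server(host)
    conn = ldap3.Connection(server, user=bind_dn, password=password)
    ok = conn.bind()      # True only if the credentials authenticated
    conn.unbind()
    return ok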
1,690 | def describe_instance_health(self, load_balancer_name, instances=None):
params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)]) | Get current state of all Instances registered to a Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer. |
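A hedged usage sketch against the classic boto ELB connection assumed above; the load balancer name is a placeholder and credentials are taken from the environment.
import boto

elb = boto.connect_elb()
for state in elb.describe_instance_health('my-load-balancer'):
    print(state.instance_id, state.state)   # e.g. 'i-0123456789abcdef0 InService'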
1,691 | def decr(name, value=1, rate=1, tags=None):
client().decr(name, value, rate, tags) | Decrement a metric by value.
>>> import statsdecor
>>> statsdecor.decr('my.metric') |
1,692 | def available(name):
path = '/etc/rc.d/{0}'.format(name)  # rc.d init script path (reconstructed)
return os.path.isfile(path) and os.access(path, os.X_OK) | .. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd |
1,693 | def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"):
if port_number not in self._mappings:
raise DynamipsError("Port {} is not allocated".format(port_number))
nio = self._mappings[port_number]
data_link_type = data_link_type.lower()
if data_link_type.startswith("dlt_"):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
raise DynamipsError("Port {} has already a filter applied".format(port_number))
yield from nio.bind_filter("both", "capture")
yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file))
# Log message wording reconstructed.
log.info('{name} [{id}]: starting packet capture on port {port}'.format(name=self._name,
id=self._id,
port=port_number))
:param port_number: allocated port number
:param output_file: PCAP destination file for the capture
:param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB |
1,694 | def _Execute(self, options):
whitelist = dict(
name=options["name"],
description=options.get("description", "<empty>"))
return self._agent.client.compute.security_groups.create(**whitelist) | Handles security groups operations. |
1,695 | def union(self, other):
if not isinstance(other, self.__class__):
m = "You can only union striplogs with each other."
raise StriplogError(m)
result = []
for iv in deepcopy(self):
for jv in other:
if iv.any_overlaps(jv):
iv = iv.union(jv)
result.append(iv)
return Striplog(result) | Makes a striplog of all unions.
Args:
Striplog. The striplog instance to union with.
Returns:
Striplog. The result of the union. |
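The union walks every pair of intervals and merges the ones that overlap. A generic sketch of that overlap-merge idea on plain (start, stop) tuples rather than striplog Interval objects:
def merge_overlaps(a, b):
    merged = []
    for start1, stop1 in a:
        for start2, stop2 in b:
            if start1 < stop2 and start2 < stop1:               # the two intervals overlap
                start1, stop1 = min(start1, start2), max(stop1, stop2)
        merged.append((start1, stop1))
    return merged

print(merge_overlaps([(0, 5)], [(3, 8)]))   # [(0, 8)]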
1,696 | def createSystem(self, topology, nonbondedMethod=NoCutoff,
nonbondedCutoff=1.0 * u.nanometer, constraints=None,
rigidWater=True, removeCMMotion=True, hydrogenMass=None,
**args):
self._SystemData = app.ForceField._SystemData()
data = self._SystemData
data.atoms = list(topology.atoms())
for atom in data.atoms:
data.excludeAtomWith.append([])
for bond in topology.bonds():
data.bonds.append(app.ForceField._BondData(bond[0].index, bond[1].index))
bonded_to_atom = []
for i in range(len(data.atoms)):
bonded_to_atom.append(set())
data.atomBonds.append([])
for i in range(len(data.bonds)):
bond = data.bonds[i]
bonded_to_atom[bond.atom1].add(bond.atom2)
bonded_to_atom[bond.atom2].add(bond.atom1)
data.atomBonds[bond.atom1].append(i)
data.atomBonds[bond.atom2].append(i)
nonbonded_params = None
for generator in self.getGenerators():
if isinstance(generator, NonbondedGenerator):
nonbonded_params = generator.params.paramsForType
break
for chain in topology.chains():
for res in chain.residues():
for atom in res.atoms():
data.atomType[atom] = atom.id
if nonbonded_params:
params = nonbonded_params[atom.id]
data.atomParameters[atom] = params
sys = mm.System()
for atom in topology.atoms():
if atom not in data.atomType:
raise Exception("Could not identify atom type for atom ." % str(atom))
typename = data.atomType[atom]
if typename not in self._atomTypes:
msg = "Could not find typename for atom in list of known atom types.\n" % (typename, str(atom))
msg += "Known atom types are: %s" % str(self._atomTypes.keys())
raise Exception(msg)
mass = self._atomTypes[typename].mass
sys.addParticle(mass)
if hydrogenMass is not None:
if not u.is_quantity(hydrogenMass):
hydrogenMass *= u.dalton
for atom1, atom2 in topology.bonds():
if atom1.element == elem.hydrogen:
(atom1, atom2) = (atom2, atom1)
if atom2.element == elem.hydrogen and atom1.element not in (elem.hydrogen, None):
transfer_mass = hydrogenMass - sys.getParticleMass(atom2.index)
sys.setParticleMass(atom2.index, hydrogenMass)
mass = sys.getParticleMass(atom1.index) - transfer_mass
sys.setParticleMass(atom1.index, mass)
box_vectors = topology.getPeriodicBoxVectors()
if box_vectors is not None:
sys.setDefaultPeriodicBoxVectors(box_vectors[0],
box_vectors[1],
box_vectors[2])
elif nonbondedMethod not in [NoCutoff, CutoffNonPeriodic]:
raise ValueError('Requested periodic boundary conditions for a Topology that does not specify periodic box dimensions')
unique_angles = set()
for bond in data.bonds:
for atom in bonded_to_atom[bond.atom1]:
if atom != bond.atom2:
if atom < bond.atom2:
unique_angles.add((atom, bond.atom1, bond.atom2))
else:
unique_angles.add((bond.atom2, bond.atom1, atom))
for atom in bonded_to_atom[bond.atom2]:
if atom != bond.atom1:
if atom > bond.atom1:
unique_angles.add((bond.atom1, bond.atom2, atom))
else:
unique_angles.add((atom, bond.atom2, bond.atom1))
data.angles = sorted(list(unique_angles))
unique_propers = set()
for angle in data.angles:
for atom in bonded_to_atom[angle[0]]:
if atom not in angle:
if atom < angle[2]:
unique_propers.add((atom, angle[0], angle[1], angle[2]))
else:
unique_propers.add((angle[2], angle[1], angle[0], atom))
for atom in bonded_to_atom[angle[2]]:
if atom not in angle:
if atom > angle[0]:
unique_propers.add((angle[0], angle[1], angle[2], atom))
else:
unique_propers.add((atom, angle[2], angle[1], angle[0]))
data.propers = sorted(list(unique_propers))
for atom in range(len(bonded_to_atom)):
bonded_to = bonded_to_atom[atom]
if len(bonded_to) > 2:
for subset in itertools.combinations(bonded_to, 3):
data.impropers.append((atom, subset[0], subset[1], subset[2]))
if constraints == AllBonds or constraints == HAngles:
for bond in data.bonds:
bond.isConstrained = True
elif constraints == HBonds:
for bond in data.bonds:
atom1 = data.atoms[bond.atom1]
atom2 = data.atoms[bond.atom2]
bond.isConstrained = atom1.name.startswith('H') or atom2.name.startswith('H')
if rigidWater:
for bond in data.bonds:
atom1 = data.atoms[bond.atom1]
atom2 = data.atoms[bond.atom2]
if atom1.residue.name == 'HOH' and atom2.residue.name == 'HOH':
bond.isConstrained = True
if constraints == HAngles:
for angle in data.angles:
atom1 = data.atoms[angle[0]]
atom2 = data.atoms[angle[1]]
atom3 = data.atoms[angle[2]]
numH = 0
if atom1.name.startswith('H'):
numH += 1
if atom3.name.startswith('H'):
numH += 1
data.isAngleConstrained.append(numH == 2 or (numH == 1 and atom2.name.startswith('O')))
else:
data.isAngleConstrained = len(data.angles)*[False]
if rigidWater:
for i in range(len(data.angles)):
angle = data.angles[i]
atom1 = data.atoms[angle[0]]
atom2 = data.atoms[angle[1]]
atom3 = data.atoms[angle[2]]
if atom1.residue.name == 'HOH' and atom2.residue.name == 'HOH' and atom3.residue.name == 'HOH':
data.isAngleConstrained[i] = True
for atom in data.virtualSites:
(site, atoms, excludeWith) = data.virtualSites[atom]
index = atom.index
data.excludeAtomWith[excludeWith].append(index)
if site.type == 'average2':
sys.setVirtualSite(index, mm.TwoParticleAverageSite(
atoms[0], atoms[1], site.weights[0], site.weights[1]))
elif site.type == 'average3':
sys.setVirtualSite(index, mm.ThreeParticleAverageSite(
atoms[0], atoms[1], atoms[2],
site.weights[0], site.weights[1], site.weights[2]))
elif site.type == 'outOfPlane':
sys.setVirtualSite(index, mm.OutOfPlaneSite(
atoms[0], atoms[1], atoms[2],
site.weights[0], site.weights[1], site.weights[2]))
elif site.type == 'localCoords':
local_coord_site = mm.LocalCoordinatesSite(
atoms[0], atoms[1], atoms[2],
mm.Vec3(site.originWeights[0], site.originWeights[1], site.originWeights[2]),
mm.Vec3(site.xWeights[0], site.xWeights[1], site.xWeights[2]),
mm.Vec3(site.yWeights[0], site.yWeights[1], site.yWeights[2]),
mm.Vec3(site.localPos[0], site.localPos[1], site.localPos[2]))
sys.setVirtualSite(index, local_coord_site)
for force in self._forces:
force.createForce(sys, data, nonbondedMethod, nonbondedCutoff, args)
if removeCMMotion:
sys.addForce(mm.CMMotionRemover())
for force in self._forces:
if 'postprocessSystem' in dir(force):
force.postprocessSystem(sys, data, args)
for script in self._scripts:
exec(script, locals())
return sys | Construct an OpenMM System representing a Topology with this force field.
Parameters
----------
topology : Topology
The Topology for which to create a System
nonbondedMethod : object=NoCutoff
The method to use for nonbonded interactions. Allowed values are
NoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, or PME.
nonbondedCutoff : distance=1*nanometer
The cutoff distance to use for nonbonded interactions
constraints : object=None
Specifies which bonds and angles should be implemented with constraints.
Allowed values are None, HBonds, AllBonds, or HAngles.
rigidWater : boolean=True
If true, water molecules will be fully rigid regardless of the value
passed for the constraints argument
removeCMMotion : boolean=True
If true, a CMMotionRemover will be added to the System
hydrogenMass : mass=None
The mass to use for hydrogen atoms bound to heavy atoms. Any mass
added to a hydrogen is subtracted from the heavy atom to keep
their total mass the same.
args
Arbitrary additional keyword arguments may also be specified.
This allows extra parameters to be specified that are specific to
particular force fields.
Returns
-------
system
the newly created System |
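A hedged usage sketch: 'MyForceField' stands in for whatever subclass provides the createSystem above, and input.pdb is a placeholder structure file.
from simtk import unit as u
from simtk.openmm import app

pdb = app.PDBFile('input.pdb')               # placeholder structure
forcefield = MyForceField()                  # assumed force-field subclass
system = forcefield.createSystem(pdb.topology,
                                 nonbondedMethod=app.NoCutoff,
                                 nonbondedCutoff=1.0 * u.nanometer,
                                 constraints=app.HBonds)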
1,697 | def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event)
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content |
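A hedged usage sketch assuming Airflow's S3Hook exposes select_key() as above; the connection id, bucket, key and SQL expression are placeholders.
from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
subset = hook.select_key(
    key='s3://my-bucket/data.csv',
    expression='SELECT * FROM S3Object s LIMIT 5',
    input_serialization={'CSV': {'FileHeaderInfo': 'USE'}},
)
print(subset)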
1,698 | def paste_to_current_cell(self, tl_key, data, freq=None):
self.pasting = True
grid_rows, grid_cols, __ = self.grid.code_array.shape
self.need_abort = False
tl_row, tl_col, tl_tab = self._get_full_key(tl_key)
row_overflow = False
col_overflow = False
no_pasted_cells = 0
for src_row, row_data in enumerate(data):
target_row = tl_row + src_row
if self.grid.actions._is_aborted(src_row, _("Pasting cells... "),
freq=freq):
self._abort_paste()
return False
if target_row >= grid_rows:
row_overflow = True
break
for src_col, cell_data in enumerate(row_data):
target_col = tl_col + src_col
if target_col >= grid_cols:
col_overflow = True
break
if cell_data is not None:
key = target_row, target_col, tl_tab
try:
CellActions.set_code(self, key, cell_data)
no_pasted_cells += 1
except KeyError:
pass
if row_overflow or col_overflow:
self._show_final_overflow_message(row_overflow, col_overflow)
else:
self._show_final_paste_message(tl_key, no_pasted_cells)
self.pasting = False | Pastes data into grid from top left cell tl_key
Parameters
----------
tl_key: Tuple
\tKey of top left cell of paste area
data: iterable of iterables where inner iterable returns string
\tThe outer iterable represents rows
freq: Integer, defaults to None
\tStatus message frequency |
1,699 | def cli(env, sortby, columns, datacenter, username, storage_type):
file_manager = SoftLayer.FileStorageManager(env.client)
file_volumes = file_manager.list_file_volumes(datacenter=datacenter,
username=username,
storage_type=storage_type,
mask=columns.mask())
table = formatting.Table(columns.columns)
table.sortby = sortby
for file_volume in file_volumes:
table.add_row([value or formatting.blank()
for value in columns.row(file_volume)])
env.fout(table) | List file storage. |