Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
385,600 | def push_channel(self, content, channel, content_url=None):
parameters = {
        'app_key': self.app_key,      # key names and the 'channel' push type below are assumed; string literals were stripped in this dump
        'app_secret': self.app_secret,
        'target_alias': channel
    }
    return self._push(content, 'channel', parameters, content_url) | Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string |
385,601 | def singleton(cls):
instances = {}
def get_instance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance | See <Singleton> design pattern for detail: http://www.oodesign.com/singleton-pattern.html
Python <Singleton> reference: http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
Recommendation: use Singleton as a metaclass
Usage:
@singleton
class MyClass(object):
pass |
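A minimal usage sketch of the decorator above, assuming `singleton` is importable from the module that defines it (the module and class names here are illustrative); it shows that repeated instantiation returns the same cached object:

```python
from mymodule import singleton  # hypothetical import path

@singleton
class Config(object):
    def __init__(self, env="dev"):
        self.env = env

a = Config(env="prod")
b = Config()        # arguments are ignored once the instance is cached
assert a is b       # both names point to the single cached instance
print(a.env)        # -> "prod"
```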
385,602 | def remove_ancestors_of(self, node):
if isinstance(node, int):
        warnings.warn(
            "Calling remove_ancestors_of() with a node id is deprecated; "
            "pass a DAGNode instead.",  # message text assumed; the original literal was stripped in this dump
            DeprecationWarning, 2)
node = self._id_to_node[node]
anc = nx.ancestors(self._multi_graph, node)
for anc_node in anc:
if anc_node.type == "op":
self.remove_op_node(anc_node) | Remove all of the ancestor operation nodes of node. |
385,603 | def _LegacyCheckHashesWithFileStore(self):
if not self.state.pending_hashes:
return
file_hashes = {}
hash_to_tracker = {}
for index, tracker in iteritems(self.state.pending_hashes):
if tracker.get("hash_obj") is None:
continue
hash_obj = tracker["hash_obj"]
digest = hash_obj.sha256
file_hashes[index] = hash_obj
hash_to_tracker.setdefault(digest, []).append(tracker)
files_in_filestore = {}
filestore_obj = aff4.FACTORY.Open(
legacy_filestore.FileStore.PATH,
legacy_filestore.FileStore,
mode="r",
token=self.token)
for file_store_urn, hash_obj in filestore_obj.CheckHashes(
itervalues(file_hashes), external=self.state.use_external_stores):
for tracker in hash_to_tracker[hash_obj.sha256]:
self.state.files_skipped += 1
file_hashes.pop(tracker["index"])
files_in_filestore[file_store_urn] = hash_obj
self.state.pending_hashes.pop(tracker["index"])
self.state.files_hashed_since_check = 0
for filestore_file_urn, hash_obj in iteritems(files_in_filestore):
for file_tracker in hash_to_tracker.get(hash_obj.sha256, []):
stat_entry = file_tracker["stat_entry"]
target_urn = stat_entry.pathspec.AFF4Path(self.client_urn)
aff4.FACTORY.Copy(
filestore_file_urn, target_urn, update_timestamps=True)
with aff4.FACTORY.Open(
target_urn, mode="rw", token=self.token) as new_fd:
new_fd.Set(new_fd.Schema.STAT, stat_entry)
if new_fd.size == 0:
new_fd.size = (file_tracker["bytes_read"] or stat_entry.st_size)
if data_store.RelationalDBEnabled():
path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry)
path_info.hash_entry = hash_obj
data_store.REL_DB.WritePathInfos(self.client_id, [path_info])
filestore_obj.AddURNToIndex(str(hash_obj.sha256), target_urn)
self._ReceiveFetchedFile(file_tracker)
for index in file_hashes:
file_tracker = self.state.pending_hashes.pop(index)
self.state.pending_files[index] = file_tracker
if file_tracker["bytes_read"] > 0:
file_tracker["size_to_download"] = file_tracker["bytes_read"]
else:
file_tracker["size_to_download"] = file_tracker["stat_entry"].st_size
expected_number_of_hashes = (
file_tracker["size_to_download"] // self.CHUNK_SIZE + 1)
self.state.files_to_fetch += 1
for i in range(expected_number_of_hashes):
if i == expected_number_of_hashes - 1:
length = file_tracker["size_to_download"] % self.CHUNK_SIZE
else:
length = self.CHUNK_SIZE
self.CallClient(
server_stubs.HashBuffer,
pathspec=file_tracker["stat_entry"].pathspec,
offset=i * self.CHUNK_SIZE,
length=length,
next_state="CheckHash",
request_data=dict(index=index))
if self.state.files_hashed % 100 == 0:
self.Log("Hashed %d files, skipped %s already stored.",
self.state.files_hashed, self.state.files_skipped) | Check all queued up hashes for existence in file store (legacy).
Hashes which do not exist in the file store will be downloaded. This
function flushes the entire queue (self.state.pending_hashes) in order to
minimize the round trips to the file store.
If a file was found in the file store it is copied from there into the
client's VFS namespace. Otherwise, we request the client to hash every block
in the file, and add it to the file tracking queue
(self.state.pending_files). |
385,604 | def lookup(self, query=''):
    res = []
    query = re.compile('.*%s.*' % re.escape(query), self.reflags)  # pattern assumed; the format-string literal was stripped in this dump
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res | looks up all contacts where name or address match query |
385,605 | def _get_image_workaround_seek(self, idx):
warnings.warn("imageio workaround used!")
cap = self.video_handle
mult = 50
for ii in range(idx//mult):
cap.get_data(ii*mult)
final = cap.get_data(idx)
return final | Same as __getitem__ but seek through the video beforehand
This is a workaround for an all-zero image returned by `imageio`. |
385,606 | def convert_units(self, desired, guess=False):
units._convert_units(self, desired, guess)
return self | Convert the units of the mesh into a specified unit.
Parameters
----------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert? |
385,607 | def mine(self):
if PyFunceble.CONFIGURATION["mining"]:
try:
history = PyFunceble.requests.get(
self.to_get,
timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"],
headers=self.headers,
).history
mined = {self.to_get_bare: []}
for element in history:
element = element.url
if PyFunceble.INTERN["to_test_type"] == "url":
to_append = Check().is_url_valid(element, return_base=False)
elif PyFunceble.INTERN["to_test_type"] == "domain":
to_append = Check().is_url_valid(element, return_base=True)
else:
raise Exception("Unknown tested.")
if to_append:
if to_append.endswith(":80"):
to_append = to_append[:-3]
if to_append != self.to_get_bare:
mined[self.to_get_bare].append(to_append)
if mined[self.to_get_bare]:
return mined
return None
except (
PyFunceble.requests.ConnectionError,
PyFunceble.requests.exceptions.Timeout,
PyFunceble.requests.exceptions.InvalidURL,
PyFunceble.socket.timeout,
urllib3_exceptions.InvalidHeader,
UnicodeDecodeError,
):
return None
return None | Search for domain or URL related to the original URL or domain.
:return: The mined domains or URL.
:rtype: dict |
385,608 | def _write_comparison_plot_table(spid, models, options, core_results,
fit_results):
is_curve = in core_results[0][1]
df = core_results[spid][1]
df.rename(columns={: }, inplace=True)
if not is_curve:
x = np.arange(len(df)) + 1
df = df.sort(columns=)
df.insert(0, , x[::-1])
for model in models:
fit_result = fit_results[spid][model]
df[model] = fit_result[1]
df[model + "_residual"] = df[model] - df[]
if is_curve:
df = df.sort(columns=)
f_path = _get_file_path(spid, options, )
p_path = _get_file_path(spid, options, )
df.to_csv(f_path, index=False, float_format=)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(df[], df[], color=)
ax1.plot(df[], df[models])
ax1.legend(models + [], loc=)
ax1.set_xlabel()
ax1.set_ylabel()
ax2.hlines(0, np.min(df[]), np.max(df[]))
ax2.plot(df[], df[[x + for x in models]])
ax2.legend(models + [], loc=)
ax2.set_xlabel()
ax2.set_ylabel()
ax2.set_xlim(ax1.get_xlim())
ax2.set_ylim(min(ax2.get_ylim()[0], -1), max(ax2.get_ylim()[1], 1))
if options.get(, None):
ax1.set_yscale()
ax2.set_yscale(, linthreshy=1)
if options.get(, None):
ax1.set_xscale()
ax2.set_xscale()
if not options.get(, None) and not options.get(, None):
ax1.set_ylim(bottom=0)
ax1.set_xlim(left=0)
ax1 = _pad_plot_frame(ax1)
ax2 = _pad_plot_frame(ax2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout()
fig.savefig(p_path)
plt.close() | Notes
-----
Only applies to analysis using functions from empirical in which models are
also given. |
385,609 | def _pload(offset, size):
output = []
indirect = offset[0] ==
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 + (size % 2 if not indirect else 0)
ix_changed = (indirect or size < 5) and (abs(I) + size) > 127
if ix_changed:
output.append()
output.append( % I)
output.append()
I = 0
elif size == 5:
output.append()
output.append()
output.append( % I)
output.append()
I = 0
if indirect:
output.append( % (I + 1))
output.append( % I)
if size == 1:
output.append()
elif size == 2:
output.append()
output.append()
output.append()
output.append()
elif size == 4:
output.append()
REQUIRES.add()
else:
output.append()
REQUIRES.add()
else:
if size == 1:
output.append( % I)
else:
if size <= 4:
output.append( % I)
output.append( % (I + 1))
if size > 2:
output.append( % (I + 2))
output.append( % (I + 3))
else:
output.append()
REQUIRES.add()
if ix_changed:
output.append()
return output | Generic parameter loading.
Emits output code for setting IX at the right location.
size = Number of bytes to load:
1 => 8 bit value
2 => 16 bit value / string
4 => 32 bit value / f16 value
5 => 40 bit value |
385,610 | def iterfd(fd):
    # unpacker_kwargs is assumed to set unicode_errors='surrogatepass' (see Notes); the literal dict was stripped in this dump
    unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
for mesg in unpk:
yield mesg | Generator which unpacks a file object of msgpacked content.
Args:
fd: File object to consume data from.
Notes:
String objects are decoded using utf8 encoding. In order to handle
potentially malformed input, ``unicode_errors='surrogatepass'`` is set
to allow decoding bad input strings.
Yields:
Objects from a msgpack stream. |
385,611 | def toggle(self, rows):
    for r in Progress(rows, 'toggling', total=len(self.rows)):  # progress gerund assumed; literal stripped in this dump
if not self.unselectRow(r):
self.selectRow(r) | Toggle selection of given `rows`. |
385,612 | def get_index(self, field_name, catalog):
index = catalog.Indexes.get(field_name, None)
if not index and field_name == "Title":
return self.get_index("sortable_title", catalog)
return index | Returns the index of the catalog for the given field_name, if any |
385,613 | def dirsplit(path):
    parts = []
    remain = path
    part = True
    while part != '' and remain != '':
        remain, part = split(remain)
        parts.append(part)
    parts = [p for p in parts if p != '']
    if remain != '':
parts.append(remain)
parts = parts[::-1]
return parts | r"""
Args:
path (str):
Returns:
list: components of the path
CommandLine:
python -m utool.util_path --exec-dirsplit
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> paths = []
>>> paths.append('E:/window file/foo')
>>> paths.append('/normal/foo')
>>> paths.append('~/relative/path')
>>> results = [dirsplit(path) for path in paths]
>>> import re
>>> results2 = [re.split('\\/', path) for path in paths]
>>> print(results2)
>>> result = ut.repr2(results)
>>> print(result) |
385,614 | def get_token_func():
print("{}: token updater was triggered".format(datetime.datetime.now()))
context = adal.AuthenticationContext(
str.format("https://login.microsoftonline.com/{}", settings.ACTIVE_DIRECTORY_TENANT_ID),
api_version=None, validate_authority=True)
oauth_token = context.acquire_token_with_client_credentials(
"https://storage.azure.com",
settings.ACTIVE_DIRECTORY_APPLICATION_ID,
settings.ACTIVE_DIRECTORY_APPLICATION_SECRET)
    # token dict keys below ('accessToken', 'expiresIn') are assumed; the literals were stripped in this dump
    return oauth_token['accessToken'], oauth_token['expiresIn'] - 180 | This function makes a call to AAD to fetch an OAuth token
:return: the OAuth token and the interval to wait before refreshing it |
385,615 | def list_all_customers(cls, **kwargs):
        kwargs['_return_http_data_only'] = True  # kwarg name assumed; literal stripped in this dump
        if kwargs.get('async'):
return cls._list_all_customers_with_http_info(**kwargs)
else:
(data) = cls._list_all_customers_with_http_info(**kwargs)
return data | List Customers
Return a list of Customers
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_customers(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Customer]
If the method is called asynchronously,
returns the request thread. |
385,616 | def libvlc_media_get_mrl(p_md):
    f = _Cfunctions.get('libvlc_media_get_mrl', None) or \
        _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
ctypes.c_void_p, Media)
return f(p_md) | Get the media resource locator (mrl) from a media descriptor object.
@param p_md: a media descriptor object.
@return: string with mrl of media descriptor object. |
385,617 | def update_url(self, url=None, regex=None):
if not url and not regex:
raise ValueError("Neither a url or regex was provided to update_url.")
    headers = {
        'X-Prerender-Token': self.token,      # header and payload field names assumed; literals stripped in this dump
        'Content-Type': 'application/json',
    }
    data = {
        'prerenderToken': settings.PRERENDER_TOKEN,
    }
if url:
data["url"] = url
if regex:
data["regex"] = regex
r = self.session.post(self.RECACHE_URL, headers=headers, data=data)
return r.status_code < 500 | Accepts a fully-qualified url, or regex.
Returns True if successful, False if not successful. |
385,618 | def _get_component_from_result(self, result, lookup):
        for component in result['address_components']:
            if lookup['type'] in component['types']:
                return component.get(lookup['key'], '')
        return '' | Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this should be used for unique component types (e.g.
'locality'), not for categories (e.g. 'political') that can describe multiple components.
:arg dict result: A results dict with an 'address_components' key, as returned by the
Google geocoder.
:arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
the desired address component value.
:returns: address component or empty string |
385,619 | def update_machine_state(state_path):
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
    subprocess.check_call([
        'salt-call',          # CLI arguments assumed (salt-call --local state.template <path>); literals stripped in this dump
        '--local',
        'state.template',
        state_path,
    ]) | Update the machine state using the provided state declaration.
385,620 | def friendly_load(parser, token):
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
name = bits[-1]
try:
lib = find_library(parser, name)
subset = load_from_library(lib, name, bits[1:-2])
parser.add_library(subset)
except TemplateSyntaxError:
pass
else:
for name in bits[1:]:
try:
lib = find_library(parser, name)
parser.add_library(lib)
except TemplateSyntaxError:
pass
return LoadNode() | Tries to load a custom template tag set. Non existing tag libraries
are ignored.
This means that, if used in conjunction with ``if_has_tag``, you can try to
load the comments template tag library to enable comments even if the
comments framework is not installed.
For example::
{% load friendly_loader %}
{% friendly_load comments webdesign %}
{% if_has_tag render_comment_list %}
{% render_comment_list for obj %}
{% else %}
{% if_has_tag lorem %}
{% lorem %}
{% endif_has_tag %}
{% endif_has_tag %} |
385,621 | def _update_pwm(self):
if self._is_on:
values = self._get_pwm_values()
else:
values = [0] * len(self._driver.pins)
self._driver.set_pwm(values) | Update the pwm values of the driver regarding the current state. |
385,622 | def fill_package(app_name, build_dir=None, install_dir=None):
    zip_path = os.path.join(install_dir, '%s.zip' % app_name)
    with zipfile.ZipFile(zip_path, 'w') as zip_file:
fill_package_zip(zip_file, os.path.dirname(build_dir), prefix=app_name)
return zip_path | Creates the theme package (.zip) from templates and optionally
assets installed in the ``build_dir``. |
385,623 | def _path_to_id(path):
if path.endswith("/"):
path = path[:-1]
return os.path.basename(path) | Name of the root directory is used as ``<packageid>`` in ``info.xml``.
This function makes sure, that :func:`os.path.basename` doesn't return
blank string in case that there is `/` at the end of the `path`.
Args:
path (str): Path to the root directory.
Returns:
str: Basename of the `path`. |
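A tiny usage sketch of `_path_to_id`, assuming it is importable from the module above (the paths are illustrative); it shows that a trailing slash does not change the result:

```python
from package_module import _path_to_id  # hypothetical import path

assert _path_to_id("/data/packages/my_package/") == "my_package"  # trailing slash is stripped first
assert _path_to_id("/data/packages/my_package") == "my_package"   # plain path gives the same basename
```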
385,624 | def as_dtype(type_value):
if isinstance(type_value, DType):
return type_value
try:
return _INTERN_TABLE[type_value]
except KeyError:
pass
try:
return _STRING_TO_TF[type_value]
except KeyError:
pass
try:
return _PYTHON_TO_TF[type_value]
except KeyError:
pass
if isinstance(type_value, np.dtype):
if type_value.type == np.string_ or type_value.type == np.unicode_:
return string
if isinstance(type_value, (type, np.dtype)):
for key, val in _NP_TO_TF:
try:
if key == type_value:
return val
except TypeError as e:
raise TypeError(
"Cannot convert {} to a dtype. {}".format(type_value, e)
)
raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value) | Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`. |
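Assuming this is the converter exposed publicly as `tf.as_dtype` (and that TensorFlow and NumPy are installed), a quick usage sketch showing the accepted input kinds and the failure mode:

```python
import numpy as np
import tensorflow as tf

# Each of these resolves to the same DType object, tf.float32:
print(tf.as_dtype(tf.float32))    # from a DType
print(tf.as_dtype("float32"))     # from a string type name
print(tf.as_dtype(np.float32))    # from a numpy type

try:
    tf.as_dtype("not-a-dtype")
except TypeError as err:
    print("rejected:", err)       # unconvertible values raise TypeError
```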
385,625 | def _make_concept(self, entity):
name = self._sanitize(entity[])
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg[]: arg[][]
for arg in entity[]}
return concept, metadata | Return Concept from a Hume entity. |
385,626 | def populate(self, compound_dict=None, x=1, y=1, z=1):
        error_dict = {0: 'x', 1: 'y', 2: 'z'}  # axis labels assumed; literals stripped in this dump
try:
x = int(x)
y = int(y)
z = int(z)
except (ValueError, TypeError):
raise ValueError(
.format(x, y, z))
for replication_amount in x, y, z:
if replication_amount is None:
raise ValueError(
)
for replication_amount, index in zip([x, y, z], range(3)):
if replication_amount < 1:
raise ValueError(
.format(error_dict[index],
replication_amount))
if ((isinstance(compound_dict, dict)) or (compound_dict is None)):
pass
else:
raise TypeError(
.format(type(compound_dict)))
cell = defaultdict(list)
[a, b, c] = self.lattice_spacing
transform_mat = self.lattice_vectors
transform_mat = np.asarray(transform_mat, dtype=np.float64)
transform_mat = np.reshape(transform_mat, newshape=(3,3))
norms = np.linalg.norm(transform_mat, axis=1)
unit_vecs = np.divide(transform_mat.transpose(), norms)
for key, locations in self.lattice_points.items():
for coords in locations:
for replication in it.product(range(x), range(y), range(z)):
temp_location = list()
new_coords = np.asarray(coords, dtype=np.float64)
new_coords = np.reshape(new_coords, (1, 3), order=)
new_coords[0][0] = new_coords[0][0] + replication[0]
new_coords[0][1] = new_coords[0][1] + replication[1]
new_coords[0][2] = new_coords[0][2] + replication[2]
new_coords = np.dot(unit_vecs, new_coords.transpose())
new_coords[0] = new_coords[0] * a
new_coords[1] = new_coords[1] * b
new_coords[2] = new_coords[2] * c
new_coords = np.reshape(new_coords, (1, 3), order=)
tuple_of_coords = tuple(new_coords.flatten())
cell[key].append(tuple_of_coords)
ret_lattice = mb.Compound()
if compound_dict is None:
for key_id, all_pos in cell.items():
particle = mb.Compound(name=key_id, pos=[0, 0, 0])
for pos in all_pos:
particle_to_add = mb.clone(particle)
particle_to_add.translate_to(list(pos))
ret_lattice.add(particle_to_add)
else:
for key_id, all_pos in cell.items():
if isinstance(compound_dict[key_id], mb.Compound):
compound_to_move = compound_dict[key_id]
for pos in all_pos:
tmp_comp = mb.clone(compound_to_move)
tmp_comp.translate_to(list(pos))
ret_lattice.add(tmp_comp)
else:
err_type = type(compound_dict.get(key_id))
raise TypeError(
.format(key_id, err_type))
ret_lattice.periodicity = np.asarray([a * x, b * y, c * z], dtype=np.float64)
warn(
)
tolerance = 1e-12
ret_lattice.xyz_with_ports[ret_lattice.xyz_with_ports <= tolerance] = 0.
return ret_lattice | Expand lattice and create compound from lattice.
populate will expand lattice based on user input. The user must also
pass in a dictionary that contains the keys that exist in the
basis_dict. The corresponding Compound will be the full lattice
returned to the user.
If no dictionary is passed to the user, Dummy Compounds will be used.
Parameters
----------
x : int, optional, default=1
How many iterations in the x direction.
y : int, optional, default=1
How many iterations in the y direction.
z : int, optional, default=1
How many iterations in the z direction.
compound_dict : dictionary, optional, default=None
Link between basis_dict and Compounds.
Exceptions Raised
-----------------
ValueError : incorrect x,y, or z values.
TypeError : incorrect type for basis vector
Call Restrictions
-----------------
Called after constructor by user. |
385,627 | def projR(gamma, p):
    return np.multiply(gamma.T, p / np.maximum(np.sum(gamma, axis=1), 1e-10)).T | return the KL projection on the row constraints |
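A self-contained check of what the one-liner above does: it rescales each row of `gamma` so the row sums match the target marginal `p` (the function body is restated verbatim so the snippet runs on its own):

```python
import numpy as np

def projR(gamma, p):
    # rescale each row of gamma so that the row sums equal the target marginal p
    return np.multiply(gamma.T, p / np.maximum(np.sum(gamma, axis=1), 1e-10)).T

gamma = np.array([[0.2, 0.3],
                  [0.1, 0.4]])
p = np.array([1.0, 2.0])
projected = projR(gamma, p)
print(projected.sum(axis=1))   # -> [1. 2.], rows now satisfy the row constraint
```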
385,628 | def _normalize_histogram2d(self, counts, type):
counts = (255 * (counts - np.nanmin(counts)) /
(np.nanmax(counts) - np.nanmin(counts)))
    if type == 'reverse_bw':
counts = 255 - counts
return counts.astype(np.uint8) | Normalize the values of the counts for a 2D histogram.
This normalizes the values of a numpy array to the range 0-255.
:param counts: a NumPy array which is to be rescaled.
:param type: either 'bw' or 'reverse_bw'. |
385,629 | def _buildElementTree(self,):
t_elt = ctree.Element(self.name)
for k,v in [ (key,value) for key,value in self.__dict__.items() if key != ]:
if v and v != :
t_elt.set(k if k != else , str(v).lower())
self._etree = t_elt
return t_elt | Turn object into an ElementTree |
385,630 | def _tarboton_slopes_directions(data, dX, dY, facets, ang_adj):
shp = np.array(data.shape) - 1
direction = np.full(data.shape, FLAT_ID_INT, )
mag = np.full(data.shape, FLAT_ID_INT, )
slc0 = [slice(1, -1), slice(1, -1)]
for ind in xrange(8):
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]),
slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]),
slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
ids1 = (direction[:, 1] > np.pi / 2) \
& (direction[:, 1] < 3 * np.pi / 2)
direction[ids1, 0] = direction[ids1, 1]
mag[ids1, 0] = mag[ids1, 1]
ids1 = (direction[:, -2] < np.pi / 2) \
| (direction[:, -2] > 3 * np.pi / 2)
direction[ids1, -1] = direction[ids1, -2]
mag[ids1, -1] = mag[ids1, -2]
ids1 = (direction[1, :] > 0) & (direction[1, :] < np.pi)
direction[0, ids1] = direction[1, ids1]
mag[0, ids1] = mag[1, ids1]
ids1 = (direction[-2, :] > np.pi) & (direction[-2, :] < 2 * np.pi)
direction[-1, ids1] = direction[-2, ids1]
mag[-1, ids1] = mag[-2, ids1]
slc0 = [slice(1, -1), slice(0, 1)]
for ind in [0, 1, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]), slice(e1[1], 1 + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]), slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(1, -1), slice(-1, None)]
for ind in [2, 3, 4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(1 + e1[0], shp[0] + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(1 + e2[0], shp[0] + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp)
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(0, 1), slice(1, -1)]
for ind in [4, 5, 6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]), slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]), slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(-1, None), slice(1, -1)]
for ind in [0, 1, 2, 3]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(1 + e1[1], shp[1] + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(1 + e2[1], shp[1] + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(0, 1), slice(0, 1)]
for ind in [6, 7]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]), slice(e1[1], 1 + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]), slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(0, 1), slice(-1, None)]
for ind in [4, 5]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(e1[0], 1 + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(e2[0], 1 + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(-1, None), slice(0, 1)]
for ind in [0, 1]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(e1[1], 1 + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(e2[1], 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
slc0 = [slice(-1, None), slice(-1, None)]
for ind in [3, 4]:
e1 = facets[ind][1]
e2 = facets[ind][2]
ang = ang_adj[ind]
slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]),
slice(shp[1] + e1[1], shp[1] + 1 + e1[1])]
slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]),
slice(shp[1] + e2[1], shp[1] + 1 + e2[1])]
d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, )
mag, direction = _calc_direction(data, mag, direction, ang, d1, d2,
theta, slc0, slc1, slc2)
mag[mag > 0] = np.sqrt(mag[mag > 0])
return mag, direction | Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf |
385,631 | def launch_tor(config, reactor,
tor_binary=None,
progress_updates=None,
connection_creator=None,
timeout=None,
kill_on_stderr=True,
stdout=None, stderr=None):
from .controller import launch
tor = yield launch(
reactor,
stdout=stdout,
stderr=stderr,
progress_updates=progress_updates,
tor_binary=tor_binary,
connection_creator=connection_creator,
timeout=timeout,
kill_on_stderr=kill_on_stderr,
_tor_config=config,
)
defer.returnValue(tor.process) | Deprecated; use launch() instead.
See also controller.py |
385,632 | def pupv_to_vRvz(pu,pv,u,v,delta=1.,oblate=False):
if oblate:
denom= delta*(sc.sinh(u)**2.+sc.cos(v)**2.)
vR= (pu*sc.sinh(u)*sc.sin(v)+pv*sc.cosh(u)*sc.cos(v))/denom
vz= (pu*sc.cosh(u)*sc.cos(v)-pv*sc.sinh(u)*sc.sin(v))/denom
else:
denom= delta*(sc.sinh(u)**2.+sc.sin(v)**2.)
vR= (pu*sc.cosh(u)*sc.sin(v)+pv*sc.sinh(u)*sc.cos(v))/denom
vz= (pu*sc.sinh(u)*sc.cos(v)-pv*sc.cosh(u)*sc.sin(v))/denom
return (vR,vz) | NAME:
pupv_to_vRvz
PURPOSE:
calculate cylindrical vR and vz from momenta in prolate or oblate confocal u and v coordinates for a given focal length delta
INPUT:
pu - u momentum
pv - v momentum
u - u coordinate
v - v coordinate
delta= focus
oblate= (False) if True, compute oblate confocal coordinates instead of prolate
OUTPUT:
(vR,vz)
HISTORY:
2017-12-04 - Written - Bovy (UofT) |
385,633 | def upload_to_s3(self, key, filename):
        # S3 ExtraArgs keys below follow the standard boto3 names; the string literals were stripped in this dump
        extra_args = {'ACL': self.acl}
        guess = mimetypes.guess_type(filename)
        content_type = guess[0]
        encoding = guess[1]
        if content_type:
            extra_args['ContentType'] = content_type
        if (self.gzip and content_type in self.gzip_content_types) or encoding == 'gzip':
            extra_args['ContentEncoding'] = 'gzip'
        if content_type in self.cache_control:
            extra_args['CacheControl'] = ''.join((
                'max-age=',
                str(self.cache_control[content_type])
            ))
if not self.dry_run:
logger.debug("Uploading %s" % filename)
if self.verbosity > 0:
self.stdout.write("Uploading %s" % filename)
s3_obj = self.s3_resource.Object(self.aws_bucket_name, key)
s3_obj.upload_file(filename, ExtraArgs=extra_args)
self.uploaded_files += 1
self.uploaded_file_list.append(filename) | Set the content type and gzip headers if applicable
and upload the item to S3 |
385,634 | def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:
cutoff = cutoff if cutoff is not None else 0
if value > cutoff:
return 1
if value < (-1 * cutoff):
return - 1
return 0 | Assign if a value is greater than or less than a cutoff. |
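Illustrative calls against the function above, assuming `get_cutoff` is importable from its module; they show the default cutoff of 0 and an explicit symmetric cutoff:

```python
from mymodule import get_cutoff  # hypothetical import path

assert get_cutoff(0.7) == 1              # above the default cutoff of 0
assert get_cutoff(-0.7) == -1            # below the negated cutoff
assert get_cutoff(0.2, cutoff=0.5) == 0  # inside [-0.5, 0.5] -> neither
```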
385,635 | def _get_comments(session, group_or_user_id, wall_id):
return session.fetch_items("wall.getComments", Comment.from_json, count=100, owner_id=group_or_user_id, post_id=wall_id, need_likes=1) | https://vk.com/dev/wall.getComments |
385,636 | def leaves(self):
self._ensure_parameters()
return self.exclude(
**{"%s__id__in" % self.model._cte_node_children: self.all()}
) | Returns a :class:`QuerySet` of all leaf nodes (nodes with no
children).
:return: A :class:`QuerySet` of all leaf nodes (nodes with no
children). |
385,637 | def union(self, *iterables):
return self.__class__(chain(iter(self), *iterables), key=self._key) | Return a new SortedSet with elements from the set and all *iterables*. |
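This `union` reads like the one on sortedcontainers' `SortedSet` (an assumption about where the surrounding class comes from); under that assumption, a usage sketch against that library:

```python
from sortedcontainers import SortedSet

s = SortedSet([3, 1, 5])
t = s.union([2, 4], (5, 6))   # any number of iterables may be passed
print(list(t))                # -> [1, 2, 3, 4, 5, 6], a new sorted set
print(list(s))                # -> [1, 3, 5], the original set is unchanged
```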
385,638 | def _register(self, name):
templatehook = TemplateHook()
self._registry[name] = templatehook
return templatehook | @Api private
Add new :py:class:`TemplateHook` into the registry
:param str name: Hook name
:return: Instance of :py:class:`TemplateHook`
:rtype: :py:class:`TemplateHook` |
385,639 | def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
return "http://%s:%d/filedata/log-files/%s.log.0?offset=%s&length=%s" % \
(host, shell_port, instance_id, offset, length) | Make the url for log-file data in heron-shell
from the info stored in stmgr. |
385,640 | def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False, allow_args=True, callblock_args=None):
if endtag is None:
endtag = "end" + tag
def decorator(f):
def parse(self, parser):
lineno = parser.stream.next().lineno
args = []
kwargs = []
if allow_args:
args, kwargs = parse_block_signature(parser)
call = self.call_method("support_method", args, kwargs, lineno=lineno)
if tag_only:
return nodes.Output([call], lineno=lineno)
call_args = []
if callblock_args is not None:
for arg in callblock_args:
                call_args.append(nodes.Name(arg, 'param', lineno=lineno))
        body = parser.parse_statements(['name:' + endtag], drop_needle=True)
return nodes.CallBlock(call, call_args, [], body, lineno=lineno)
def support_method(self, *args, **kwargs):
return f(*args, **kwargs)
attrs = {"tags": set([tag]), "parse": parse, "support_method": support_method}
return type(name or f.__name__, (Extension,), attrs)
return decorator | Decorator to easily create a jinja extension which acts as a fragment. |
385,641 | def align_epi_anat(anatomy,epi_dsets,skull_strip_anat=True):
if isinstance(epi_dsets,basestring):
epi_dsets = [epi_dsets]
if len(epi_dsets)==0:
nl.notify( % anatomy,level=nl.level.warning)
return
if all(os.path.exists(nl.suffix(x,)) for x in epi_dsets):
return
anatomy_use = anatomy
if skull_strip_anat:
nl.skull_strip(anatomy,)
anatomy_use = nl.suffix(anatomy,)
inputs = [anatomy_use] + epi_dsets
dset_products = lambda dset: [nl.suffix(dset,), nl.prefix(dset)+, nl.prefix(dset)+]
products = nl.flatten([dset_products(dset) for dset in epi_dsets])
with nl.run_in_tmp(inputs,products):
if nl.is_nifti(anatomy_use):
anatomy_use = nl.afni_copy(anatomy_use)
epi_dsets_use = []
for dset in epi_dsets:
if nl.is_nifti(dset):
epi_dsets_use.append(nl.afni_copy(dset))
else:
epi_dsets_use.append(dset)
cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no", "-epi_strip", "3dAutomask","-anat", anatomy_use, "-epi_base", "5", "-epi", epi_dsets_use[0]]
if len(epi_dsets_use)>1:
cmd += [] + epi_dsets_use[1:]
out = nl.run(cmd)
for dset in epi_dsets:
if nl.is_nifti(dset):
dset_nifti = nl.nifti_copy(nl.prefix(dset)+)
if dset_nifti and os.path.exists(dset_nifti) and dset_nifti.endswith() and dset.endswith():
nl.run([,dset_nifti]) | aligns epis to anatomy using ``align_epi_anat.py`` script
:epi_dsets: can be either a string or list of strings of the epi child datasets
:skull_strip_anat: if ``True``, ``anatomy`` will be skull-stripped using the default method
The default output suffix is "_al" |
385,642 | def colors(palette):
all_colors = {
        'cmyk': ['cian', 'magenta', 'yellow', 'black'],   # palette keys/values partly reconstructed from the docstring enum and examples (assumed)
        'rgb': ['red', 'green', 'blue']
    }
    if palette == 'all':
result = all_colors
else:
result = {palette: all_colors.get(palette)}
return jsonify(result) | Example endpoint return a list of colors by palette
This is using docstring for specifications
---
tags:
- colors
parameters:
- name: palette
in: path
type: string
enum: ['all', 'rgb', 'cmyk']
required: true
default: all
description: Which palette to filter?
operationId: get_colors
consumes:
- application/json
produces:
- application/json
security:
colors_auth:
- 'write:colors'
- 'read:colors'
schemes: ['http', 'https']
deprecated: false
externalDocs:
description: Project repository
url: http://github.com/rochacbruno/flasgger
definitions:
Palette:
type: object
properties:
palette_name:
type: array
items:
$ref: '#/definitions/Color'
Color:
type: string
responses:
200:
description: A list of colors (may be filtered by palette)
schema:
$ref: '#/definitions/Palette'
examples:
rgb: ['red', 'green', 'blue'] |
385,643 | def mkCuttingStock(s):
w,q = [],[]
for item in sorted(s):
if w == [] or item != w[-1]:
w.append(item)
q.append(1)
else:
q[-1] += 1
return w,q | mkCuttingStock: convert a bin packing instance into cutting stock format |
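A short example of the conversion above, assuming `mkCuttingStock` is importable from its module; equal sizes are collapsed into a width list `w` and a demand list `q`:

```python
from mymodule import mkCuttingStock  # hypothetical import path

s = [20, 20, 45, 45, 45, 60]   # one bin-packing item size per entry
w, q = mkCuttingStock(s)
print(w)   # -> [20, 45, 60]   distinct widths
print(q)   # -> [2, 3, 1]      demand for each width
```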
385,644 | def add_listener(self, on_message=None):
request = topic_add_message_listener_codec.encode_request(self.name, False)
def handle(item, publish_time, uuid):
member = self._client.cluster.get_member_by_uuid(uuid)
item_event = TopicMessage(self.name, item, publish_time, member, self._to_object)
on_message(item_event)
return self._start_listening(request,
lambda m: topic_add_message_listener_codec.handle(m, handle),
                                     lambda r: topic_add_message_listener_codec.decode_response(r)['response'],  # response key assumed; literal stripped in this dump
self.partition_key) | Subscribes to this topic. When someone publishes a message on this topic, on_message() function is called if
provided.
:param on_message: (Function), function to be called when a message is published.
:return: (str), a registration id which is used as a key to remove the listener. |
385,645 | def build_query(self, case_id, query=None, variant_ids=None, category=):
query = query or {}
mongo_query = {}
gene_query = None
for criterion in FUNDAMENTAL_CRITERIA:
if criterion == :
LOG.debug("Building a mongo query for %s" % case_id)
mongo_query[] = case_id
elif criterion == and variant_ids:
LOG.debug("Adding variant_ids %s to query" % .join(variant_ids))
mongo_query[] = {: variant_ids}
elif criterion == :
LOG.debug("Querying category %s" % category)
mongo_query[] = category
elif criterion == :
mongo_query[] = query.get(, )
LOG.debug("Set variant type to %s", mongo_query[])
elif criterion in [, ] and gene_query is None:
gene_query = self.gene_filter(query, mongo_query)
elif criterion == and query.get():
self.coordinate_filter(query, mongo_query)
elif criterion == and variant_ids:
LOG.debug("Adding variant_ids %s to query" % .join(variant_ids))
mongo_query[] = {: variant_ids}
primary_terms = False
secondary_terms = False
for term in PRIMARY_CRITERIA:
if query.get(term):
primary_terms = True
for term in SECONDARY_CRITERIA:
if query.get(term):
secondary_terms = True
if primary_terms is True:
clinsign_filter = self.clinsig_query(query, mongo_query)
if secondary_terms is True:
secondary_filter = self.secondary_query(query, mongo_query)
if primary_terms is False:
if gene_query:
mongo_query[] = [ {: gene_query}, {: secondary_filter}]
else:
mongo_query[] = secondary_filter
if primary_terms is True:
if query.get() == True:
if gene_query:
mongo_query[] = [
{: gene_query},
{
: [
{: secondary_filter}, clinsign_filter
]
}
]
else:
mongo_query[] = [ {: secondary_filter}, clinsign_filter ]
else:
secondary_filter.append(clinsign_filter)
if gene_query:
mongo_query[] = [ {: gene_query}, {: secondary_filter}]
else:
mongo_query[] = secondary_filter
elif primary_terms is True:
mongo_query[] = clinsign_filter[]
if gene_query:
mongo_query[] = [{ : gene_query }]
elif gene_query:
mongo_query[] = [{ : gene_query }]
LOG.info("mongo query: %s", mongo_query)
return mongo_query | Build a mongo query
These are the different query options:
{
'genetic_models': list,
'chrom': str,
'thousand_genomes_frequency': float,
'exac_frequency': float,
'clingen_ngi': int,
'cadd_score': float,
'cadd_inclusive": boolean,
'genetic_models': list(str),
'hgnc_symbols': list,
'region_annotations': list,
'functional_annotations': list,
'clinsig': list,
'clinsig_confident_always_returned': boolean,
'variant_type': str(('research', 'clinical')),
'chrom': str,
'start': int,
'end': int,
'svtype': list,
'size': int,
'size_shorter': boolean,
'gene_panels': list(str),
'mvl_tag": boolean,
'decipher": boolean,
}
Arguments:
case_id(str)
query(dict): a dictionary of query filters specified by the users
variant_ids(list(str)): A list of md5 variant ids
Returns:
mongo_query : A dictionary in the mongo query format |
385,646 | def warn_quirks(message, recommend, pattern, index):
import traceback
import bs4
    paths = (MODULE, sys.modules['bs4'].__path__[0])
tb = traceback.extract_stack()
previous = None
filename = None
lineno = None
for entry in tb:
if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)):
break
previous = entry
if previous:
filename = previous.filename if PY35 else previous[0]
lineno = previous.lineno if PY35 else previous[1]
context, line = get_pattern_context(pattern, index)[0:2]
warnings.warn_explicit(
"\nCSS selector pattern:\n" +
" {}\n".format(message) +
" This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" +
" In order to confrom to the CSS spec, {}\n".format(recommend) +
" It is strongly recommended the selector be altered to conform to the CSS spec " +
"as an exception will be raised for this case in the future.\n" +
"pattern line {}:\n{}".format(line, context),
QuirksWarning,
filename,
lineno
) | Warn quirks. |
385,647 | def vsan_datastore_configured(name, datastore_name):
    '''
    Configures the cluster's VSAN datastore
    WARNING: The VSAN datastore is created automatically after the first
    ESXi host is added to the cluster; the state assumes that the datastore
    exists and errors if it doesn't.
    '''
cluster_name, datacenter_name = \
__salt__[]()[], \
__salt__[]()[]
display_name = .format(datacenter_name, cluster_name)
log.info(%s\, display_name)
ret = {: name,
: {}, : None,
: }
comments = []
changes = {}
changes_required = False
try:
si = __salt__[]()
vsan_ds = _get_vsan_datastore(si, cluster_name)
if vsan_ds[] == datastore_name:
comments.append({0}\
.format(vsan_ds[]))
log.info(comments[-1])
else:
changes_required = True
if __opts__[]:
comments.append(
{1}\.format(name, datastore_name))
log.info(comments[-1])
else:
log.trace(%s\%s\,
vsan_ds[], datastore_name)
__salt__[](
datastore_name=vsan_ds[],
new_datastore_name=datastore_name,
service_instance=si)
comments.append({0}\
.format(datastore_name))
changes = {: {: {: datastore_name},
: {: vsan_ds[]}}}
log.info(comments[-1])
__salt__[](si)
ret.update({: True if (not changes_required) else None if
__opts__[] else True,
: .join(comments),
: changes})
return ret
except salt.exceptions.CommandExecutionError as exc:
log.exception()
if si:
__salt__[](si)
ret.update({
: False,
: exc.strerror})
return ret | Configures the cluster's VSAN datastore
WARNING: The VSAN datastore is created automatically after the first
ESXi host is added to the cluster; the state assumes that the datastore
exists and errors if it doesn't. |
385,648 | def TBH(cpu, dest):
base_addr = dest.get_mem_base_addr()
if dest.mem.base in (, ):
base_addr = cpu.PC
offset = cpu.read_int(base_addr + dest.get_mem_offset(), 16)
offset = Operators.ZEXTEND(offset, cpu.address_bit_size)
cpu.PC += (offset << 1) | Table Branch Halfword causes a PC-relative forward branch using a table of single halfword offsets. A base
register provides a pointer to the table, and a second register supplies an index into the table. The branch
length is twice the value of the halfword returned from the table.
:param ARMv7Operand dest: see below; register |
385,649 | def hashleftjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
cache=True, lprefix=None, rprefix=None):
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashLeftJoinView(left, right, lkey, rkey, missing=missing,
cache=cache, lprefix=lprefix, rprefix=rprefix) | Alternative implementation of :func:`petl.transform.joins.leftjoin`,
where the join is executed by constructing an in-memory lookup for the
right hand table, then iterating over rows from the left hand table.
May be faster and/or more resource efficient where the right table is small
and the left table is large.
By default data from right hand table is cached to improve performance
(only available when `key` is given).
Left and right tables with different key fields can be handled via the
`lkey` and `rkey` arguments. |
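Since the docstring refers to `petl.transform.joins.leftjoin`, a small usage sketch with illustrative tables, assuming the petl package is installed:

```python
import petl as etl

left = [('id', 'colour'),
        (1, 'blue'),
        (2, 'red'),
        (3, 'purple')]
right = [('id', 'shape'),
         (1, 'circle'),
         (3, 'square')]

# id 2 has no match in the right table, so its 'shape' comes back as 'n/a'
table = etl.hashleftjoin(left, right, key='id', missing='n/a')
print(etl.lookall(table))
```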
385,650 | def write_kwargs_to_attrs(cls, attrs, **kwargs):
for arg, val in kwargs.items():
if val is None:
val = str(None)
if isinstance(val, dict):
attrs[arg] = val.keys()
cls.write_kwargs_to_attrs(attrs, **val)
else:
attrs[arg] = val | Writes the given keywords to the given ``attrs``.
If any keyword argument points to a dict, the keyword will point to a
list of the dict's keys. Each key is then written to the attrs with its
corresponding value.
Parameters
----------
attrs : an HDF attrs
The ``attrs`` of an hdf file or a group in an hdf file.
\**kwargs :
The keywords to write. |
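A minimal sketch of what the classmethod produces for nested keyword arguments. `FileHandler` is a hypothetical stand-in for whatever class defines `write_kwargs_to_attrs`, and a plain dict stands in for an HDF `attrs` mapping since only item assignment is needed:

```python
attrs = {}
FileHandler.write_kwargs_to_attrs(       # hypothetical owner class
    attrs,
    approximant="IMRPhenomPv2",
    static_params={"f_lower": 20.0, "distance": 100.0},
)
print(attrs)
# {'approximant': 'IMRPhenomPv2',
#  'static_params': dict_keys(['f_lower', 'distance']),   # the dict keyword stores its keys...
#  'f_lower': 20.0, 'distance': 100.0}                    # ...and each nested key gets its own attr
```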
385,651 | def search(self, title=None, libtype=None, **kwargs):
args = {}
        if title:
            args['title'] = title
        if libtype:
            args['type'] = utils.searchType(libtype)
for attr, value in kwargs.items():
args[attr] = value
key = % utils.joinArgs(args)
return self.fetchItems(key) | Searching within a library section is much more powerful. It seems certain
attributes on the media objects can be targeted to filter this search down
a bit, but I havent found the documentation for it.
Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items
such as actor=<id> seem to work, but require you already know the id of the actor.
TLDR: This is untested but seems to work. Use library section search when you can. |
385,652 | def create_linear(num_finite_buckets, width, offset):
if num_finite_buckets <= 0:
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if width <= 0.0:
        raise ValueError(_BAD_FLOAT_ARG % (u'width', 0.0))
return sc_messages.Distribution(
bucketCounts=[0] * (num_finite_buckets + 2),
linearBuckets=sc_messages.LinearBuckets(
numFiniteBuckets=num_finite_buckets,
width=width,
offset=offset)) | Creates a new instance of distribution with linear buckets.
Args:
num_finite_buckets (int): initializes number of finite buckets
width (float): initializes the width of each bucket
offset (float): initializes the offset
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance |
385,653 | def uhstack(arrs):
v = np.hstack(arrs)
v = _validate_numpy_wrapper_units(v, arrs)
return v | Stack arrays in sequence horizontally while preserving units
This is a wrapper around np.hstack that preserves units.
Examples
--------
>>> from unyt import km
>>> a = [1, 2, 3]*km
>>> b = [2, 3, 4]*km
>>> print(uhstack([a, b]))
[1 2 3 2 3 4] km
>>> a = [[1],[2],[3]]*km
>>> b = [[2],[3],[4]]*km
>>> print(uhstack([a, b]))
[[1 2]
[2 3]
[3 4]] km |
385,654 | def observable(operator, rho, unfolding, complex=False):
r
if len(rho.shape) == 2:
return np.array([observable(operator, i, unfolding) for i in rho])
Ne = unfolding.Ne
Mu = unfolding.Mu
obs = 0
if unfolding.normalized:
rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)])
for i in range(Ne):
for k in range(Ne):
if unfolding.real:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
if k < i:
u, v = (i, k)
else:
u, v = (k, i)
obs += operator[i, k]*rho[Mu(1, u, v)]
if k != i:
if k < i:
obs += 1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
obs += -1j*operator[i, k]*rho[Mu(-1, u, v)]
else:
if k == 0 and i == 0:
obs += operator[i, k]*rho11
else:
obs += operator[i, k]*rho[Mu(0, k, i)]
if not complex:
obs = np.real(obs)
    return obs | r"""Return an observable amount.
INPUT:
    - ``operator`` - A square matrix representing a Hermitian operator \
    in the same basis as the density matrix.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indices to unfolded \
indices.
>>> Ne = 2
>>> unfolding = Unfolding(Ne, True, True, True)
>>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]])
>>> rho = unfolding(rho)
>>> sx = np.array([[0, 1], [1, 0]])
>>> print(observable(sx, rho, unfolding))
2.0 |
385,655 | def intersection(self, other):
ivs = set()
shorter, longer = sorted([self, other], key=len)
for iv in shorter:
if iv in longer:
ivs.add(iv)
return IntervalTree(ivs) | Returns a new tree of all intervals common to both self and
other. |
385,656 | def _consolidate_repo_sources(sources):
if not isinstance(sources, sourceslist.SourcesList):
        raise TypeError(
            "'{0}' not a '{1}'".format(   # message text assumed; the literal was stripped in this dump
                type(sources),
                sourceslist.SourcesList
            )
        )
consolidated = {}
delete_files = set()
base_file = sourceslist.SourceEntry().file
repos = [s for s in sources.list if not s.invalid]
for repo in repos:
        repo.uri = repo.uri.rstrip('/')
        key = str((getattr(repo, 'architectures', []),   # attribute name assumed; literal stripped in this dump
                   repo.disabled, repo.type, repo.uri, repo.dist))
if key in consolidated:
combined = consolidated[key]
combined_comps = set(repo.comps).union(set(combined.comps))
consolidated[key].comps = list(combined_comps)
else:
consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line))
if repo.file != base_file:
delete_files.add(repo.file)
sources.list = list(consolidated.values())
sources.save()
for file_ in delete_files:
try:
os.remove(file_)
except OSError:
pass
return sources | Consolidate APT sources. |
385,657 | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
self.A = 0x67452301L
self.B = 0xefcdab89L
self.C = 0x98badcfeL
self.D = 0x10325476L | Initialize the message-digest and set all fields to zero. |
385,658 | def add_states(self, *states):
for state in states:
self.states[state] = EventManagerPlus(self) | Add @states. |
385,659 | def load_data(filespec, idx=None, logger=None, **kwargs):
global loader_registry
info = iohelper.get_fileinfo(filespec)
filepath = info.filepath
if idx is None:
idx = info.numhdu
try:
typ, subtyp = iohelper.guess_filetype(filepath)
except Exception as e:
if logger is not None:
logger.warning("error determining file type: %s; "
"assuming " % (str(e)))
try:
        loader_info = loader_registry['%s/%s' % (typ, subtyp)]  # registry key format assumed; literal stripped in this dump
data_loader = loader_info.loader
except KeyError:
data_loader = load_fits
data_obj = data_loader(filepath, idx=idx, logger=logger,
**kwargs)
return data_obj | Load data from a file.
This call is used to load a data item from a filespec (path or URL)
Parameters
----------
filespec : str
The path of the file to load (can be a URL).
idx : int or string (optional)
The index or name of the data unit in the file (e.g. an HDU name)
logger : python logger (optional)
A logger to record progress opening the item
All other keyword parameters are passed to the opener chosen for
the file type.
Returns
-------
data_obj : a data object for a ginga viewer |
385,660 | def run_work(self):
if os.path.exists(LOCAL_EVAL_ROOT_DIR):
sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR)
self.run_attacks()
self.run_defenses() | Run attacks and defenses |
385,661 | def move_items(self, from_group, to_group):
if from_group not in self.keys() or len(self.groups[from_group]) == 0:
return
self.groups.setdefault(to_group, list()).extend(self.groups.get
(from_group, list()))
if from_group in self.groups:
del self.groups[from_group] | Take all elements from the from_group and add it to the to_group. |
385,662 | def _attr_sort_func(model, iter1, iter2, attribute):
attr1 = getattr(model[iter1][0], attribute, None)
attr2 = getattr(model[iter2][0], attribute, None)
return cmp(attr1, attr2) | Internal helper |
385,663 | def _find_usage_security_groups(self):
vpc_count = 0
        paginator = self.conn.get_paginator('describe_db_security_groups')  # boto3 RDS API names; literals stripped in this dump
        for page in paginator.paginate():
            for group in page['DBSecurityGroups']:
                if 'VpcId' in group and group['VpcId'] is not None:
vpc_count += 1
self.limits[]._add_current_usage(
len(group["EC2SecurityGroups"]) + len(group["IPRanges"]),
aws_type=,
resource_id=group[]
)
self.limits[]._add_current_usage(
vpc_count,
aws_type=,
) | find usage for security groups |
385,664 | def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "diagnose_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"diagnose_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs["DiagnoseCluster"].retry,
default_timeout=self._method_configs["DiagnoseCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
operation = self._inner_api_calls["diagnose_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
) | Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.diagnose_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
385,665 | def fetch_access_token(self):
return self._fetch_access_token(
            url='https://api.weixin.qq.com/cgi-bin/token',   # WeChat token endpoint and parameter names; literals stripped in this dump
            params={
                'grant_type': 'client_credential',
                'appid': self.appid,
                'secret': self.secret
}
        ) | Fetch the access token.
For details see http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档
:return: the returned JSON data packet |
385,666 | def removi(item, inset):
assert isinstance(inset, stypes.SpiceCell)
assert inset.dtype == 2
item = ctypes.c_int(item)
libspice.removi_c(item, ctypes.byref(inset)) | Remove an item from an integer set.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removi_c.html
:param item: Item to be removed.
:type item: int
:param inset: Set to be updated.
:type inset: spiceypy.utils.support_types.SpiceCell |
385,667 | def _get_representative(self, obj):
if obj not in self._parents:
self._parents[obj] = obj
self._weights[obj] = 1
self._prev_next[obj] = [obj, obj]
self._min_values[obj] = obj
return obj
path = [obj]
root = self._parents[obj]
while root != path[-1]:
path.append(root)
root = self._parents[root]
for ancestor in path:
self._parents[ancestor] = root
return root | Finds and returns the root of the set containing `obj`. |
385,668 | def set_finished(self):
component_name = self.get_component_name()
self.log(
logging.INFO,
"Component [%s] is being marked as finished.",
component_name)
existing_state = self.__get_state(component_name)
assert existing_state == fss.constants.PCS_RUNNING, \
"Can not change to state from unsupported " \
"state: (" + str(existing_state) + ")"
self.__set_data(, self.__push_count)
self.__set_state(fss.constants.PCS_FINISHED) | This stores the number of items that have been pushed, and
transitions the current component to the FINISHED state (which precedes
the STOPPED state). The FINISHED state isn't really necessary unless
methods/hooks are overridden to depend on it, but the count must be
stored at one point so that the next component knows how many items to
expect. This is done by default after the loop breaks, but can be
manually called sooner, if desired. |
385,669 | def parse_keys_and_ranges(i_str, keyfunc, rangefunc):
while i_str:
m = _STREAM_ID_RE.match(i_str)
if m:
for retval in keyfunc(stream_id_to_kvlayer_key(m.group())):
yield retval
i_str = i_str[m.end():]
        while i_str and ((i_str[0] == ',') or (i_str[0] == ';')):
i_str = i_str[1:]
continue
if len(i_str) == SI_KEY_LENGTH:
key = parse_si_key(i_str)
for retval in keyfunc(key):
yield retval
return
keya = i_str[:SI_KEY_LENGTH]
splitc = i_str[SI_KEY_LENGTH]
        if splitc == '<':
keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH]
i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:]
keya = parse_si_key(keya)
keyb = parse_si_key(keyb)
for retval in rangefunc(keya, keyb):
yield retval
        elif splitc == ';':
keya = parse_si_key(keya)
for retval in keyfunc(keya):
yield retval
i_str = i_str[SI_KEY_LENGTH+1+1:]
else:
logger.error(, splitc, i_str)
return | Parse the :class:`from_kvlayer` input string.
This accepts two formats. In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs. In the
binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4
bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` loads
scans keys `a` through `f` and loads singly key `x`.
`keyfunc` and `rangefunc` are run as generators and their yields
are yielded from this function. |
385,670 | def list_versions(self, layer_id):
target_url = self.client.get_url(, , , {: layer_id})
return base.Query(self, target_url, valid_filter_attributes=(,), valid_sort_attributes=()) | Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively. |
385,671 | def sigma_clipping(date, mag, err, threshold=3, iteration=1):
if (len(date) != len(mag)) \
or (len(date) != len(err)) \
or (len(mag) != len(err)):
raise RuntimeError()
for i in range(int(iteration)):
mean = np.median(mag)
std = np.std(mag)
index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std)
date = date[index]
mag = mag[index]
err = err[index]
return date, mag, err | Remove any fluctuated data points by magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
iteration : int, optional
The number of iteration.
Returns
-------
date : array_like
Sigma-clipped dates.
mag : array_like
Sigma-clipped magnitudes.
err : array_like
Sigma-clipped magnitude errors. |
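A small usage sketch, assuming `sigma_clipping` is importable from the module above; it injects one obvious outlier into synthetic magnitudes and shows that clipping removes it:

```python
import numpy as np
from mymodule import sigma_clipping  # hypothetical import path

rng = np.random.RandomState(0)
date = np.arange(100.0)
mag = rng.normal(15.0, 0.05, 100)
mag[10] += 2.0                     # inject an obvious outlier
err = np.full(100, 0.05)

date_c, mag_c, err_c = sigma_clipping(date, mag, err, threshold=3, iteration=2)
print(len(mag), "->", len(mag_c))  # the 3-sigma outlier is dropped
```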
385,672 | def delete(self, id):
try:
response = yield self.client.delete(id)
if response.get("n") > 0:
self.write({"message": "Deleted %s object: %s" % (self.object_name, id) })
return
self.raise_error(404, "Resource not found")
except InvalidId as ex:
self.raise_error(400, message="Your ID is malformed: %s" % id)
except:
self.raise_error()
self.finish() | Delete a resource by bson id
:raises: 404 Not Found
:raises: 400 Bad request
:raises: 500 Server Error |
385,673 | def get_sequence_rule_mdata():
return {
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [None],
: ,
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [None],
: ,
: None,
: None,
: []
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [None],
: ,
: None,
: None,
: []
},
} | Return default mdata map for SequenceRule |
385,674 | def down_capture(returns, factor_returns, **kwargs):
return down(returns, factor_returns, function=capture, **kwargs) | Compute the capture ratio for periods when the benchmark return is negative
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
Noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
Returns
-------
down_capture : float
Note
----
See http://www.investopedia.com/terms/d/down-market-capture-ratio.asp for
more information. |
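A small usage sketch (daily, noncumulative returns; the numbers are made up and `period='daily'` is taken from the documented keyword):

import numpy as np

returns = np.array([0.01, -0.02, 0.005, -0.012, 0.02])
benchmark = np.array([0.012, -0.015, 0.004, -0.010, 0.018])
# compounded strategy return divided by compounded benchmark return,
# computed only over the periods when the benchmark was down
ratio = down_capture(returns, benchmark, period='daily')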
385,675 | def _setup_subpix(self,nside=2**16):
if hasattr(self, 'subpix'): return
self.roi_radius = self.config[][]
logger.info("Setup subpixels...")
self.nside_pixel = self.config[][]
self.nside_subpixel = self.nside_pixel * 2**4
epsilon = np.degrees(hp.max_pixrad(self.nside_pixel))
subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon)
superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel)
self.subpix = subpix[np.in1d(superpix,self.roi.pixels)] | Subpixels for random position generation. |
385,676 | def insert_from_segmentlistdict(self, seglists, name, version = None, comment = None, valid=None):
for instrument, segments in seglists.items():
if valid is None:
curr_valid = ()
else:
curr_valid = valid[instrument]
self.add(LigolwSegmentList(active = segments, instruments = set([instrument]), name = name, version = version, comment = comment, valid = curr_valid)) | Insert the segments from the segmentlistdict object
seglists as a new list of "active" segments into this
LigolwSegments object. The dictionary's keys are assumed
to provide the instrument name for each segment list. A
new entry will be created in the segment_definer table for
the segment lists, and the dictionary's keys, the name, and
comment will be used to populate the entry's metadata. |
385,677 | def assign(self, attrs):
for k, v in attrs.items():
setattr(self, k, v) | Merge new attributes |
385,678 | def discretize_wd_style(N, q, F, d, Phi):
DEBUG = False
Ts = []
potential =
r0 = libphoebe.roche_pole(q, F, d, Phi)
pot_name = potential
dpdx = globals()[%(pot_name)]
dpdy = globals()[%(pot_name)]
dpdz = globals()[%(pot_name)]
if DEBUG:
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax1.set_xlim(-0.3, 0.3)
ax1.set_ylim(-0.3, 0.3)
ax2.set_xlim(-0.3, 0.3)
ax2.set_ylim(-0.3, 0.3)
ax3.set_xlim(-0.3, 0.3)
ax3.set_ylim(-0.3, 0.3)
ax1.set_xlabel()
ax1.set_ylabel()
ax2.set_xlabel()
ax2.set_ylabel()
ax3.set_xlabel()
ax3.set_ylabel()
theta = np.array([np.pi/2*(k-0.5)/N for k in range(1, N+2)])
phi = np.array([[np.pi*(l-0.5)/Mk for l in range(1, Mk+1)] for Mk in np.array(1 + 1.3*N*np.sin(theta), dtype=int)])
for t in range(len(theta)-1):
dtheta = theta[t+1]-theta[t]
for i in range(len(phi[t])):
dphi = phi[t][1]-phi[t][0]
rc = np.array((r0*sin(theta[t])*cos(phi[t][i]), r0*sin(theta[t])*sin(phi[t][i]), r0*cos(theta[t])))
vc = project_onto_potential(rc, potential, d, q, F, Phi).r
cosgamma = np.dot(vc, nc)/np.sqrt(np.dot(vc, vc))/np.sqrt(np.dot(nc, nc))
dsigma = np.abs(np.dot(vc, vc)*np.sin(theta[t])/cosgamma*dtheta*dphi)
side1 = sqrt((r1[0]-r2[0])**2 + (r1[1]-r2[1])**2 + (r1[2]-r2[2])**2)
side2 = sqrt((r1[0]-r3[0])**2 + (r1[1]-r3[1])**2 + (r1[2]-r3[2])**2)
side3 = sqrt((r2[0]-r3[0])**2 + (r2[1]-r3[1])**2 + (r2[2]-r3[2])**2)
s = 0.5*(side1 + side2 + side3)
dsigma_t_sq = s*(s-side1)*(s-side2)*(s-side3)
dsigma_t = sqrt(dsigma_t_sq) if dsigma_t_sq > 0 else 0.0
if DEBUG:
fc =
verts = [(r1[0], r1[1]), (r2[0], r2[1]), (r3[0], r3[1]), (r4[0], r4[1]), (r1[0], r1[1])]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=fc, lw=2)
ax1.add_patch(patch)
verts = [(r1[0], r1[2]), (r2[0], r2[2]), (r3[0], r3[2]), (r4[0], r4[2]), (r1[0], r1[2])]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=fc, lw=2)
ax2.add_patch(patch)
verts = [(r1[1], r1[2]), (r2[1], r2[2]), (r3[1], r3[2]), (r4[1], r4[2]), (r1[1], r1[2])]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=fc, lw=2)
ax3.add_patch(patch)
Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r1[0], r1[1], r1[2], r2[0], r2[1], r2[2], r3[0], r3[1], r3[2], nc[0], nc[1], nc[2], theta[t], phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r3[0], r3[1], r3[2], r4[0], r4[1], r4[2], r1[0], r1[1], r1[2], nc[0], nc[1], nc[2], theta[t], phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r1[0], -r1[1], r1[2], r2[0], -r2[1], r2[2], r3[0], -r3[1], r3[2], nc[0], -nc[1], nc[2], theta[t], -phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r3[0], -r3[1], r3[2], r4[0], -r4[1], r4[2], r1[0], -r1[1], r1[2], nc[0], -nc[1], nc[2], theta[t], -phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r1[0], r1[1], -r1[2], r2[0], r2[1], -r2[2], r3[0], r3[1], -r3[2], nc[0], nc[1], -nc[2], np.pi-theta[t], phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r3[0], r3[1], -r3[2], r4[0], r4[1], -r4[2], r1[0], r1[1], -r1[2], nc[0], nc[1], -nc[2], np.pi-theta[t], phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r1[0], -r1[1], -r1[2], r2[0], -r2[1], -r2[2], r3[0], -r3[1], -r3[2], nc[0], -nc[1], -nc[2], np.pi-theta[t], -phi[t][0], dsigma_t)))
Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r3[0], -r3[1], -r3[2], r4[0], -r4[1], -r4[2], r1[0], -r1[1], -r1[2], nc[0], -nc[1], -nc[2], np.pi-theta[t], -phi[t][0], dsigma_t)))
if DEBUG:
plt.show()
table = np.array(Ts)
return table | TODO: add documentation
New implementation. I'll make this work first, then document. |
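The colatitude/longitude grid built near the top of the function is easy to inspect on its own; a standalone numpy snippet (N is arbitrary here):

import numpy as np

N = 8
theta = np.array([np.pi/2*(k-0.5)/N for k in range(1, N+2)])   # colatitude rows
Mk = np.array(1 + 1.3*N*np.sin(theta), dtype=int)              # points per row
phi = [[np.pi*(l-0.5)/m for l in range(1, m+1)] for m in Mk]   # azimuths per row
# one octant only; the function mirrors every trapezoid into the other
# seven octants when it appends the Ts rows
print(list(zip(np.degrees(theta).round(1), Mk)))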
385,679 | def on_service_departure(self, svc_ref):
with self._lock:
if svc_ref is self.reference:
self._value.unset_service()
self.reference = None
self._pending_ref = self._context.get_service_reference(
self.requirement.specification, self.requirement.filter
)
if self._pending_ref is None:
self.__still_valid = True
self.__timer_args = (self._value, svc_ref)
self.__timer = threading.Timer(
self.__timeout, self.__unbind_call, (False,)
)
self.__timer.start()
else:
self._ipopo_instance.unbind(self, self._value, svc_ref)
return True
return None | Called when a service has been unregistered from the framework
:param svc_ref: A service reference |
385,680 | def run_friedman_smooth(x, y, span):
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
residuals = numpy.zeros(N)
mace.smooth(x, y, weight, span, 1, 1e-7, results, residuals)
return results, residuals | Run the FORTRAN smoother. |
385,681 | def forwards(self, orm):
"Write your forwards methods here."
db_table = orm[]._meta.db_table
db.execute(.format(db_table))
db.execute(.format(db_table))
cohorts = list(orm[].objects.all())
for c in cohorts:
db.execute(, [c.pk]) | Write your forwards methods here. |
385,682 | def ObsBandpass(obstring, graphtable=None, comptable=None, component_dict={}):
ob=ObservationMode(obstring,graphtable=graphtable,
comptable=comptable,component_dict=component_dict)
if len(ob) > 1:
return ObsModeBandpass(ob)
else:
return TabularSpectralElement(ob.components[0].throughput_name) | Generate a bandpass object from observation mode.
If the bandpass consists of multiple throughput files
(e.g., "acs,hrc,f555w"), then `ObsModeBandpass` is returned.
Otherwise, if it consists of a single throughput file
(e.g., "johnson,v"), then `~pysynphot.spectrum.TabularSpectralElement`
is returned.
See :ref:`pysynphot-obsmode-bandpass` and :ref:`pysynphot-appendixb`
for more details.
Parameters
----------
obstring : str
Observation mode.
graphtable, comptable, component_dict
See `~pysynphot.observationmode.ObservationMode`.
Returns
-------
bp : `~pysynphot.spectrum.TabularSpectralElement` or `ObsModeBandpass`
Examples
--------
>>> bp1 = S.ObsBandpass('acs,hrc,f555w')
>>> bp2 = S.ObsBandpass('johnson,v') |
385,683 | def getLayout(kind=None,theme=None,title=,xTitle=,yTitle=,zTitle=,barmode=,bargap=None,bargroupgap=None,
margin=None, dimensions=None, width=None, height=None,
annotations=None,is3d=False,**kwargs):
for key in list(kwargs.keys()):
if key not in __LAYOUT_KWARGS:
raise Exception("Invalid keyword : ".format(key))
if not theme:
theme = auth.get_config_file()[]
theme_data = getTheme(theme)
layout=theme_data[]
layout[].update({:xTitle})
layout[].update({:yTitle})
fontfamily=kwargs.pop(,None)
if fontfamily:
deep_update(layout,{:{:fontfamily}})
if barmode:
layout.update({:barmode})
if bargroupgap:
layout.update({:bargroupgap})
if bargap:
layout.update(bargap=bargap)
if title:
layout.update({:title})
if annotations:
layout.update({:annotations})
def update_axis(layout,axis=,**vals):
for _x in axis:
for k,v in list(vals.items()):
if v==None:
vals.pop(k)
for k in layout:
if .format(_x,) in k:
layout[k].update(**vals)
return layout
axis_kwargs=check_kwargs(kwargs,__LAYOUT_AXIS,{},True)
xaxis_kwargs=kwargs_from_keyword(kwargs,{},,True)
yaxis_kwargs=kwargs_from_keyword(kwargs,{},,True)
for _x,_vals in ((,axis_kwargs),(,xaxis_kwargs),(,yaxis_kwargs)):
layout=update_axis(layout,_x,**_vals)
if margin:
if isinstance(margin,dict):
margin=margin
else:
margin=dict(list(zip((,,,),margin)))
layout.update(margin=margin)
if dimensions:
layout.update(width=dimensions[0])
layout.update(height=dimensions[1])
if height:
layout.update(height=height)
if width:
layout.update(width=width)
if is3d:
if in theme_data:
layout=deep_update(layout,theme_data[])
zaxis=layout[].copy()
zaxis.update(title=zTitle)
scene=dict(xaxis=layout[].copy(),yaxis=layout[].copy(),zaxis=zaxis)
layout.update(scene=scene)
del layout[]
del layout[]
for r in [,,]:
if .format(r) in kwargs:
if is3d:
layout[][.format(r)].update(range=kwargs[.format(r)])
else:
layout[.format(r)].update(range=kwargs[.format(r)])
if kind in (,,):
layout[]=layout[].copy()
layout[].update(showticklabels=False)
if in kwargs:
if type(kwargs[])==bool:
layout[]=kwargs[]
elif type(kwargs[])==str:
if kwargs[]==:
layout[].update(orientation=,yanchor=,x=.3,y=.95)
elif kwargs[]==:
layout[].update(orientation=,yanchor=,x=.3,y=-0.5)
layout[]=True
else:
layout[]=kwargs[]
layout[]=True
if in kwargs:
layout[]=kwargs[]
for _ in [,,]:
if .format(_) in kwargs:
if is3d:
if kwargs[.format(_)]:
layout[][.format(_)][]=
else:
if kwargs[.format(_)]:
layout[.format(_)][]=
if any(k in kwargs for k in [,,,,]):
shapes=[]
def get_shapes(xline):
orientation=xline[0]
xline=kwargs[xline]
if isinstance(xline,list):
for x_i in xline:
if isinstance(x_i,dict):
x_i[]=
shapes.append(get_shape(**x_i))
else:
if orientation==:
shapes.append(get_shape(kind=,y=x_i))
else:
shapes.append(get_shape(kind=,x=x_i))
elif isinstance(xline,dict):
shapes.append(get_shape(**xline))
else:
if orientation==:
shapes.append(get_shape(kind=,y=xline))
else:
shapes.append(get_shape(kind=,x=xline))
def get_span(xspan):
orientation=xspan[0]
xspan=kwargs[xspan]
if isinstance(xspan,list):
for x_i in xspan:
if isinstance(x_i,dict):
x_i[]=
shapes.append(get_shape(**x_i))
else:
v0,v1=x_i
if orientation==:
shapes.append(get_shape(kind=,y0=v0,y1=v1,fill=True,opacity=.5))
else:
shapes.append(get_shape(kind=,x0=v0,x1=v1,fill=True,opacity=.5))
elif isinstance(xspan,dict):
xspan[]=
shapes.append(get_shape(**xspan))
elif isinstance(xspan,tuple):
v0,v1=xspan
if orientation==:
shapes.append(get_shape(kind=,y0=v0,y1=v1,fill=True,opacity=.5))
else:
shapes.append(get_shape(kind=,x0=v0,x1=v1,fill=True,opacity=.5))
else:
raise Exception(.format(orientation,xspan))
if in kwargs:
get_shapes()
if in kwargs:
get_shapes()
if in kwargs:
get_span()
if in kwargs:
get_span()
if in kwargs:
shapes_=kwargs[]
if isinstance(shapes_,list):
for i in shapes_:
shp=i if in i else get_shape(**i)
shapes.append(shp)
elif isinstance(shapes_,dict):
shp=shapes_ if in shapes_ else get_shape(**shapes_)
shapes.append(shp)
else:
raise Exception("Shapes need to be either a dict or list of dicts")
layout[]=shapes
if kind in (,):
kw=check_kwargs(kwargs,__GEO_KWARGS)
defaults={:{:},:False,:False}
for k,v in list(defaults.items()):
if k not in kw:
kw[k]=v
kw_=kwargs_from_keyword(kw,{},)
deep_update(kw,kw_)
layout[]=kw
del layout[]
del layout[]
if not margin:
layout[]={:True}
if in kwargs:
rs=kwargs[]
if in rs:
axis=rs[]
del rs[]
else:
axis=
layout[axis][]=get_range_selector(**rs)
if in kwargs:
if type(kwargs[])==bool:
if kwargs[]:
layout[][]=dict(visible=kwargs[])
else:
layout[][]=dict(visible=False)
else:
layout[][]=kwargs[]
else:
if kind in (,,):
layout[][]=dict(visible=False)
if in kwargs:
layout=deep_update(layout,kwargs[])
return layout | Generates a plotly Layout
Parameters:
-----------
theme : string
Layout Theme
solar
pearl
white
title : string
Chart Title
xTitle : string
X Axis Title
yTitle : string
Y Axis Title
zTitle : string
Z Axis Title
Applicable only for 3d charts
barmode : string
Mode when displaying bars
group
stack
overlay
bargap : float
Sets the gap between bars
[0,1)
Applicable for bar and histogram plots
bargroupgap : float
Sets the gap between groups
[0,1)
Applicable for bar and histogram plots
gridcolor : string
grid color
zerolinecolor : string
zero line color
margin : dict or tuple
Dictionary (l,r,b,t) or
Tuple containing the left,
right, bottom and top margins
dimensions : tuple
Dimensions of figure
annotations : dict or list
Dictionary of annotations
{x_point : text}
or
List of Plotly Annotations
is3d : bool
Indicates if the layout is for a 3D chart
Other Kwargs
============
Shapes
hline : int, list or dict
Draws a horizontal line at the
indicated y position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
vline : int, list or dict
Draws a vertical line at the
indicated x position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
hspan : (y0,y1)
Draws a horizontal rectangle at the
indicated (y0,y1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
vspan : (x0,x1)
Draws a vertical rectangle at the
indicated (x0,x1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
shapes : dict or list(dict)
List of dictionaries with the
specifications of a given shape.
See help(cufflinks.tools.get_shape)
for more information
Axis Ranges
xrange : [lower_bound,upper_bound]
Sets the range for the x axis
yrange : [lower_bound,upper_bound]
Sets the range for the y axis
zrange : [lower_bound,upper_bound]
Sets the range for the z axis
Explicit Layout Updates
layout_update : dict
The layout will be modified with all
the explicit values stated in the
dictionary
Range Selector
rangeselector : dict
Defines a rangeselector object
see help(cf.tools.get_range_selector) for more information
Example:
{'steps':['1y','2 months','5 weeks','ytd','2mtd'],
'axis':'xaxis', 'bgcolor' : ('blue',.3),
'x': 0.2 , 'y' : 0.9}
Range Slider
rangeslider : bool or dict
Defines if a rangeslider is displayed
If bool:
True : Makes it visible
if dict:
Rangeslider object
Example:
{'bgcolor':('blue',.3),'autorange':True}
Annotations
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters. |
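An illustrative call that sticks to parameters documented above (the theme name and the numbers are arbitrary):

layout = getLayout(
    theme='pearl',
    title='Daily returns',
    xTitle='Date', yTitle='Return',
    dimensions=(900, 450),
    margin=(80, 40, 40, 60),   # left, right, bottom, top
    hline=0,                   # horizontal reference line at y=0
    vspan=(20, 40))            # shaded vertical band between x=20 and x=40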
385,684 | def memoized(maxsize=1024):
cache = SimpleCache(maxsize=maxsize)
def decorator(obj):
@wraps(obj)
def new_callable(*a, **kw):
def create_new():
return obj(*a, **kw)
key = (a, tuple(kw.items()))
return cache.get(key, create_new)
return new_callable
return decorator | Memoization decorator for immutable classes and pure functions. |
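A usage sketch, assuming `SimpleCache.get(key, factory)` returns the cached value when present and otherwise calls the factory:

call_count = 0

@memoized(maxsize=16)
def slow_square(x):
    global call_count
    call_count += 1
    return x * x

assert slow_square(4) == 16
assert slow_square(4) == 16
assert call_count == 1          # second call is served from the cache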
385,685 | def build_map_type_validator(item_validator):
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping | Return a function which validates that the value is a mapping of
items. The `item_validator` should return pairs of items that will be
passed to the `dict` constructor. |
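A sketch of how the validator composes, assuming `validate_list` simply checks and returns a list; the env-pair parser is made up for illustration:

def parse_env_pair(item):
    # each "KEY=value" item becomes a (key, value) pair for dict()
    key, _, value = item.partition('=')
    return key.strip(), value.strip()

validate_env = build_map_type_validator(parse_env_pair)
# validate_env(['HOME=/root', 'LANG=C']) -> {'HOME': '/root', 'LANG': 'C'}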
385,686 | def calc_remotedemand_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
flu.remotedemand = max(con.remotedischargeminimum[der.toy[self.idx_sim]] -
flu.naturalremotedischarge, 0.) | Estimate the discharge demand of a cross section far downstream.
Required control parameter:
|RemoteDischargeMinimum|
Required derived parameters:
|dam_derived.TOY|
Required flux sequence:
|NaturalRemoteDischarge|
Calculated flux sequence:
|RemoteDemand|
Basic equation:
:math:`RemoteDemand =
max(RemoteDischargeMinimum - NaturalRemoteDischarge, 0)`
Examples:
Low water elevation is often restricted to specific months of the year.
Sometimes the pursued lowest discharge value varies over the year
to allow for a low flow variability that is in some agreement with
the natural flow regime. The HydPy-Dam model supports such
variations. Hence we define a short simulation time period first.
This enables us to show how the related parameters values can be
defined and how the calculation of the `remote` water demand
throughout the year actually works:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
Assume the required discharge at a gauge downstream to be 2 m³/s
in the hydrological summer half-year (April to October). In the
winter months (November to March), there is no such requirement:
>>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> derived.toy.update()
Prepare a test function, that calculates the remote discharge demand
based on the parameter values defined above and for natural remote
discharge values ranging between 0 and 3 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_remotedemand_v1, last_example=4,
... parseqs=(fluxes.naturalremotedischarge,
... fluxes.remotedemand))
>>> test.nexts.naturalremotedischarge = range(4)
On April 1, the required discharge is 2 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | naturalremotedischarge | remotedemand |
-----------------------------------------------
| 1 | 0.0 | 2.0 |
| 2 | 1.0 | 1.0 |
| 3 | 2.0 | 0.0 |
| 4 | 3.0 | 0.0 |
On March 31, the required discharge is 0 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | naturalremotedischarge | remotedemand |
-----------------------------------------------
| 1 | 0.0 | 0.0 |
| 2 | 1.0 | 0.0 |
| 3 | 2.0 | 0.0 |
| 4 | 3.0 | 0.0 | |
385,687 | def create_frames(until=None):
now = Date.now()
if until:
get_orbit(until, now)
else:
for body in list_bodies():
get_orbit(body.name, now) | Create frames available in the JPL files
Args:
until (str): Name of the body you want to create the frame of, and all frames in between.
If ``None`` all the frames available in the .bsp files will be created
Example:
.. code-block:: python
# All frames between Earth and Mars are created (Earth, EarthBarycenter,
# SolarSystemBarycenter, MarsBarycenter and Mars)
create_frames(until='Mars')
# All frames between Earth and Phobos are created (Earth, EarthBarycenter,
# SolarSystemBarycenter, MarsBarycenter and Phobos)
create_frames(until='Phobos')
# All frames available in the .bsp files are created
create_frames() |
385,688 | def edit_item(self):
index = self.currentIndex()
if not index.isValid():
return
self.edit(index.child(index.row(), 3)) | Edit item |
385,689 | def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2)
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
else:
pool = mp.Pool(processes=n_proc)
mp_results = []
for i in xrange(nstars):
mp_results.append(pool.apply_async(\
_find_cont_fitfunc,
(fluxes[i, :].reshape((1, -1)),
ivars[i, :].reshape((1, -1)),
contmask[:]),
{'deg': deg, 'ffunc': ffunc}))
pool.close()
pool.join()
cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])
return cont | Fit a continuum to the continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes |
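The Chebyshev branch is easy to exercise in isolation; a stripped-down, runnable illustration for a single made-up spectrum (numpy only):

import numpy as np

npix = 500
pix = np.arange(npix)
true_cont = 1.0 + 1e-4 * pix                 # slowly varying continuum
flux = true_cont.copy()
flux[::25] -= 0.3                            # absorption-like dips
ivar = np.full(npix, 1.0e4)
contmask = np.ones(npix, dtype=bool)
contmask[::25] = False                       # keep the dips out of the fit

fit = np.polynomial.chebyshev.Chebyshev.fit(
    x=pix[contmask], y=flux[contmask], w=ivar[contmask], deg=3)
cont = fit(pix)
assert np.allclose(cont, true_cont, atol=1e-3)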
385,690 | def _polar(self):
try:
return self._hidden_polar_axes
except AttributeError:
fig = self.get_figure()
self._hidden_polar_axes = fig.add_axes(self.get_position(True),
frameon=False, projection='polar')
self._hidden_polar_axes.format_coord = self._polar_format_coord
return self._hidden_polar_axes | The "hidden" polar axis used for azimuth labels. |
385,691 | def covar_plotter3d_plotly(embedding, rieman_metric, inspect_points_idx,
colors, **kwargs):
def rgb2hex(rgb):
return '#%02x%02x%02x' % tuple(rgb)
return [ plt_data for idx in inspect_points_idx
for plt_data in plot_ellipse_plotly(
rieman_metric[idx], embedding[idx],
color=rgb2hex(colors[idx]), **kwargs) ] | 3 Dimensional Covariance plotter using plotly backend. |
385,692 | def make_input_from_plain_string(sentence_id: SentenceId, string: str) -> TranslatorInput:
return TranslatorInput(sentence_id, tokens=list(data_io.get_tokens(string)), factors=None) | Returns a TranslatorInput object from a plain string.
:param sentence_id: Sentence id.
:param string: An input string.
:return: A TranslatorInput. |
385,693 | def delete(self, name):
conn = self._client.connect()
conn.execute( self._table.delete().where(self._table.c.name==name) ) | Delete time series by name across all intervals. Returns the number of
records deleted. |
385,694 | def max_profit_optimized(prices):
cur_max, max_so_far = 0, 0
for i in range(1, len(prices)):
cur_max = max(0, cur_max + prices[i] - prices[i-1])
max_so_far = max(max_so_far, cur_max)
return max_so_far | input: [7, 1, 5, 3, 6, 4]
diff : [X, -6, 4, -2, 3, -2]
:type prices: List[int]
:rtype: int |
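A quick worked check against the docstring's example:

prices = [7, 1, 5, 3, 6, 4]
# Kadane's algorithm on the day-to-day differences:
# max subarray of [-6, 4, -2, 3, -2] is 4 - 2 + 3 = 5 (buy at 1, sell at 6)
assert max_profit_optimized(prices) == 5
assert max_profit_optimized([7, 6, 4, 3, 1]) == 0   # falling market: never buy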
385,695 | def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
dmp = apps.get_app_config()
try:
spec = find_spec(module_name)
except ValueError:
spec = None
if spec is None:
try:
return create_view_for_template(fallback_app, fallback_template)
except TemplateDoesNotExist as e:
raise ViewDoesNotExist(.format(module_name, fallback_template, e))
try:
module = import_module(module_name)
func = getattr(module, function_name)
func.view_type =
except ImportError as e:
raise ViewDoesNotExist(.format(module_name, e))
except AttributeError as e:
raise ViewDoesNotExist(.format(module_name, function_name, e))
if inspect.isclass(func) and issubclass(func, View):
func = func.as_view()
func.view_type =
elif verify_decorator and not view_function.is_decorated(func):
raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))
if dmp.options[] is not None:
try:
converter = import_qualified(dmp.options[])(func)
setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
except ImportError as e:
raise ImproperlyConfigured(.format(str(e)))
return func | Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found. |
385,696 | def get_timedelta_str(timedelta, exclude_zeros=False):
if timedelta == datetime.timedelta(0):
return '0 seconds'
days = timedelta.days
hours, rem = divmod(timedelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
fmtstr_list = []
fmtdict = {}
def append_cases(unit, fmtlbl):
if not exclude_zeros or unit != 0:
if unit == 1:
fmtstr_list.append('{%s} %s' % (fmtlbl, fmtlbl[:-1]))
else:
fmtstr_list.append('{%s} %s' % (fmtlbl, fmtlbl))
fmtdict[fmtlbl] = unit
if abs(days) > 0:
append_cases(days, 'days')
if len(fmtstr_list) > 0 or abs(hours) > 0:
append_cases(hours, 'hours')
if len(fmtstr_list) > 0 or abs(minutes) > 0:
append_cases(minutes, 'minutes')
if len(fmtstr_list) > 0 or abs(seconds) > 0:
append_cases(seconds, 'seconds')
fmtstr = ' '.join(fmtstr_list)
timedelta_str = fmtstr.format(**fmtdict)
return timedelta_str | get_timedelta_str
Returns:
str: timedelta_str, formatted time string
References:
http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> timedelta = get_unix_timedelta(10)
>>> timedelta_str = get_timedelta_str(timedelta)
>>> result = (timedelta_str)
>>> print(result)
10 seconds |
385,697 | def from_response(cls, response, attrs):
proj = response[]
index = cls(proj[], response[],
attrs[response[][1][]],
proj.get())
index.response = response
return index | Create an index from returned Dynamo data |
385,698 | def patch_clean_fields(model):
old_clean_fields = model.clean_fields
def new_clean_fields(self, exclude=None):
if hasattr(self, ):
for field_name, value in self._mt_form_pending_clear.items():
field = self._meta.get_field(field_name)
orig_field_name = field.translated_field.name
if orig_field_name in exclude:
field.save_form_data(self, value, check=False)
delattr(self, )
old_clean_fields(self, exclude)
model.clean_fields = new_clean_fields | Patch clean_fields method to handle different form types submission. |
385,699 | def get(self, name):
config = self.get_block( % name)
if not config:
return None
resource = super(EthernetInterface, self).get(name)
resource.update(dict(name=name, type='ethernet'))
resource.update(self._parse_sflow(config))
resource.update(self._parse_flowcontrol_send(config))
resource.update(self._parse_flowcontrol_receive(config))
return resource | Returns an interface as a set of key/value pairs
Args:
name (string): the interface identifier to retrieve the from
the configuration
Returns:
A Python dictionary object of key/value pairs that represent
the current configuration for the specified node. If the
specified interface name does not exist, then None is returned::
{
"name": <string>,
"type": "ethernet",
"sflow": [true, false],
"flowcontrol_send": [on, off],
"flowcontrol_receive": [on, off]
} |