Unnamed: 0 (int64, 0 to 389k) | code (stringlengths 26 to 79.6k) | docstring (stringlengths 1 to 46.9k) |
---|---|---|
381,300 | def threshold_img(data, threshold, mask=None, mask_out='below'):
if mask is not None:
mask = threshold_img(mask, threshold, mask_out=mask_out)
return data * mask.astype(bool)
if mask_out.startswith('b'):
data[data < threshold] = 0
elif mask_out.startswith('a'):
data[data > threshold] = 0
return data | Threshold data, setting all values in the array above/below threshold
to zero.
Args:
data (ndarray): The image data to threshold.
threshold (float): Numeric threshold to apply to image.
mask (ndarray): Optional 1D-array with the same length as the data. If
passed, the threshold is first applied to the mask, and the
resulting indices are used to threshold the data. This is primarily
useful when, e.g., applying a statistical threshold to a z-value
image based on a p-value threshold.
mask_out (str): Thresholding direction. Can be 'below' the threshold
(default) or 'above' the threshold. Note: use 'above' when masking
based on p values. |
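A short usage sketch for the row above, assuming the reconstructed `threshold_img` is in scope; the arrays are made-up illustration data.

```python
import numpy as np

# Hypothetical z-value image and matching p-value mask (1-D for simplicity).
z_img = np.array([0.5, 2.1, 3.4, 1.2])
p_img = np.array([0.20, 0.04, 0.001, 0.30])

# Keep voxels whose p-value is below 0.05: threshold the mask "above" 0.05,
# then use the surviving entries to mask the z image.
kept = threshold_img(z_img.copy(), 0.05, mask=p_img.copy(), mask_out='above')
# kept -> array([0. , 2.1, 3.4, 0. ])
```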
381,301 | def devices(self):
devices = None
with self.socket.Connect():
devices = self._command("host:devices")
return parse_device_list(devices) | Return a list of connected devices in the form (*serial*, *status*) where status can
be any of the following:
1. device
2. offline
3. unauthorized
:returns: A list of tuples representing connected devices |
381,302 | def _get_griddistrict(ding0_filepath):
grid_district = os.path.basename(ding0_filepath)
grid_district_search = re.search(r'__\d+', grid_district)  # pattern inferred from the "ding0_grids__<number>" naming scheme
if grid_district_search:
grid_district = int(grid_district_search.group(0)[2:])
return grid_district
else:
raise KeyError('Grid district number not found in {}'.format(grid_district)) | Just get the grid district number from ding0 data file path
Parameters
----------
ding0_filepath : str
Path to ding0 data ending typically
`/path/to/ding0_data/"ding0_grids__" + str(``grid_district``) + ".xxx"`
Returns
-------
int
grid_district number |
381,303 | def validate_scopes(self, client_id, scopes, client, request,
*args, **kwargs):
if hasattr(client, 'validate_scopes'):
return client.validate_scopes(scopes)
return set(client.default_scopes).issuperset(set(scopes)) | Ensure the client is authorized access to requested scopes. |
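A tiny illustration of the fallback rule in the row above: without a `validate_scopes` method, the requested scopes must be a subset of the client's default scopes. The scope names here are hypothetical.

```python
default_scopes = ['email', 'profile']

print(set(default_scopes).issuperset({'email'}))            # True  -> request allowed
print(set(default_scopes).issuperset({'email', 'admin'}))   # False -> request rejected
```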
381,304 | def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None):
def spotRequestNotFound(e):
return e.error_code == "InvalidSpotInstanceRequestID.NotFound"
for attempt in retry_ec2(retry_for=a_long_time,
retry_while=inconsistencies_detected):
with attempt:
requests = ec2.request_spot_instances(
price, image_id, count=num_instances, **spec)
if tags is not None:
for requestID in (request.id for request in requests):
for attempt in retry_ec2(retry_while=spotRequestNotFound):
with attempt:
ec2.create_tags([requestID], tags)
num_active, num_other = 0, 0
yield ec2.get_only_instances(instance_ids)
if not num_active:
message =
if tentative:
log.warn(message + )
else:
raise RuntimeError(message)
if num_other:
log.warn(, num_other) | :rtype: Iterator[list[Instance]] |
381,305 | def as_dict(self):
return {
"destination": self.destination,
"packet_transmit": self.packet_transmit,
"packet_receive": self.packet_receive,
"packet_loss_count": self.packet_loss_count,
"packet_loss_rate": self.packet_loss_rate,
"rtt_min": self.rtt_min,
"rtt_avg": self.rtt_avg,
"rtt_max": self.rtt_max,
"rtt_mdev": self.rtt_mdev,
"packet_duplicate_count": self.packet_duplicate_count,
"packet_duplicate_rate": self.packet_duplicate_rate,
} | ping statistics.
Returns:
|dict|:
Examples:
>>> import pingparsing
>>> parser = pingparsing.PingParsing()
>>> parser.parse(ping_result)
>>> parser.as_dict()
{
"destination": "google.com",
"packet_transmit": 60,
"packet_receive": 60,
"packet_loss_rate": 0.0,
"packet_loss_count": 0,
"rtt_min": 61.425,
"rtt_avg": 99.731,
"rtt_max": 212.597,
"rtt_mdev": 27.566,
"packet_duplicate_rate": 0.0,
"packet_duplicate_count": 0
} |
381,306 | def disable(self):
self.ticker_text_label.hide()
if self.current_observed_sm_m:
self.stop_sm_m_observation(self.current_observed_sm_m) | Relieve all state machines that have no active execution and hide the widget |
381,307 | def add_api_compression(self, api_id, min_compression_size):
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
) | Add Rest API compression |
381,308 | def _profile(self, frame, event, arg):
if event.startswith('c_'):  # ignore c_call/c_return/c_exception events
return
time1 = self.timer()
frames = self.frame_stack(frame)
if frames:
frames.pop()
parent_stats = self.stats
for f in frames:
parent_stats = parent_stats.ensure_child(f.f_code, void)
code = frame.f_code
frame_key = id(frame)
time2 = self.timer()
self.overhead += time2 - time1
if event == 'call':
time = time2 - self.overhead
self.record_entering(time, code, frame_key, parent_stats)
elif event == 'return':
time = time1 - self.overhead
self.record_leaving(time, code, frame_key, parent_stats)
time3 = self.timer()
self.overhead += time3 - time2 | The callback function to register by :func:`sys.setprofile`. |
381,309 | def unmount(self, client):
getattr(client, self.unmount_fun)(mount_point=self.path) | Unmounts a backend within Vault |
381,310 | def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
address = VERSION_ENDPOINT.format(server=server)
print()
log.info(, version)
log.info(, address)
try:
response = requests.get(address, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print()
log.warning(, address,
str(e), stack_info=True)
return False
response_json = response.json()
if not _validate_api_response(response_json):
print()
log.info(, address, response.text)
return False
current_version = response_json[][][0][]
if current_version == version:
print()
return True
download_link = response_json[][][0][]
log.info(, current_version, download_link)
try:
response = requests.get(download_link, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print()
log.warning(, str(e),
stack_info=True)
return False
log.info(, filename)
zip_binary = response.content
try:
_write_zip(filename, zip_binary)
except IOError as e:
print()
log.warning(, filename, str(e))
return False
else:
print(.format(current_version))
log.info(, filename)
return True | Check for the latest version of OK and update accordingly. |
381,311 | def _stinespring_to_choi(data, input_dim, output_dim):
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->jilk', stine_l, stine_r.conj()),
2 * [input_dim * output_dim]) | Transform Stinespring representation to Choi representation. |
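A hedged sanity check for the conversion above (it assumes the reconstructed einsum subscripts): for the single-qubit identity channel, the Choi matrix should be the unnormalized maximally entangled state.

```python
import numpy as np

stine = np.eye(2)  # identity channel: one Kraus operator, trivial ancilla (trace_dim == 1)
choi = _stinespring_to_choi((stine, None), input_dim=2, output_dim=2)
# Expected (|00> + |11>)(<00| + <11|):
# [[1, 0, 0, 1],
#  [0, 0, 0, 0],
#  [0, 0, 0, 0],
#  [1, 0, 0, 1]]
```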
381,312 | def _cdf(self, xloc, left, right, cache):
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise evaluation.DependencyError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.asfarray(left+right <= xloc)
else:
left, right = right, left
xloc = (xloc.T-numpy.asfarray(right).T).T
output = evaluation.evaluate_forward(left, xloc, cache=cache)
assert output.shape == xloc.shape
return output | Cumulative distribution function.
Example:
>>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.5 1. 1. ]
>>> print(chaospy.Add(chaospy.Uniform(), 1).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5 1. ]
>>> print(chaospy.Add(1, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5 1. ]
>>> print(chaospy.Add(1, 1).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0. 1.] |
381,313 | def dup_idx(arr):
_, b = np.unique(arr, return_inverse=True)
return np.nonzero(np.logical_or.reduce(
b[:, np.newaxis] == np.nonzero(np.bincount(b) > 1),
axis=1))[0] | Return the indices of all duplicated array elements.
Parameters
----------
arr : array-like object
An array-like object
Returns
-------
idx : NumPy array
An array containing the indices of the duplicated elements
Examples
--------
>>> from root_numpy import dup_idx
>>> dup_idx([1, 2, 3, 4, 5])
array([], dtype=int64)
>>> dup_idx([1, 2, 3, 4, 5, 5])
array([4, 5])
>>> dup_idx([1, 2, 3, 4, 5, 5, 1])
array([0, 4, 5, 6]) |
381,314 | def get_feature_sequence(self, feature_id, organism=None, sequence=None):
data = {
: ,
: [
{: feature_id}
]
}
data = self._update_data(data, organism, sequence)
return self.post(, data) | [CURRENTLY BROKEN] Get the sequence of a feature
:type feature_id: str
:param feature_id: Feature UUID
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]}) |
381,315 | def SelfReferenceProperty(label=None, collection_name=None, **attrs):
if 'reference_class' in attrs:
raise ConfigurationError(
'Do not provide reference_class to self-reference.')
return ReferenceProperty(_SELF_REFERENCE, label, collection_name, **attrs) | Create a self reference. |
381,316 | def update(self):
self._tmpdir = tempfile.mkdtemp()
try:
self._rebase_file = self._tmpdir +
print
url =
header = {: }
req = urllib2.Request(url, headers=header)
con = urllib2.urlopen(req)
with open(self._rebase_file, ) as rebase_file:
rebase_file.write(con.read())
self._process_file()
except urllib2.HTTPError, e:
print .format(e.code, url)
print
self._enzyme_dict = coral.constants.fallback_enzymes
except urllib2.URLError, e:
print .format(e.reason, url)
print
self._enzyme_dict = coral.constants.fallback_enzymes
print
self.restriction_sites = {}
for key, (site, cuts) in self._enzyme_dict.iteritems():
try:
r = coral.RestrictionSite(coral.DNA(site), cuts, name=key)
self.restriction_sites[key] = r
except ValueError:
pass | Update definitions. |
381,317 | def mktmp(self):
try:
if not os.path.isdir(self.location):
os.makedirs(self.location)
except:
log.debug(self.location, exc_info=1)
return self | Make the I{location} directory if it doesn't already exist. |
381,318 | def _call_variants_samtools(align_bams, ref_file, items, target_regions, tx_out_file):
config = items[0]["config"]
mpileup = prep_mpileup(align_bams, ref_file, config,
target_regions=target_regions, want_bcf=True)
bcftools = config_utils.get_program("bcftools", config)
samtools_version = programs.get_version("samtools", config=config)
if samtools_version and LooseVersion(samtools_version) <= LooseVersion("0.1.19"):
raise ValueError("samtools calling not supported with pre-1.0 samtools")
bcftools_opts = "call -v -m"
compress_cmd = "| bgzip -c" if tx_out_file.endswith(".gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
cmd = ("{mpileup} "
"| {bcftools} {bcftools_opts} - "
"| {fix_ambig_ref} | {fix_ambig_alt} "
"| vt normalize -n -q -r {ref_file} - "
"| sed "
"| sed "
"| sed "
"| sed "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Variant calling with samtools", items[0]) | Call variants with samtools in target_regions.
Works around a GATK VCF 4.2 compatibility issue in samtools 1.0
by removing addition 4.2-only isms from VCF header lines. |
381,319 | def action_create(self, courseid, taskid, path):
path = path.strip()
if not path.startswith("/"):
path = "/" + path
want_directory = path.endswith("/")
wanted_path = self.verify_path(courseid, taskid, path, True)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Invalid new path"))
task_fs = self.task_factory.get_task_fs(courseid, taskid)
if want_directory:
task_fs.from_subfolder(wanted_path).ensure_exists()
else:
task_fs.put(wanted_path, b"")
return self.show_tab_file(courseid, taskid) | Create a file or a directory |
381,320 | def set_device_name(self, newname):
return self.write(request.SetDeviceName(self.seq, *self.prep_str(newname))) | Sets internal device name. (not announced bluetooth name).
requires utf-8 encoded string. |
381,321 | def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
return geoms | Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``. |
381,322 | def parse_qtype(self, param_type, param_value):
if param_type == 'quniform':
return self._parse_quniform(param_value)
if param_type == 'qloguniform':
param_value[:2] = np.log(param_value[:2])
return list(np.exp(self._parse_quniform(param_value)))
raise RuntimeError("Not supported type: %s" % param_type) | parse type of quniform or qloguniform |
381,323 | def _integrate_scipy(self, intern_xout, intern_y0, intern_p,
atol=1e-8, rtol=1e-8, first_step=None, with_jacobian=None,
force_predefined=False, name=None, **kwargs):
from scipy.integrate import ode
ny = intern_y0.shape[-1]
nx = intern_xout.shape[-1]
results = []
for _xout, _y0, _p in zip(intern_xout, intern_y0, intern_p):
if name is None:
if self.j_cb is None:
name = 'dopri5'
else:
name = 'lsoda'
if with_jacobian is None:
if name == 'lsoda':
with_jacobian = True
elif name in ('dop853', 'dopri5'):
with_jacobian = False
elif name == 'vode':
with_jacobian = kwargs.get('method', 'adams') == 'bdf'
def rhs(t, y, p=()):
rhs.ncall += 1
return self.f_cb(t, y, p)
rhs.ncall = 0
if self.j_cb is not None:
def jac(t, y, p=()):
jac.ncall += 1
return self.j_cb(t, y, p)
jac.ncall = 0
r = ode(rhs, jac=jac if with_jacobian else None)
if 'lband' in kwargs or 'uband' in kwargs or 'band' in kwargs:
raise ValueError("lband and uband set locally (set `band` at initialization instead)")
if self.band is not None:
kwargs['lband'], kwargs['uband'] = self.band
r.set_integrator(name, atol=atol, rtol=rtol, **kwargs)
if len(_p) > 0:
r.set_f_params(_p)
r.set_jac_params(_p)
r.set_initial_value(_y0, _xout[0])
if nx == 2 and not force_predefined:
mode = 'adaptive'
if name in ('vode', 'lsoda'):
warnings.warn("'adaptive' mode with SciPy's integrators is unreliable, consider using e.g. cvode")
ysteps = [_y0]
xsteps = [_xout[0]]
while r.t < _xout[1]:
r.integrate(_xout[1], step=True)
if not r.successful():
raise RuntimeError("failed")
xsteps.append(r.t)
ysteps.append(r.y)
else:
xsteps, ysteps = [], []
def solout(x, y):
xsteps.append(x)
ysteps.append(y)
r.set_solout(solout)
r.integrate(_xout[1])
if not r.successful():
raise RuntimeError("failed")
_yout = np.array(ysteps)
_xout = np.array(xsteps)
else:
mode = 'predefined'
_yout = np.empty((nx, ny))
_yout[0, :] = _y0
for idx in range(1, nx):
r.integrate(_xout[idx])
if not r.successful():
raise RuntimeError("failed")
_yout[idx, :] = r.y
info = {
'internal_xout': _xout,
'internal_yout': _yout,
'internal_params': _p,
'success': r.successful(),
'nfev': rhs.ncall,
'njev': -1,
}
results.append(info)
return results | Do not use directly (use ``integrate('scipy', ...)``).
Uses `scipy.integrate.ode <http://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html>`_
Parameters
----------
\*args :
See :meth:`integrate`.
name : str (default: 'lsoda'/'dopri5' when jacobian is available/not)
What integrator wrapped in scipy.integrate.ode to use.
\*\*kwargs :
Keyword arguments passed onto `set_integrator(...) <
http://docs.scipy.org/doc/scipy/reference/generated/
scipy.integrate.ode.set_integrator.html#scipy.integrate.ode.set_integrator>`_
Returns
-------
See :meth:`integrate`. |
381,324 | def from_dict(config):
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
) | Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client |
381,325 | def set_value(self, value: datetime):
assert isinstance(value, datetime)
self.value = value | Sets the current value |
381,326 | def absent(name, orgname=None, profile='grafana'):
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
datasource = __salt__['grafana4.get_datasource'](name, orgname, profile)
if not datasource:
ret['result'] = True
ret['comment'] = 'Data source {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Data source {0} will be deleted'.format(name)
return ret
__salt__['grafana4.delete_datasource'](datasource['id'], profile=profile)
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'Data source {0} was deleted'.format(name)
return ret | Ensure that a data source is absent.
name
Name of the data source to remove.
orgname
Name of the organization from which the data source should be absent.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'. |
381,327 | def plot(self,
resolution_constant_regions=20,
resolution_smooth_regions=200):
if self.eps == 0:
x = []; y = []
for I, value in zip(self._indicator_functions, self._values):
x.append(I.L)
y.append(value)
x.append(I.R)
y.append(value)
return x, y
else:
n = float(resolution_smooth_regions)/self.eps
if len(self.data) == 1:
return [self.L, self.R], [self._values[0], self._values[0]]
else:
x = [np.linspace(self.data[0][0], self.data[1][0]-self.eps,
resolution_constant_regions+1)]
for I in self._indicator_functions[1:]:
x.append(np.linspace(I.L-self.eps, I.L+self.eps,
resolution_smooth_regions+1))
x.append(np.linspace(I.L+self.eps, I.R-self.eps,
resolution_constant_regions+1))
x.append(np.linspace(I.R-self.eps, I.R, 3))
x = np.concatenate(x)
y = self(x)
return x, y | Return arrays x, y for plotting the piecewise constant function.
Just the minimum number of straight lines are returned if
``eps=0``, otherwise `resolution_constant_regions` plotting intervals
are inserted in the constant regions with `resolution_smooth_regions`
plotting intervals in the smoothed regions. |
381,328 | def trim_sparse(M, n_std=3, s_min=None, s_max=None):
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am performing dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
return N | Apply the trimming procedure to a sparse matrix. |
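A small usage sketch for `trim_sparse`, assuming the function above is importable; the matrix and `n_std` value are illustrative.

```python
import numpy as np
from scipy.sparse import coo_matrix

# Toy symmetric matrix where row/column 2 is far sparser than the others.
dense = np.array([[5, 4, 0, 3],
                  [4, 6, 0, 2],
                  [0, 0, 1, 0],
                  [3, 2, 0, 7]])
M = coo_matrix(dense)

# Drop rows/columns whose coverage is more than 1 standard deviation from the mean.
trimmed = trim_sparse(M, n_std=1)
# trimmed.toarray() no longer contains entries from the outlier row/column.
```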
381,329 | def main():
parser = arg_parser(usage=)
parser.add_option(, , type=, default=10,
help=
)
parser.add_option(, , action=,
help=(
))
parser.add_option(, , type=, default=10,
help=
)
options, args = parser.parse_args()
if options.verbose == 1:
logger.setLevel(logging.INFO)
elif options.verbose > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.NOTSET)
logger.addHandler(logging.StreamHandler())
if len(args) != 2:
parser.error()
subreddit, view = args
check_for_updates(options)
srs = SubredditStats(subreddit, options.site, options.distinguished)
result = srs.run(view, options.submitters, options.commenters)
if result:
print(result.permalink)
return 0 | Provide the entry point to the subreddit_stats command. |
381,330 | def create_encoder_config(args: argparse.Namespace,
max_seq_len_source: int,
max_seq_len_target: int,
config_conv: Optional[encoder.ConvolutionalEmbeddingConfig],
num_embed_source: int) -> Tuple[encoder.EncoderConfig, int]:
encoder_num_layers, _ = args.num_layers
config_encoder = None
if args.decoder_only:
if args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
encoder_num_hidden = args.transformer_model_size[0]
elif args.encoder == C.CONVOLUTION_TYPE:
encoder_num_hidden = args.cnn_num_hidden
else:
encoder_num_hidden = args.rnn_num_hidden
config_encoder = encoder.EmptyEncoderConfig(num_embed=num_embed_source,
num_hidden=encoder_num_hidden)
elif args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
encoder_transformer_preprocess, _ = args.transformer_preprocess
encoder_transformer_postprocess, _ = args.transformer_postprocess
encoder_transformer_model_size = args.transformer_model_size[0]
total_source_factor_size = sum(args.source_factors_num_embed)
if args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT and total_source_factor_size > 0:
logger.info("Encoder transformer-model-size adjusted to account for source factor embeddings: %d -> %d" % (
encoder_transformer_model_size, num_embed_source + total_source_factor_size))
encoder_transformer_model_size = num_embed_source + total_source_factor_size
config_encoder = transformer.TransformerConfig(
model_size=encoder_transformer_model_size,
attention_heads=args.transformer_attention_heads[0],
feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[0],
act_type=args.transformer_activation_type,
num_layers=encoder_num_layers,
dropout_attention=args.transformer_dropout_attention,
dropout_act=args.transformer_dropout_act,
dropout_prepost=args.transformer_dropout_prepost,
positional_embedding_type=args.transformer_positional_embedding_type,
preprocess_sequence=encoder_transformer_preprocess,
postprocess_sequence=encoder_transformer_postprocess,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
conv_config=config_conv,
lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc))
encoder_num_hidden = encoder_transformer_model_size
elif args.encoder == C.CONVOLUTION_TYPE:
cnn_kernel_width_encoder, _ = args.cnn_kernel_width
cnn_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_encoder,
num_hidden=args.cnn_num_hidden,
act_type=args.cnn_activation_type,
weight_normalization=args.weight_normalization)
cnn_num_embed = num_embed_source
if args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT:
cnn_num_embed += sum(args.source_factors_num_embed)
config_encoder = encoder.ConvolutionalEncoderConfig(num_embed=cnn_num_embed,
max_seq_len_source=max_seq_len_source,
cnn_config=cnn_config,
num_layers=encoder_num_layers,
positional_embedding_type=args.cnn_positional_embedding_type)
encoder_num_hidden = args.cnn_num_hidden
else:
encoder_rnn_dropout_inputs, _ = args.rnn_dropout_inputs
encoder_rnn_dropout_states, _ = args.rnn_dropout_states
encoder_rnn_dropout_recurrent, _ = args.rnn_dropout_recurrent
config_encoder = encoder.RecurrentEncoderConfig(
rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
num_hidden=args.rnn_num_hidden,
num_layers=encoder_num_layers,
dropout_inputs=encoder_rnn_dropout_inputs,
dropout_states=encoder_rnn_dropout_states,
dropout_recurrent=encoder_rnn_dropout_recurrent,
residual=args.rnn_residual_connections,
first_residual_layer=args.rnn_first_residual_layer,
forget_bias=args.rnn_forget_bias,
lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc)),
conv_config=config_conv,
reverse_input=args.rnn_encoder_reverse_input)
encoder_num_hidden = args.rnn_num_hidden
return config_encoder, encoder_num_hidden | Create the encoder config.
:param args: Arguments as returned by argparse.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:param config_conv: The config for the convolutional encoder (optional).
:param num_embed_source: The size of the source embedding.
:return: The encoder config and the number of hidden units of the encoder. |
381,331 | def adjustReplicas(self,
old_required_number_of_instances: int,
new_required_number_of_instances: int):
replica_num = old_required_number_of_instances
while replica_num < new_required_number_of_instances:
self.replicas.add_replica(replica_num)
self.processStashedMsgsForReplica(replica_num)
replica_num += 1
while replica_num > new_required_number_of_instances:
replica_num -= 1
self.replicas.remove_replica(replica_num)
pop_keys(self.msgsForFutureReplicas, lambda inst_id: inst_id < new_required_number_of_instances)
if len(self.primaries_disconnection_times) < new_required_number_of_instances:
self.primaries_disconnection_times.extend(
[None] * (new_required_number_of_instances - len(self.primaries_disconnection_times)))
elif len(self.primaries_disconnection_times) > new_required_number_of_instances:
self.primaries_disconnection_times = self.primaries_disconnection_times[:new_required_number_of_instances] | Add or remove replicas depending on `f` |
381,332 | def InitFromHuntObject(self,
hunt_obj,
hunt_counters=None,
with_full_summary=False):
self.urn = rdfvalue.RDFURN("hunts").Add(str(hunt_obj.hunt_id))
self.hunt_id = hunt_obj.hunt_id
if (hunt_obj.args.hunt_type ==
rdf_hunt_objects.HuntArguments.HuntType.STANDARD):
self.name = "GenericHunt"
else:
self.name = "VariableGenericHunt"
self.state = str(hunt_obj.hunt_state)
self.crash_limit = hunt_obj.crash_limit
self.client_limit = hunt_obj.client_limit
self.client_rate = hunt_obj.client_rate
self.created = hunt_obj.create_time
self.duration = hunt_obj.duration
self.creator = hunt_obj.creator
self.description = hunt_obj.description
self.is_robot = hunt_obj.creator in ["GRRWorker", "Cron"]
if hunt_counters is not None:
self.results_count = hunt_counters.num_results
self.clients_with_results_count = hunt_counters.num_clients_with_results
self.clients_queued_count = (
hunt_counters.num_clients - hunt_counters.num_successful_clients -
hunt_counters.num_failed_clients - hunt_counters.num_crashed_clients)
self.total_cpu_usage = hunt_counters.total_cpu_seconds or 0
self.total_net_usage = hunt_counters.total_network_bytes_sent
if with_full_summary:
self.all_clients_count = hunt_counters.num_clients
self.completed_clients_count = (
hunt_counters.num_successful_clients +
hunt_counters.num_failed_clients)
self.remaining_clients_count = (
self.all_clients_count - self.completed_clients_count)
else:
self.results_count = 0
self.clients_with_results_count = 0
self.clients_queued_count = 0
self.total_cpu_usage = 0
self.total_net_usage = 0
if with_full_summary:
self.all_clients_count = 0
self.completed_clients_count = 0
self.remaining_clients_count = 0
if hunt_obj.original_object.object_type != "UNKNOWN":
ref = ApiFlowLikeObjectReference()
self.original_object = ref.FromFlowLikeObjectReference(
hunt_obj.original_object)
if with_full_summary:
hra = self.hunt_runner_args = rdf_hunts.HuntRunnerArgs(
hunt_name=self.name,
description=hunt_obj.description,
client_rule_set=hunt_obj.client_rule_set,
crash_limit=hunt_obj.crash_limit,
avg_results_per_client_limit=hunt_obj.avg_results_per_client_limit,
avg_cpu_seconds_per_client_limit=hunt_obj
.avg_cpu_seconds_per_client_limit,
avg_network_bytes_per_client_limit=hunt_obj
.avg_network_bytes_per_client_limit,
client_rate=hunt_obj.client_rate,
original_object=hunt_obj.original_object)
if hunt_obj.HasField("output_plugins"):
hra.output_plugins = hunt_obj.output_plugins
if hunt_obj.client_limit != 100:
hra.client_limit = hunt_obj.client_limit
if hunt_obj.HasField("per_client_cpu_limit"):
hra.per_client_cpu_limit = hunt_obj.per_client_cpu_limit
if hunt_obj.HasField("per_client_network_limit_bytes"):
hra.per_client_network_limit_bytes = (
hunt_obj.per_client_network_bytes_limit)
if hunt_obj.HasField("total_network_bytes_limit"):
hra.network_bytes_limit = hunt_obj.total_network_bytes_limit
self.client_rule_set = hunt_obj.client_rule_set
if (hunt_obj.args.hunt_type ==
rdf_hunt_objects.HuntArguments.HuntType.STANDARD):
self.flow_name = hunt_obj.args.standard.flow_name
self.flow_args = hunt_obj.args.standard.flow_args
return self | Initialize API hunt object from a database hunt object.
Args:
hunt_obj: rdf_hunt_objects.Hunt to read the data from.
hunt_counters: Optional db.HuntCounters object with counters information.
with_full_summary: if True, hunt_runner_args, completion counts and a few
other fields will be filled in. The way to think about it is that with
with_full_summary==True ApiHunt will have the data to render "Hunt
Overview" page and with with_full_summary==False it will have enough
data to be rendered as a hunts list row.
Returns:
Self. |
381,333 | def run_single(workflow, *, registry, db_file, always_cache=True):
with JobDB(db_file, registry) as db:
job_logger = make_logger("worker", push_map, db)
result_logger = make_logger("worker", pull_map, db)
@pull
def pass_job(source):
for msg in source():
key, job = msg
status, retrieved_result = db.add_job_to_db(key, job)
if status == 'retrieved':
yield retrieved_result
continue
elif status == 'attached':
continue
result = run_job(key, job)
attached = db.store_result_in_db(result, always_cache=True)
yield result
yield from (ResultMessage(key, 'done', result.value, None)
for key in attached)
scheduler = Scheduler(job_keeper=db)
queue = Queue()
job_front_end = job_logger >> queue.sink
result_front_end = queue.source >> pass_job >> result_logger
single_worker = Connection(result_front_end, job_front_end)
return scheduler.run(single_worker, get_workflow(workflow)) | Run workflow in a single thread, storing results in a Sqlite3
database.
:param workflow: Workflow or PromisedObject to be evaluated.
:param registry: serialization Registry function.
:param db_file: filename of Sqlite3 database, give `':memory:'` to
keep the database in memory only.
:param always_cache: Currently ignored. always_cache is true.
:return: Evaluated result. |
381,334 | def shape(self):
if len(self) == 0:
return ()
elif self.is_power_space:
try:
sub_shape = self[0].shape
except AttributeError:
sub_shape = ()
else:
sub_shape = ()
return (len(self),) + sub_shape | Total spaces per axis, computed recursively.
The recursion ends at the first level that does not have a shape.
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> pspace = odl.ProductSpace(r2, r3)
>>> pspace.shape
(2,)
>>> pspace2 = odl.ProductSpace(pspace, 3)
>>> pspace2.shape
(3, 2)
If the space is a "pure" product space, shape recurses all the way
into the components:
>>> r2_2 = odl.ProductSpace(r2, 3)
>>> r2_2.shape
(3, 2) |
381,335 | def close(self):
if self._image is None:
return
empty_image = fits.HDUList()
for u in self._image:
empty_image.append(u.__class__(data=None, header=None))
self._image.close()
self._image = empty_image | Close the object nicely and release all the data
arrays from memory. YOU CAN'T GET IT BACK: the pointers
and data are gone, so use the getData method to get
the data array returned for future use. You can use
putData to reattach a new data array to the imageObject. |
381,336 | def list_of_mined(cls):
result = []
if PyFunceble.CONFIGURATION["mining"]:
if PyFunceble.INTERN["file_to_test"] in PyFunceble.INTERN["mined"]:
for element in PyFunceble.INTERN["mined"][
PyFunceble.INTERN["file_to_test"]
]:
result.extend(
PyFunceble.INTERN["mined"][PyFunceble.INTERN["file_to_test"]][
element
]
)
result = List(result).format()
return result | Provide the list of mined so they can be added to the list
queue.
:return: The list of mined domains or URL.
:rtype: list |
381,337 | def render(self, container, descender, state, space_below=0,
first_line_only=False):
indent_first = (float(self.get_style('indent_first', container))
if state.initial else 0)
line_width = float(container.width)
line_spacing = self.get_style('line_spacing', container)
text_align = self.get_style('text_align', container)
tab_stops = self.get_style('tab_stops', container)
if not tab_stops:
tab_width = 2 * self.get_style(, container)
tab_stops = DefaultTabStops(tab_width)
saved_state = copy(state)
prev_state = copy(state)
max_line_width = 0
def typeset_line(line, last_line=False):
nonlocal state, saved_state, max_line_width, descender, space_below
max_line_width = max(max_line_width, line.cursor)
advance = (line.ascender(container) if descender is None
else line_spacing.advance(line, descender, container))
descender = line.descender(container)
line.advance = advance
total_advance = advance + (space_below if last_line else 0) - descender
if container.remaining_height < total_advance:
raise EndOfContainer(saved_state)
assert container.advance2(advance)
line.typeset(container, text_align, last_line)
assert container.advance2(- descender)
state.initial = False
saved_state = copy(state)
return Line(tab_stops, line_width, container,
significant_whitespace=self.significant_whitespace)
first_line = line = Line(tab_stops, line_width, container,
indent_first, self.significant_whitespace)
while True:
try:
word = state.next_word()
except StopIteration:
break
try:
if not line.append_word(word):
for first, second in word.hyphenate(container):
if line.append_word(first):
state.prepend_word(second)
break
else:
state = prev_state
line = typeset_line(line)
if first_line_only:
break
continue
except NewLineException:
line.append(word.glyphs_span)
line = typeset_line(line, last_line=True)
if first_line_only:
break
prev_state = copy(state)
if line:
typeset_line(line, last_line=True)
if self._width(container) == FlowableWidth.AUTO:
if text_align == TextAlign.CENTER:
container.left -= float(container.width - max_line_width) / 2
if text_align == TextAlign.RIGHT:
container.left -= float(container.width - max_line_width)
return max_line_width, first_line.advance, descender | Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
descender (float or None): descender height of the preceeding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line |
381,338 | def get_example(cls) -> list:
if cls.example is not None:
return cls.example
if cls.items is not None:
if isinstance(cls.items, list):
return [item.get_example() for item in cls.items]
else:
return [cls.items.get_example()]
return [1] | Returns an example value for the Array type.
If an example isn't a defined attribute on the class we return
a list of 1 item containing the example value of the `items` attribute.
If `items` is None we simply return a `[1]`. |
381,339 | def _flush(self, close=False):
for channel in self.forward_channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_key_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.round_robin_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes() | Flushes remaining output records in the output queues to plasma.
None is used as a special type of record that is propagated from sources
to sinks to signal the end of data in a stream.
Attributes:
close (bool): A flag denoting whether the channel should be
also marked as 'closed' (True) or not (False) after flushing. |
381,340 | def put(self, message):
return self.connection.put(, data=dict(message=message)) | Simply test Put a string
:param message: str of the message
:return: str of the message |
381,341 | def expand(data):
data = yaml.safe_load(data)  # parse the YAML text into a mapping (loader choice assumed)
iterables = dict()
unique = dict()
for key, value in data.items():
if isinstance(value, list) and not key.startswith('_'):
iterables[key] = value
else:
unique[key] = value
for values in itertools.product(*iterables.values()):
retval = dict(unique)
keys = list(iterables.keys())
retval.update(dict(zip(keys, values)))
yield retval | Generates configuration sets based on the YAML input contents
For an introduction to the YAML mark-up, just search the net. Here is one of
its references: https://en.wikipedia.org/wiki/YAML
A configuration set corresponds to settings for **all** variables in the
input template that needs replacing. For example, if your template mentions
the variables ``name`` and ``version``, then each configuration set should
yield values for both ``name`` and ``version``.
For example:
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
This should yield to the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1'},
{'name': 'john', 'version': 'v2'},
{'name': 'lisa', 'version': 'v1'},
{'name': 'lisa', 'version': 'v2'},
]
Each key in the input file should correspond to either an object or a YAML
array. If the object is a list, then we'll iterate over it for every possible
combination of elements in the lists. If the element in question is not a
list, then it is considered unique and repeated for each yielded
configuration set. Example
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
text: >
hello,
world!
Should yield to the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1', 'text': 'hello, world!'},
{'name': 'john', 'version': 'v2', 'text': 'hello, world!'},
{'name': 'lisa', 'version': 'v1', 'text': 'hello, world!'},
{'name': 'lisa', 'version': 'v2', 'text': 'hello, world!'},
]
Keys starting with one `_` (underscore) are treated as "unique" objects as
well. Example:
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
_unique: [i1, i2]
Should yield to the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1', '_unique': ['i1', 'i2']},
{'name': 'john', 'version': 'v2', '_unique': ['i1', 'i2']},
{'name': 'lisa', 'version': 'v1', '_unique': ['i1', 'i2']},
{'name': 'lisa', 'version': 'v2', '_unique': ['i1', 'i2']},
]
Parameters:
data (str): YAML data to be parsed
Yields:
dict: A dictionary of key-value pairs for building the templates |
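A standalone sketch of the expansion logic the docstring describes, independent of the function's own YAML handling; it reuses the docstring's example values.

```python
import itertools

config = {"name": ["john", "lisa"], "version": ["v1", "v2"], "text": "hello, world!"}

iterables = {k: v for k, v in config.items()
             if isinstance(v, list) and not k.startswith("_")}
unique = {k: v for k, v in config.items() if k not in iterables}

combos = [dict(unique, **dict(zip(iterables, values)))
          for values in itertools.product(*iterables.values())]
# combos -> four dicts: every (name, version) pair, each carrying the same 'text' value.
```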
381,342 | def diffusion_coeff_counts(self):
return [(key, len(list(group)))
for key, group in itertools.groupby(self.diffusion_coeff)] | List of tuples of (diffusion coefficient, counts) pairs.
The order of the diffusion coefficients is as in self.diffusion_coeff. |
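A quick illustration of the run-length counting above; note that `itertools.groupby` only merges consecutive equal values, which is why the result order follows `self.diffusion_coeff`.

```python
import itertools

diffusion_coeff = [1.0, 1.0, 2.5, 2.5, 2.5, 1.0]
counts = [(key, len(list(group)))
          for key, group in itertools.groupby(diffusion_coeff)]
# counts -> [(1.0, 2), (2.5, 3), (1.0, 1)]
```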
381,343 | def getLayout(self, algorithmName, verbose=None):
response=api(url=self.___url++str(algorithmName)+, method="H", verbose=verbose, parse_params=False)
return response | Returns all the details, including names, parameters, and compatible column types for the Layout algorithm specified by the `algorithmName` parameter.
:param algorithmName: Name of the Layout algorithm
:param verbose: print more
:returns: 200: successful operation |
381,344 | def FromJsonString(self, value):
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * _SECONDS_PER_DAY
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than 9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError(
'Failed to parse timestamp: invalid trailing data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: {0}.'.format(timezone))
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
self.seconds = int(seconds)
self.nanos = int(nanos) | Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems. |
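A small worked example of the offset arithmetic described above, redone directly with `datetime` (not by calling the method) on the docstring's sample timestamp.

```python
from datetime import datetime

value = '1972-01-01T10:00:20.021-05:00'

# Seconds since the epoch for the local part of the timestamp:
base = datetime.strptime('1972-01-01T10:00:20', '%Y-%m-%dT%H:%M:%S') - datetime(1970, 1, 1)
seconds = base.days * 86400 + base.seconds   # 63108020
nanos = round(0.021 * 1e9)                   # 21000000

# A '-05:00' offset means local time is behind UTC, so 5 * 3600 seconds are added:
seconds += 5 * 3600                          # 63126020
```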
381,345 | def cli(ctx, resource):
log = logging.getLogger()
assert isinstance(ctx, Context)
resource = str(resource).lower()
if resource == 'ips':
resource = IpsManager(ctx)
for r in resource.versions.values():
click.secho(r.version.vstring, bold=True)
return
if resource in (, ):
resource = DevToolsManager(ctx)
for r in resource.versions.values():
click.secho(.format(v=r.version.vstring, id=r.version.vid), bold=True)
return | Displays all locally cached <resource> versions available for installation.
\b
Available resources:
ips (default)
dev_tools |
381,346 | def update_user_type(self):
if self.rb_tutor.isChecked():
self.user_type = 'tutor'
elif self.rb_student.isChecked():
self.user_type = 'student'
self.accept() | Return either 'tutor' or 'student' based on which radio
button is selected. |
381,347 | def preferred_height(self, cli, width, max_available_height, wrap_lines):
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.current_completions) / float(column_count))) | Preferred height: as much as needed in order to display all the completions. |
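A quick arithmetic sketch of the height computation above; the width, margin, column width, and completion count are made-up numbers.

```python
import math

width, required_margin, column_width = 80, 3, 12   # hypothetical values
completions = 23

column_count = max(1, (width - required_margin) // column_width)   # 6 columns fit
height = int(math.ceil(completions / float(column_count)))         # 4 rows needed
```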
381,348 | def _get_default(self, obj):
if self.name in obj._property_values:
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default | Internal implementation of instance attribute access for default
values.
Handles bookkeeping around |PropertyContainer| value, etc. |
381,349 | def doDirectPayment(self, params):
defaults = {"method": "DoDirectPayment", "paymentaction": "Sale"}
required = ["creditcardtype",
"acct",
"expdate",
"cvv2",
"ipaddress",
"firstname",
"lastname",
"street",
"city",
"state",
"countrycode",
"zip",
"amt",
]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
return nvp_obj | Call PayPal DoDirectPayment method. |
381,350 | def process(self):
print('Hunt to collect {0:d} items'.format(len(self.file_path_list)))
print('Files to be collected: {0!s}'.format(self.file_path_list))
hunt_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
hunt_args = flows_pb2.FileFinderArgs(
paths=self.file_path_list, action=hunt_action)
return self._create_hunt('FileFinder', hunt_args) | Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection. |
381,351 | def get_poll(self, arg, *, request_policy=None):
if isinstance(arg, str):
match = self._url_re.match(arg)
if match:
arg = match.group('id')
return self._http_client.get('{}/{}'.format(self._POLLS, arg),
request_policy=request_policy,
cls=strawpoll.Poll) | Retrieves a poll from strawpoll.
:param arg: Either the ID of the poll or its strawpoll url.
:param request_policy: Overrides :attr:`API.requests_policy` for that \
request.
:type request_policy: Optional[:class:`RequestsPolicy`]
:raises HTTPException: Requesting the poll failed.
:returns: A poll constructed with the requested data.
:rtype: :class:`Poll` |
381,352 | def get_last_config_update_time_output_last_config_update_time(self, **kwargs):
config = ET.Element("config")
get_last_config_update_time = ET.Element("get_last_config_update_time")
config = get_last_config_update_time
output = ET.SubElement(get_last_config_update_time, "output")
last_config_update_time = ET.SubElement(output, "last-config-update-time")
last_config_update_time.text = kwargs.pop('last_config_update_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
381,353 | def h_kinetic(T, P, MW, Hvap, f=1):
return (2*f)/(2-f)*(MW/(1000*2*pi*R*T))**0.5*(Hvap**2*P*MW)/(1000*R*T**2) | r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h = \left(\frac{2f}{2-f}\right)\left(\frac{MW}{1000\cdot 2\pi R T}
\right)^{0.5}\left(\frac{H_{vap}^2 P \cdot MW}{1000\cdot RT^2}\right)
Parameters
----------
T : float
Vapor temperature, [K]
P : float
Vapor pressure, [Pa]
MW : float
Molecular weight of the gas, [g/mol]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
f : float
Correction factor, [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
f is a correction factor for how the removal of gas particles affects the
behavior of the ideal gas in diffusing to the condensing surface. It is
quite close to one, and has not been well explored in the literature due
to the rarity of the importance of the kinetic resistance.
Examples
--------
Water at 1 bar and 300 K:
>>> h_kinetic(300, 1E5, 18.02, 2441674)
30788845.562480535
References
----------
.. [1] Berman, L. D. "On the Effect of Molecular-Kinetic Resistance upon
Heat Transfer with Condensation." International Journal of Heat and Mass
Transfer 10, no. 10 (October 1, 1967): 1463.
doi:10.1016/0017-9310(67)90033-6.
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1 edition.
Wiley-Interscience, 1991.
.. [3] Stephan, Karl. Heat Transfer in Condensation and Boiling. Translated
by C. V. Green. Softcover reprint of the original 1st ed. 1992 edition.
Berlin; New York: Springer, 2013. |
381,354 | def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
logger = Logger.getInstance()
logger.debug(.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape))
graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))
logger.info()
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
if not regional_term: regional_term = __regional_term_voxel
if not boundary_term: boundary_term = __boundary_term_voxel
if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes two arguments.')
if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes two arguments.')
logger.debug(.format(fg_markers.size,
len(fg_markers.ravel().nonzero()[0]),
len(bg_markers.ravel().nonzero()[0])))
logger.info()
regional_term(graph, regional_term_args)
logger.info()
boundary_term(graph, boundary_term_args)
logger.info()
if not 0 == scipy.count_nonzero(fg_markers):
graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
if not 0 == scipy.count_nonzero(bg_markers):
graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])
return graph.get_graph() | Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood.
Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a
:math:`ndim * 2` neighbourhood.
Every voxel of the image is regarded as a node. They are connected to their immediate
neighbours via arcs. If to voxels are neighbours is determined using
:math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arcs weights
(n-weights) are computed using the supplied ``boundary_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All voxels that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered voxels receive a maximum
(`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
All other t-weights are set using the supplied ``regional_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Parameters
----------
fg_markers : ndarray
The foreground markers as binary array of the same shape as the original image.
bg_markers : ndarray
The background markers as binary array of the same shape as the original image.
regional_term : function
This can be either `False`, in which case all t-weights are set to 0, except for
the nodes that are directly connected to the source or sink; or a function, in
which case the supplied function is used to compute the t_edges. It has to
have the following signature *regional_term(graph, regional_term_args)*, and is
supposed to compute (source_t_weight, sink_t_weight) for all voxels of the image
and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
have only to be computed for nodes where they do not equal zero. Additional
parameters can be passed to the function via the ``regional_term_args`` parameter.
boundary_term : function
This can be either `False`, in which case all n-edges, i.e. between all nodes
that are not source or sink, are set to 0; or a function, in which case the
supplied function is used to compute the edge weights. It has to have the
following signature *boundary_term(graph, boundary_term_args)*, and is supposed
to compute the edges between the graphs nodes and to add them to the supplied
`~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to
the function via the ``boundary_term_args`` parameter.
regional_term_args : tuple
Use this to pass some additional parameters to the ``regional_term`` function.
boundary_term_args : tuple
Use this to pass some additional parameters to the ``boundary_term`` function.
Returns
-------
graph : `~medpy.graphcut.maxflow.GraphDouble`
The created graph, ready to execute the graph-cut.
Raises
------
AttributeError
If an argument is malformed.
FunctionError
If one of the supplied functions returns unexpected results.
Notes
-----
If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
All arcs whose weight is not explicitly set are assumed to carry a weight of zero. |
381,355 | def source_filename(self, docname: str, srcdir: str):
docpath = Path(srcdir, docname)
parent = docpath.parent
imgpath = parent.joinpath(self.filename)
if not imgpath.exists():
msg = f'Image file not found: {imgpath}'
raise SphinxError(msg)
return imgpath | Get the full filename to referenced image |
381,356 | def settings_system_update(self, data):
data["auth_password"] = self._password
response = self._put(url.settings_system, body=data)
self._check_response(response, 200) | Set system settings. Uses PUT to /settings/system interface
:Args:
* *data*: (dict) Settings dictionary as specified `here <https://cloud.knuverse.com/docs/api/#api-System_Settings-Set_System_Settings>`_.
:Returns: None |
381,357 | def state_by_node2state_by_state(tpm):
tpm = np.array(tpm)
tpm = to_multidimensional(tpm)
N = tpm.shape[-1]
S = 2**N
sbs_tpm = np.zeros((S, S))
if not np.any(np.logical_and(tpm < 1, tpm > 0)):
for previous_state_index in range(S):
previous_state = le_index2state(previous_state_index, N)
current_state_index = state2le_index(tpm[previous_state])
sbs_tpm[previous_state_index, current_state_index] = 1
else:
for previous_state_index in range(S):
previous_state = le_index2state(previous_state_index, N)
marginal_tpm = tpm[previous_state]
for current_state_index in range(S):
current_state = np.array(
[i for i in le_index2state(current_state_index, N)])
sbs_tpm[previous_state_index, current_state_index] = (
np.prod(marginal_tpm[current_state == 1]) *
np.prod(1 - marginal_tpm[current_state == 0]))
return sbs_tpm | Convert a state-by-node TPM to a state-by-state TPM.
.. important::
A nondeterministic state-by-node TPM can have more than one
representation as a state-by-state TPM. However, the mapping can be
made to be one-to-one if we assume the TPMs to be conditionally
independent. Therefore, **this function returns the corresponding
conditionally independent state-by-state TPM.**
.. note::
The indices of the rows of the state-by-node TPM are assumed to follow
the little-endian convention, while the indices of the columns follow
the big-endian convention. The indices of the rows and columns of the
resulting state-by-state TPM both follow the big-endian convention. See
the documentation on PyPhi :ref:`tpm-conventions` for more info.
Args:
tpm (list[list] or np.ndarray): A state-by-node TPM with row indices
following the little-endian convention and column indices following
the big-endian convention.
Returns:
np.ndarray: A state-by-state TPM, with both row and column indices
following the big-endian convention.
>>> tpm = np.array([[1, 1, 0],
... [0, 0, 1],
... [0, 1, 1],
... [1, 0, 0],
... [0, 0, 1],
... [1, 0, 0],
... [1, 1, 1],
... [1, 0, 1]])
>>> state_by_node2state_by_state(tpm)
array([[0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 1., 0., 0.]]) |
381,358 | def exception(self, e):
self.logged_exception(e)
self.logger.exception(e) | Log an error message.
:param e: Exception to log. |
381,359 | def sync_header_chain(cls, path, bitcoind_server, last_block_id ):
current_block_id = SPVClient.height( path )
if current_block_id is None:
assert USE_TESTNET
current_block_id = -1
assert (current_block_id >= 0 and USE_MAINNET) or USE_TESTNET
if current_block_id < last_block_id:
if USE_MAINNET:
log.debug("Synchronize %s to %s" % (current_block_id, last_block_id))
else:
log.debug("Synchronize testnet %s to %s" % (current_block_id + 1, last_block_id ))
if current_block_id >= 0:
prev_block_header = SPVClient.read_header( path, current_block_id )
prev_block_hash = prev_block_header['hash']
else:
prev_block_hash = GENESIS_BLOCK_HASH_TESTNET
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(600)
bitcoind_port = 8333
if ":" in bitcoind_server:
p = bitcoind_server.split(":")
bitcoind_server = p[0]
bitcoind_port = int(p[1])
log.debug("connect to %s:%s" % (bitcoind_server, bitcoind_port))
sock.connect( (bitcoind_server, bitcoind_port) )
client = BlockHeaderClient( sock, path, prev_block_hash, last_block_id )
client.run()
if SPVClient.height(path) < last_block_id:
raise Exception("Did not receive all headers up to %s (only got %s)" % (last_block_id, SPVClient.height(path)))
log.debug("synced headers from %s to %s in %s" % (current_block_id, last_block_id, path))
return True | Synchronize our local block headers up to the last block ID given.
@last_block_id is *inclusive*
@bitcoind_server is host:port or just host |
381,360 | def _set_time_property(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=time_property.time_property, is_container=, presence=False, yang_name="time-property", rest_name="time-property", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__time_property = t
if hasattr(self, '_set'):
self._set() | Setter method for time_property, mapped from YANG variable /ptp_state/time_property (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_time_property is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_time_property() directly. |
381,361 | def _onMouseWheel(self, evt):
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
step = rate*float(rotation)/delta
evt.Skip()
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return
else:
self._skipwheelevent = True
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt) | Translate mouse wheel events into matplotlib events |
381,362 | def get_all_chains(self):
return [self.get_chain(i) for i in range(len(self.leaves))] | Assemble and return a list of all chains for all leaf nodes to the merkle root. |
381,363 | def from_string(cls, string, *, default_func=None):
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address) | Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part. |
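A hedged usage sketch; the protocol, host, and port are placeholder values:
service = Service.from_string('tcp://example.org:50001')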
381,364 | def asset_view_atype(self, ):
if not self.cur_asset:
return
atype = self.cur_asset.atype
self.view_atype(atype) | View the atype of the current asset
:returns: None
:rtype: None
:raises: None |
381,365 | def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS
LOG.info("out Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL
LOG.info("out Service network deleted failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | Delete the DCNM OUT network and update the result. |
381,366 | def __add_item(self, item, keys=None):
if(not keys or not len(keys)):
raise Exception('%s: need at least one key to add item %s'
% (self.__class__.__name__, str(item)))
direct_key = tuple(keys)
for key in keys:
key_type = str(type(key))
if(not key_type in self.__dict__):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
if(not 'items_dict' in self.__dict__):
self.items_dict = dict()
self.items_dict[direct_key] = item | Internal method to add an item to the multi-key dictionary |
381,367 | def get_plugin(cls, name=None, **kwargs):
if isinstance(name, KurtPlugin):
return name
if 'extension' in kwargs:
kwargs['extension'] = kwargs['extension'].lower()
if name:
kwargs["name"] = name
if not kwargs:
raise ValueError, "No arguments"
for plugin in cls.plugins.values():
for name in kwargs:
if getattr(plugin, name) != kwargs[name]:
break
else:
return plugin
raise ValueError, "Unknown format %r" % kwargs | Returns the first format plugin whose attributes match kwargs.
For example::
get_plugin(extension="scratch14")
Will return the :class:`KurtPlugin` whose :attr:`extension
<KurtPlugin.extension>` attribute is ``"scratch14"``.
The :attr:`name <KurtPlugin.name>` is used as the ``format`` parameter
to :attr:`Project.load` and :attr:`Project.save`.
:raises: :class:`ValueError` if the format doesn't exist.
:returns: :class:`KurtPlugin` |
381,368 | def getid(self, idtype):
memorable_id = None
while memorable_id is None or memorable_id in self._ids:
l=[]
for _ in range(4):
l.append(str(randint(0, 19)))
memorable_id = .join(l)
self._ids.append(memorable_id)
return idtype + '-' + memorable_id | idtype in Uniq constants |
381,369 | def gather_layer_info(self):
for i in range(len(self.cc[0])):
layer_radii = [x[i].tags[] for x in self.cc]
self.radii_layers.append(layer_radii)
layer_alpha = [x[i].tags[] for x in self.cc]
self.alpha_layers.append(layer_alpha)
layer_ca = [x[i].tags[] for x in self.cc]
self.ca_layers.append(layer_ca)
return | Extracts the tagged coiled-coil parameters for each layer. |
381,370 | def authentication_required(meth):
def check(cls, *args, **kwargs):
if cls.authenticated:
return meth(cls, *args, **kwargs)
raise Error("Authentication required")
return check | Simple class method decorator.
Checks if the client is currently authenticated.
:param meth: the original called method |
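An illustrative sketch of applying the decorator; the Client class and whoami method are hypothetical:
class Client:
    authenticated = False

    @authentication_required
    def whoami(self):
        return 'me'

Calling Client().whoami() raises Error("Authentication required") until `authenticated` is set to True.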
381,371 | def make_data():
a = { (1,1):.25, (1,2):.15, (1,3):.2,
(2,1):.3, (2,2):.3, (2,3):.1,
(3,1):.15, (3,2):.65, (3,3):.05,
(4,1):.1, (4,2):.05, (4,3):.8
}
epsilon = 0.01
I,p = multidict({1:5, 2:6, 3:8, 4:20})
K,LB = multidict({1:.2, 2:.3, 3:.2})
return I,K,a,p,epsilon,LB | creates example data set |
381,372 | def apply(self, data_source):
dataframe = self.__get_dataframe(data_source, use_target=False)
dataframe = self.__cleaner.apply(dataframe)
dataframe = self.__transformer.apply(dataframe)
return dataframe | Called with the predict data (new information).
@param data_source: Either a pandas.DataFrame or a file-like object. |
381,373 | def implemented(cls, for_type):
for function in cls.required():
if not function.implemented_for_type(for_type):
raise TypeError(
"%r doesn't implement %r so it cannot participate in "
"the protocol %r." %
(for_type, function.func.__name__, cls))
cls.register(for_type) | Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions. |
381,374 | def get_api_link(self):
url = self._api_link
if url:
qs = self.get_qs()
url = "%s?type=choices" % url
if qs:
url = "%s&%s" % (url, u.join([u % (k, urllib.quote(unicode(v).encode())) \
for k, v in qs.items()]))
url = "%s&%s" % (url, u.join([u % x \
for x in qs.keys()]))
return url | Adds a query string to the api url. At minimum adds the type=choices
argument so that the return format is json. Any other filtering
arguments calculated by the `get_qs` method are then added to the
url. It is up to the destination url to respect them as filters. |
381,375 | def get_tac_permissions(calendar_id):
return _process_get_perm_resp(
get_permissions_url,
post_tac_resource(get_permissions_url,
_create_get_perm_body(calendar_id)),
TrumbaCalendar.TAC_CAMPUS_CODE,
calendar_id) | Return a list of sorted Permission objects representing
the user permissions of a given Tacoma calendar.
:return: a list of trumba.Permission objects
corresponding to the given campus calendar.
None if error, [] if not exists
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned. |
381,376 | def _adjacent_tri(self, edge, i):
if not np.isscalar(i):
i = [x for x in i if x not in edge][0]
try:
pt1 = self._edges_lookup[edge]
pt2 = self._edges_lookup[(edge[1], edge[0])]
except KeyError:
return None
if pt1 == i:
return (edge[1], edge[0], pt2)
elif pt2 == i:
return (edge[1], edge[0], pt1)
else:
raise RuntimeError("Edge %s and point %d do not form a triangle "
"in this mesh." % (edge, i)) | Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle. |
381,377 | def injector_gear_2_json(self):
LOGGER.debug("InjectorCachedGear.injector_gear_2_json")
json_obj = {
: self.id,
: self.name,
: self.admin_queue,
: self.description,
: if self.running else
}
return json_obj | transform this local object to JSON.
:return: the JSON from this local object |
381,378 | def get_property(obj, name):
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Property name cannot be null")
name = name.lower()
try:
for property_name in dir(obj):
if property_name.lower() != name:
continue
property = getattr(obj, property_name)
if PropertyReflector._is_property(property, property_name):
return property
except:
pass
return None | Gets value of object property specified by its name.
:param obj: an object to read property from.
:param name: a name of the property to get.
:return: the property value or null if property doesn't exist or introspection failed. |
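An illustrative sketch; the Dummy class is hypothetical and the lookup is case-insensitive:
class Dummy(object):
    def __init__(self):
        self.color = 'red'

value = PropertyReflector.get_property(Dummy(), 'Color')  # expected to return 'red'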
381,379 | def apply_filters(self, filters):
_configs = self.configs
_stats = self.stats
self.configs = {}
self.stats = {}
for f in filters:
if f in _configs:
self.configs[f] = _configs[f]
elif f in _stats:
self.stats[f] = _stats[f]
else:
raise NoSuchControlFileError("%s for %s" % (f, self.subsystem.name)) | Apply the specified filters. The filters are used to reduce the control groups
which are accessed by the get_configs, get_stats, and get_defaults methods. |
381,380 | def sun_declination(day):
day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal()
day_angle = 2 * pi * day_of_year / 365
declination_radians = sum([
0.006918,
0.001480*sin(3*day_angle),
0.070257*sin(day_angle),
0.000907*sin(2*day_angle),
-0.399912*cos(day_angle),
-0.006758*cos(2*day_angle),
-0.002697*cos(3*day_angle),
])
return degrees(declination_radians) | Compute the declination angle of the sun for the given date.
Uses the Spencer Formula
(found at http://www.illustratingshadows.com/www-formulae-collection.pdf)
:param day: The datetime.date to compute the declination angle for
:returns: The angle, in degrees, of the angle of declination |
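For example, near the June solstice the returned declination is roughly +23.4 degrees:
from datetime import date
sun_declination(date(2023, 6, 21))   # approximately 23.4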
381,381 | def send_video_note(chat_id, video_note,
duration=None, length=None, reply_to_message_id=None, reply_markup=None, disable_notification=False,
**kwargs):
files = None
if isinstance(video_note, InputFile):
files = [video_note]
video = None
elif not isinstance(video_note, str):
raise Exception('video_note must be an InputFile or a str')
params = dict(
chat_id=chat_id,
video_note=video_note
)
params.update(
_clean_params(
duration=duration,
length=length,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
disable_notification=disable_notification,
)
)
return TelegramBotRPCRequest(, params=params, files=files, on_result=Message.from_result, **kwargs) | Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document).
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param video_note: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended),
pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data.
:param duration: Duration of sent video in seconds
:param length: Video width and height
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type video: InputFile or str
:type duration: int
:type caption: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest |
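A hedged usage sketch; the chat id and file_id are placeholder values, and any API-token kwargs are assumed to be supplied by the surrounding bot wrapper:
request = send_video_note(chat_id=123456789,
                          video_note='DQACAgIAAxkBAAIB...',  # file_id of an already-uploaded video note
                          duration=30, length=240)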
381,382 | def read_hotkey(suppress=True):
queue = _queue.Queue()
fn = lambda e: queue.put(e) or e.event_type == KEY_DOWN
hooked = hook(fn, suppress=suppress)
while True:
event = queue.get()
if event.event_type == KEY_UP:
unhook(hooked)
with _pressed_events_lock:
names = [e.name for e in _pressed_events.values()] + [event.name]
return get_hotkey_name(names) | Similar to `read_key()`, but blocks until the user presses and releases a
hotkey (or single key), then returns a string representing the hotkey
pressed.
Example:
read_hotkey()
# "ctrl+shift+p" |
381,383 | def get_mimetype(self):
if hasattr(self, '_mimetype'):
if (isinstance(self._mimetype, (tuple, list,)) and
len(self._mimetype) == 2):
return self._mimetype
else:
raise NodeError('given _mimetype attribute must be a 2-element list or tuple')
mtype, encoding = mimetypes.guess_type(self.name)
if mtype is None:
if self.is_binary:
mtype = 'application/octet-stream'
encoding = None
else:
mtype = 'text/plain'
encoding = None
return mtype, encoding | Mimetype is calculated based on the file's content. If ``_mimetype``
attribute is available, it will be returned (backends which store
mimetypes or can easily recognize them, should set this private
attribute to indicate that type should *NOT* be calculated). |
381,384 | def timeit(output):
b = time.time()
yield
print output, '%.3f seconds' % (time.time()-b) | If output is a string, print the string and also the time used |
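Intended to be used as a context manager (a sketch, assuming the generator is wrapped with contextlib.contextmanager elsewhere in the module):
with timeit('sorting one million integers'):
    sorted(range(1000000, 0, -1))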
381,385 | def gps_rtk_send(self, time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses, force_mavlink1=False):
return self.send(self.gps_rtk_encode(time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses), force_mavlink1=force_mavlink1) | RTK GPS data. Gives information on the relative baseline calculation
the GPS is reporting
time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t)
rtk_receiver_id : Identification of connected RTK receiver. (uint8_t)
wn : GPS Week Number of last baseline (uint16_t)
tow : GPS Time of Week of last baseline (uint32_t)
rtk_health : GPS-specific health report for RTK data. (uint8_t)
rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t)
nsats : Current number of sats used for RTK calculation. (uint8_t)
baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t)
baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t)
baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t)
baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t)
accuracy : Current estimate of baseline accuracy. (uint32_t)
iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t) |
381,386 | def init_db_conn(connection_name, HOSTS=None):
el = elasticsearch.Elasticsearch(hosts=HOSTS)
el_pool.connections[connection_name] = ElasticSearchClient(el) | Initialize an Elasticsearch connection for each connection string
defined in the configuration file |
381,387 | def executor(self) -> "ThreadPoolExecutor":
if not isinstance(self.__executor, ThreadPoolExecutor) or self.__executor.is_shutdown:
self.configure()
return self.__executor | Executor instance.
:rtype: ThreadPoolExecutor |
381,388 | def has_stack(self, stack_name):
cf = self.cf_client
try:
resp = cf.describe_stacks(StackName=stack_name)
if len(resp["Stacks"]) != 1:
return False
return True
except botocore.exceptions.ClientError as e:
msg = str(e)
if "Stack with id {0} does not exist".format(stack_name) in msg:
LOG.debug("Stack with id {0} does not exist".format(
stack_name))
return False
else:
LOG.debug("Unable to get stack details.", exc_info=e)
raise e | Checks if a CloudFormation stack with given name exists
:param stack_name: Name or ID of the stack
:return: True if stack exists. False otherwise |
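A minimal usage sketch; the deployer instance and stack name are hypothetical:
if not deployer.has_stack("my-app-stack"):
    print("Stack does not exist yet; a CREATE change set would be needed.")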
381,389 | def update_hosts(self, host_names):
if self.host_access:
curr_hosts = [access.host.name for access in self.host_access]
else:
curr_hosts = []
if set(curr_hosts) == set(host_names):
log.info('LUN host access already matches the requested hosts. Skip the modification.')
return None
new_hosts = [UnityHostList.get(cli=self._cli, name=host_name)[0]
for host_name in host_names]
new_access = [{'host': item,
'accessMask': HostLUNAccessEnum.PRODUCTION}
for item in new_hosts]
resp = self.modify(host_access=new_access)
resp.raise_if_err()
return resp | Primarily for puppet-unity use.
Update the hosts for the lun if needed.
:param host_names: specify the new hosts which access the LUN. |
381,390 | def getMachine(self, machineName):
url = self._url + "/%s" % machineName
return Machine(url=url,
securityHandler=self._securityHandler,
initialize=True,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | returns a machine object for a given machine
Input:
machineName - name of the box ex: SERVER.DOMAIN.COM |
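For example, using the host name from the note above (the `machines` parent object is hypothetical):
machine = machines.getMachine("SERVER.DOMAIN.COM")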
381,391 | def pairs(args):
import jcvi.formats.bed
p = OptionParser(pairs.__doc__)
p.set_pairs()
opts, targs = p.parse_args(args)
if len(targs) != 1:
sys.exit(not p.print_help())
samfile, = targs
bedfile = samfile.rsplit(".", 1)[0] + ".bed"
if need_update(samfile, bedfile):
cmd = "bamToBed -i {0}".format(samfile)
sh(cmd, outfile=bedfile)
args[args.index(samfile)] = bedfile
return jcvi.formats.bed.pairs(args) | See __doc__ for OptionParser.set_pairs(). |
381,392 | def svg(self, value):
if len(value) < 500:
self._svg = value
return
try:
root = ET.fromstring(value)
except ET.ParseError as e:
log.error("Canxmlnsxmlns:xlink', "http://www.w3.org/1999/xlink")
if len(root.findall("{http://www.w3.org/2000/svg}image")) == 1:
href = "{http://www.w3.org/1999/xlink}href"
elem = root.find("{http://www.w3.org/2000/svg}image")
if elem.get(href, "").startswith("data:image/"):
changed = True
data = elem.get(href, "")
extension = re.sub(r"[^a-z0-9]", "", data.split(";")[0].split("/")[1].lower())
data = base64.decodebytes(data.split(",", 1)[1].encode())
filename = hashlib.md5(data).hexdigest() + "." + extension
elem.set(href, filename)
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
f.write(data)
value = filename
if len(value) > 1000:
filename = hashlib.md5(value.encode()).hexdigest() + ".svg"
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, "w+", encoding="utf-8") as f:
f.write(value)
self._svg = filename
else:
self._svg = value | Set SVG field value.
If the svg has embed base64 element we will extract them
to disk in order to avoid duplication of content |
381,393 | def job_file(self):
job_file_name = '%s.sub' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file | The path to the submit description file representing this job. |
381,394 | def add_permission(self, perm):
self.Permissions(permission=perm)
PermissionCache.flush()
self.save() | Enables defining a Permission object for the abstract Role.
Args:
perm (object): |
381,395 | def start_record():
global record, playback, current
if record:
raise StateError("Already recording.")
if playback:
raise StateError("Currently playing back.")
record = True
current = ReplayData()
install(RecordingHTTPConnection, RecordingHTTPSConnection) | Install an httplib wrapper that records but does not modify calls. |
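A hedged usage sketch; the URL is a placeholder, and a matching stop/playback helper is assumed to exist elsewhere in the module:
start_record()
urllib2.urlopen('http://example.org/').read()   # passes through the recording wrapper unchanged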
381,396 | def zoomedHealpixMap(title, map, lon, lat, radius,
xsize=1000, **kwargs):
reso = 60. * 2. * radius / xsize
hp.gnomview(map=map, rot=[lon, lat, 0], title=title, xsize=xsize, reso=reso, degree=False, **kwargs) | Inputs: lon (deg), lat (deg), radius (deg) |
381,397 | def _make_sql_params(self,kw):
return [ '%s=?' %k for k in kw.keys() ]
for k,v in kw.iteritems():
vals.append('%s=?' %k)
return vals | Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types |
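Assuming the '%s=?' placeholder form, a call might look like:
self._make_sql_params({'name': 'Alice', 'age': 30})   # -> ['name=?', 'age=?'] (key order may vary)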
381,398 | def dasopr(fname):
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value | Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int |
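For example (the file name is a placeholder):
handle = dasopr('ephemeris.das')
# ... read via the DAS routines, then release the handle with dascls(handle).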
381,399 | def _execute_cell(args, cell_body):
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get() or []
if parameters:
jsonschema.validate({: parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
table_name = google.datalab.bigquery.Query.resolve_parameters(args[], parameters)
query = google.datalab.utils.commands.get_notebook_item(args[])
if args[]:
print(query.sql)
query_params = get_query_parameters(args, cell_body)
if args[]:
context = google.datalab.utils._utils._construct_context_for_args(args)
r = query.execute(output_options, context=context, query_params=query_params)
return r.result() | Implements the BigQuery cell magic used to execute BQ queries.
The supported syntax is:
%%bq execute <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq execute'.
cell_body: optional contents of the cell
Returns:
QueryResultsTable containing query result |