def threshold_img(data, threshold, mask=None, mask_out='below'):
""" Threshold data, setting all values in the array above/below threshold
to zero.
Args:
data (ndarray): The image data to threshold.
threshold (float): Numeric threshold to apply to image.
mask (ndarray): Optional 1D-array with the same length as the data. If
passed, the threshold is first applied to the mask, and the
resulting indices are used to threshold the data. This is primarily
useful when, e.g., applying a statistical threshold to a z-value
image based on a p-value threshold.
mask_out (str): Thresholding direction. Can be 'below' the threshold
(default) or 'above' the threshold. Note: use 'above' when masking
based on p values.
"""
if mask is not None:
mask = threshold_img(mask, threshold, mask_out=mask_out)
return data * mask.astype(bool)
if mask_out.startswith('b'):
data[data < threshold] = 0
elif mask_out.startswith('a'):
data[data > threshold] = 0
    return data
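# A minimal usage sketch for threshold_img above (toy numbers, numpy assumed):
# threshold a z-value map by a p-value mask, masking *above* the threshold as
# the docstring recommends for p values.
import numpy as np

_z = np.array([0.5, 2.1, 3.3, 1.0])
_p = np.array([0.62, 0.04, 0.001, 0.32])
_z_thresh = threshold_img(_z, 0.05, mask=_p, mask_out='above')
assert np.allclose(_z_thresh, [0.0, 2.1, 3.3, 0.0])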
def devices(self):
"""
Return a list of connected devices in the form (*serial*, *status*) where status can
be any of the following:
1. device
2. offline
3. unauthorized
:returns: A list of tuples representing connected devices
"""
devices = None
with self.socket.Connect():
devices = self._command("host:devices")
    return parse_device_list(devices)
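# Hedged usage sketch for devices() above: it returns (serial, status) tuples,
# one per connected device (the client object and serials below are made up).
#
#     client.devices()
#     # -> [('emulator-5554', 'device'), ('0123456789ABCDEF', 'unauthorized')]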
def _get_griddistrict(ding0_filepath):
"""
Just get the grid district number from ding0 data file path
Parameters
----------
ding0_filepath : str
Path to ding0 data ending typically
`/path/to/ding0_data/"ding0_grids__" + str(``grid_district``) + ".xxx"`
Returns
-------
int
grid_district number
"""
grid_district = os.path.basename(ding0_filepath)
    grid_district_search = re.search(r'[_]+\d+', grid_district)
if grid_district_search:
grid_district = int(grid_district_search.group(0)[2:])
return grid_district
else:
        raise KeyError('Grid District not found in {}'.format(grid_district))
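# A small sketch of the extraction performed by _get_griddistrict above; the
# file name follows the documented "ding0_grids__<district>.xxx" pattern and
# is made up for illustration.
import os
import re

_example_path = '/path/to/ding0_data/ding0_grids__1729.pkl'
_match = re.search(r'[_]+\d+', os.path.basename(_example_path))
# group(0) is '__1729'; stripping the two leading underscores gives the id.
assert int(_match.group(0)[2:]) == 1729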
def validate_scopes(self, client_id, scopes, client, request,
*args, **kwargs):
"""Ensure the client is authorized access to requested scopes."""
if hasattr(client, 'validate_scopes'):
return client.validate_scopes(scopes)
    return set(client.default_scopes).issuperset(set(scopes))
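# A minimal sketch of the fallback branch in validate_scopes above: without a
# client-side validate_scopes method, the requested scopes must be a subset of
# the client's default scopes (_DemoClient is a hypothetical stand-in).
class _DemoClient:
    default_scopes = ['read', 'write']

assert set(_DemoClient.default_scopes).issuperset({'read'})
assert not set(_DemoClient.default_scopes).issuperset({'read', 'admin'})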
def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None):
"""
:rtype: Iterator[list[Instance]]
"""
def spotRequestNotFound(e):
return e.error_code == "InvalidSpotInstanceRequestID.NotFound"
for attempt in retry_ec2(retry_for=a_long_time,
retry_while=inconsistencies_detected):
with attempt:
requests = ec2.request_spot_instances(
price, image_id, count=num_instances, **spec)
if tags is not None:
for requestID in (request.id for request in requests):
for attempt in retry_ec2(retry_while=spotRequestNotFound):
with attempt:
ec2.create_tags([requestID], tags)
num_active, num_other = 0, 0
# noinspection PyUnboundLocalVariable,PyTypeChecker
# request_spot_instances's type annotation is wrong
for batch in wait_spot_requests_active(ec2,
requests,
timeout=timeout,
tentative=tentative):
instance_ids = []
for request in batch:
if request.state == 'active':
instance_ids.append(request.instance_id)
num_active += 1
else:
log.info(
'Request %s in unexpected state %s.',
request.id,
request.state)
num_other += 1
if instance_ids:
# This next line is the reason we batch. It's so we can get multiple instances in
# a single request.
yield ec2.get_only_instances(instance_ids)
if not num_active:
message = 'None of the spot requests entered the active state'
if tentative:
log.warn(message + '.')
else:
raise RuntimeError(message)
if num_other:
        log.warn('%i request(s) entered a state other than active.', num_other)
def as_dict(self):
"""
ping statistics.
Returns:
|dict|:
Examples:
>>> import pingparsing
>>> parser = pingparsing.PingParsing()
>>> parser.parse(ping_result)
>>> parser.as_dict()
{
"destination": "google.com",
"packet_transmit": 60,
"packet_receive": 60,
"packet_loss_rate": 0.0,
"packet_loss_count": 0,
"rtt_min": 61.425,
"rtt_avg": 99.731,
"rtt_max": 212.597,
"rtt_mdev": 27.566,
"packet_duplicate_rate": 0.0,
"packet_duplicate_count": 0
}
"""
return {
"destination": self.destination,
"packet_transmit": self.packet_transmit,
"packet_receive": self.packet_receive,
"packet_loss_count": self.packet_loss_count,
"packet_loss_rate": self.packet_loss_rate,
"rtt_min": self.rtt_min,
"rtt_avg": self.rtt_avg,
"rtt_max": self.rtt_max,
"rtt_mdev": self.rtt_mdev,
"packet_duplicate_count": self.packet_duplicate_count,
"packet_duplicate_rate": self.packet_duplicate_rate,
    }
def disable(self):
""" Relieve all state machines that have no active execution and hide the widget """
self.ticker_text_label.hide()
if self.current_observed_sm_m:
        self.stop_sm_m_observation(self.current_observed_sm_m)
def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
    )
def _profile(self, frame, event, arg):
"""The callback function to register by :func:`sys.setprofile`."""
# c = event.startswith('c_')
if event.startswith('c_'):
return
time1 = self.timer()
frames = self.frame_stack(frame)
if frames:
frames.pop()
parent_stats = self.stats
for f in frames:
parent_stats = parent_stats.ensure_child(f.f_code, void)
code = frame.f_code
frame_key = id(frame)
# if c:
# event = event[2:]
# code = mock_code(arg.__name__)
# frame_key = id(arg)
# record
time2 = self.timer()
self.overhead += time2 - time1
if event == 'call':
time = time2 - self.overhead
self.record_entering(time, code, frame_key, parent_stats)
elif event == 'return':
time = time1 - self.overhead
self.record_leaving(time, code, frame_key, parent_stats)
time3 = self.timer()
    self.overhead += time3 - time2
def unmount(self, client):
"""Unmounts a backend within Vault"""
    getattr(client, self.unmount_fun)(mount_point=self.path)
def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
"""Check for the latest version of OK and update accordingly."""
address = VERSION_ENDPOINT.format(server=server)
print('Checking for software updates...')
log.info('Existing OK version: %s', version)
log.info('Checking latest version from %s', address)
try:
response = requests.get(address, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Network error when checking for updates.')
log.warning('Network error when checking version from %s: %s', address,
str(e), stack_info=True)
return False
response_json = response.json()
if not _validate_api_response(response_json):
print('Error while checking updates: malformed server response')
log.info('Malformed response from %s: %s', address, response.text)
return False
current_version = response_json['data']['results'][0]['current_version']
if current_version == version:
print('OK is up to date')
return True
download_link = response_json['data']['results'][0]['download_link']
log.info('Downloading version %s from %s', current_version, download_link)
try:
response = requests.get(download_link, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Error when downloading new version of OK')
log.warning('Error when downloading new version of OK: %s', str(e),
stack_info=True)
return False
log.info('Writing new version to %s', filename)
zip_binary = response.content
try:
_write_zip(filename, zip_binary)
except IOError as e:
print('Error when downloading new version of OK')
log.warning('Error writing to %s: %s', filename, str(e))
return False
else:
print('Updated to version: {}'.format(current_version))
log.info('Successfully wrote to %s', filename)
        return True
def _stinespring_to_choi(data, input_dim, output_dim):
"""Transform Stinespring representation to Choi representation."""
trace_dim = data[0].shape[0] // output_dim
stine_l = np.reshape(data[0], (output_dim, trace_dim, input_dim))
if data[1] is None:
stine_r = stine_l
else:
stine_r = np.reshape(data[1], (output_dim, trace_dim, input_dim))
return np.reshape(
np.einsum('iAj,kAl->jilk', stine_l, stine_r.conj()),
        2 * [input_dim * output_dim])
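# A worked example for _stinespring_to_choi above: the identity channel on a
# qubit has the 2x2 identity as its Stinespring matrix (no ancilla), and its
# unnormalised Choi matrix is the projector onto |00> + |11>.
import numpy as np

_choi = _stinespring_to_choi((np.eye(2), None), input_dim=2, output_dim=2)
assert np.allclose(_choi, [[1, 0, 0, 1],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0],
                           [1, 0, 0, 1]])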
def _cdf(self, xloc, left, right, cache):
"""
Cumulative distribution function.
Example:
>>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0.5 1. 1. ]
>>> print(chaospy.Add(chaospy.Uniform(), 1).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5 1. ]
>>> print(chaospy.Add(1, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0.5 1. ]
>>> print(chaospy.Add(1, 1).fwd([-0.5, 0.5, 1.5, 2.5]))
[0. 0. 0. 1.]
"""
left = evaluation.get_forward_cache(left, cache)
right = evaluation.get_forward_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise evaluation.DependencyError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return numpy.asfarray(left+right <= xloc)
else:
left, right = right, left
xloc = (xloc.T-numpy.asfarray(right).T).T
output = evaluation.evaluate_forward(left, xloc, cache=cache)
assert output.shape == xloc.shape
    return output
def dup_idx(arr):
"""Return the indices of all duplicated array elements.
Parameters
----------
arr : array-like object
An array-like object
Returns
-------
idx : NumPy array
An array containing the indices of the duplicated elements
Examples
--------
>>> from root_numpy import dup_idx
>>> dup_idx([1, 2, 3, 4, 5])
array([], dtype=int64)
>>> dup_idx([1, 2, 3, 4, 5, 5])
array([4, 5])
>>> dup_idx([1, 2, 3, 4, 5, 5, 1])
array([0, 4, 5, 6])
"""
_, b = np.unique(arr, return_inverse=True)
return np.nonzero(np.logical_or.reduce(
b[:, np.newaxis] == np.nonzero(np.bincount(b) > 1),
        axis=1))[0]
def get_feature_sequence(self, feature_id, organism=None, sequence=None):
"""
[CURRENTLY BROKEN] Get the sequence of a feature
:type feature_id: str
:param feature_id: Feature UUID
:type organism: str
:param organism: Organism Common Name
:type sequence: str
:param sequence: Sequence Name
:rtype: dict
:return: A standard apollo feature dictionary ({"features": [{...}]})
"""
# Choices: peptide, cds, cdna, genomic
# { "track": "Miro.v2", "features": [ { "uniquename": "714dcda6-2358-467d-855e-f495a82aa154" } ], "operation": "get_sequence", "type": "peptide" }:
# { "track": "Miro.v2", "features": [ { "uniquename": "714dcda6-2358-467d-855e-f495a82aa154" } ], "operation": "get_sequence", "flank": 500, "type": "genomic" }:
# This API is not behaving as expected. Wrong documentation?
data = {
'type': 'peptide',
'features': [
{'uniquename': feature_id}
]
}
data = self._update_data(data, organism, sequence)
    return self.post('getSequence', data)
def SelfReferenceProperty(label=None, collection_name=None, **attrs):
"""Create a self reference.
"""
if 'reference_class' in attrs:
raise ConfigurationError(
'Do not provide reference_class to self-reference.')
    return ReferenceProperty(_SELF_REFERENCE, label, collection_name, **attrs)
def update(self):
'''Update definitions.'''
# Download http://rebase.neb.com/rebase/link_withref to tmp
self._tmpdir = tempfile.mkdtemp()
try:
self._rebase_file = self._tmpdir + '/rebase_file'
print 'Downloading latest enzyme definitions'
url = 'http://rebase.neb.com/rebase/link_withref'
header = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(url, headers=header)
con = urllib2.urlopen(req)
with open(self._rebase_file, 'wb') as rebase_file:
rebase_file.write(con.read())
# Process into self._enzyme_dict
self._process_file()
except urllib2.HTTPError, e:
print 'HTTP Error: {} {}'.format(e.code, url)
print 'Falling back on default enzyme list'
self._enzyme_dict = coral.constants.fallback_enzymes
except urllib2.URLError, e:
print 'URL Error: {} {}'.format(e.reason, url)
print 'Falling back on default enzyme list'
self._enzyme_dict = coral.constants.fallback_enzymes
# Process into RestrictionSite objects? (depends on speed)
print 'Processing into RestrictionSite instances.'
self.restriction_sites = {}
# TODO: make sure all names are unique
for key, (site, cuts) in self._enzyme_dict.iteritems():
# Make a site
try:
r = coral.RestrictionSite(coral.DNA(site), cuts, name=key)
# Add it to dict with name as key
self.restriction_sites[key] = r
except ValueError:
# Encountered ambiguous sequence, have to ignore it until
# coral.DNA can handle ambiguous DNA
            pass
def mktmp(self):
"""
    Make the I{location} directory if it doesn't already exist.
"""
try:
if not os.path.isdir(self.location):
os.makedirs(self.location)
except:
log.debug(self.location, exc_info=1)
    return self
def _call_variants_samtools(align_bams, ref_file, items, target_regions, tx_out_file):
"""Call variants with samtools in target_regions.
Works around a GATK VCF 4.2 compatibility issue in samtools 1.0
    by removing additional 4.2-only isms from VCF header lines.
"""
config = items[0]["config"]
mpileup = prep_mpileup(align_bams, ref_file, config,
target_regions=target_regions, want_bcf=True)
bcftools = config_utils.get_program("bcftools", config)
samtools_version = programs.get_version("samtools", config=config)
if samtools_version and LooseVersion(samtools_version) <= LooseVersion("0.1.19"):
raise ValueError("samtools calling not supported with pre-1.0 samtools")
bcftools_opts = "call -v -m"
compress_cmd = "| bgzip -c" if tx_out_file.endswith(".gz") else ""
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
cmd = ("{mpileup} "
"| {bcftools} {bcftools_opts} - "
"| {fix_ambig_ref} | {fix_ambig_alt} "
"| vt normalize -n -q -r {ref_file} - "
"| sed 's/VCFv4.2/VCFv4.1/' "
"| sed 's/,Version=3>/>/' "
"| sed 's/,Version=\"3\">/>/' "
"| sed 's/Number=R/Number=./' "
"{compress_cmd} > {tx_out_file}")
    do.run(cmd.format(**locals()), "Variant calling with samtools", items[0])
def action_create(self, courseid, taskid, path):
""" Delete a file or a directory """
# the path is given by the user. Let's normalize it
path = path.strip()
if not path.startswith("/"):
path = "/" + path
want_directory = path.endswith("/")
wanted_path = self.verify_path(courseid, taskid, path, True)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Invalid new path"))
task_fs = self.task_factory.get_task_fs(courseid, taskid)
if want_directory:
task_fs.from_subfolder(wanted_path).ensure_exists()
else:
task_fs.put(wanted_path, b"")
    return self.show_tab_file(courseid, taskid)
def set_device_name(self, newname):
""" Sets internal device name. (not announced bluetooth name).
requires utf-8 encoded string. """
    return self.write(request.SetDeviceName(self.seq, *self.prep_str(newname)))
def construct_rest_of_worlds(self, excluded, fp=None, use_mp=True, simplify=True):
"""Construct many rest-of-world geometries and optionally write to filepath ``fp``.
``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``."""
geoms = {}
raw_data = []
for key in sorted(excluded):
locations = excluded[key]
for location in locations:
assert location in self.locations, "Can't find location {}".format(location)
included = self.all_faces.difference(
{face for loc in locations for face in self.data[loc]}
)
raw_data.append((key, self.faces_fp, included))
if use_mp:
with Pool(cpu_count() - 1) as pool:
results = pool.map(_union, raw_data)
geoms = dict(results)
else:
geoms = dict([_union(row) for row in raw_data])
if simplify:
geoms = {k: v.simplify(0.05) for k, v in geoms.items()}
if fp:
labels = sorted(geoms)
self.write_geoms_to_file(fp, [geoms[key] for key in labels], labels)
return fp
else:
        return geoms
def parse_qtype(self, param_type, param_value):
'''parse type of quniform or qloguniform'''
if param_type == 'quniform':
return self._parse_quniform(param_value)
if param_type == 'qloguniform':
param_value[:2] = np.log(param_value[:2])
return list(np.exp(self._parse_quniform(param_value)))
raise RuntimeError("Not supported type: %s" % param_type) | parse type of quniform or qloguniform |
def _integrate_scipy(self, intern_xout, intern_y0, intern_p,
atol=1e-8, rtol=1e-8, first_step=None, with_jacobian=None,
force_predefined=False, name=None, **kwargs):
""" Do not use directly (use ``integrate('scipy', ...)``).
Uses `scipy.integrate.ode <http://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html>`_
Parameters
----------
\*args :
See :meth:`integrate`.
name : str (default: 'lsoda'/'dopri5' when jacobian is available/not)
What integrator wrapped in scipy.integrate.ode to use.
\*\*kwargs :
Keyword arguments passed onto `set_integrator(...) <
http://docs.scipy.org/doc/scipy/reference/generated/
scipy.integrate.ode.set_integrator.html#scipy.integrate.ode.set_integrator>`_
Returns
-------
See :meth:`integrate`.
"""
from scipy.integrate import ode
ny = intern_y0.shape[-1]
nx = intern_xout.shape[-1]
results = []
for _xout, _y0, _p in zip(intern_xout, intern_y0, intern_p):
if name is None:
if self.j_cb is None:
name = 'dopri5'
else:
name = 'lsoda'
if with_jacobian is None:
if name == 'lsoda': # lsoda might call jacobian
with_jacobian = True
elif name in ('dop853', 'dopri5'):
with_jacobian = False # explicit steppers
elif name == 'vode':
with_jacobian = kwargs.get('method', 'adams') == 'bdf'
def rhs(t, y, p=()):
rhs.ncall += 1
return self.f_cb(t, y, p)
rhs.ncall = 0
if self.j_cb is not None:
def jac(t, y, p=()):
jac.ncall += 1
return self.j_cb(t, y, p)
jac.ncall = 0
r = ode(rhs, jac=jac if with_jacobian else None)
if 'lband' in kwargs or 'uband' in kwargs or 'band' in kwargs:
raise ValueError("lband and uband set locally (set `band` at initialization instead)")
if self.band is not None:
kwargs['lband'], kwargs['uband'] = self.band
r.set_integrator(name, atol=atol, rtol=rtol, **kwargs)
if len(_p) > 0:
r.set_f_params(_p)
r.set_jac_params(_p)
r.set_initial_value(_y0, _xout[0])
if nx == 2 and not force_predefined:
mode = 'adaptive'
if name in ('vode', 'lsoda'):
warnings.warn("'adaptive' mode with SciPy's integrator (vode/lsoda) may overshoot (itask=2)")
warnings.warn("'adaptive' mode with SciPy's integrator is unreliable, consider using e.g. cvode")
# vode itask 2 (may overshoot)
ysteps = [_y0]
xsteps = [_xout[0]]
while r.t < _xout[1]:
r.integrate(_xout[1], step=True)
if not r.successful():
raise RuntimeError("failed")
xsteps.append(r.t)
ysteps.append(r.y)
else:
xsteps, ysteps = [], []
def solout(x, y):
xsteps.append(x)
ysteps.append(y)
r.set_solout(solout)
r.integrate(_xout[1])
if not r.successful():
raise RuntimeError("failed")
_yout = np.array(ysteps)
_xout = np.array(xsteps)
else: # predefined
mode = 'predefined'
_yout = np.empty((nx, ny))
_yout[0, :] = _y0
for idx in range(1, nx):
r.integrate(_xout[idx])
if not r.successful():
raise RuntimeError("failed")
_yout[idx, :] = r.y
info = {
'internal_xout': _xout,
'internal_yout': _yout,
'internal_params': _p,
'success': r.successful(),
'nfev': rhs.ncall,
'n_steps': -1, # don't know how to obtain this number
'name': name,
'mode': mode,
'atol': atol,
'rtol': rtol
}
if self.j_cb is not None:
info['njev'] = jac.ncall
results.append(info)
    return results
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
    )
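# A hedged sketch of the expected input for from_dict above: the keys mirror
# the "proxies" entries of the Docker client configuration file (values are
# placeholders; ProxyConfig is assumed to accept these keyword arguments).
_proxy_settings = {
    'httpProxy': 'http://proxy.example.com:3128',
    'httpsProxy': 'https://proxy.example.com:3129',
    'noProxy': 'localhost,127.0.0.1',
}
# from_dict(_proxy_settings) maps httpProxy -> http, httpsProxy -> https,
# ftpProxy -> ftp (None here) and noProxy -> no_proxy.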
def set_value(self, value: datetime):
""" Sets the current value """
assert isinstance(value, datetime)
    self.value = value
def absent(name, orgname=None, profile='grafana'):
'''
    Ensure that a data source is absent.
name
Name of the data source to remove.
orgname
Name of the organization from which the data source should be absent.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
if isinstance(profile, string_types):
profile = __salt__['config.option'](profile)
ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
datasource = __salt__['grafana4.get_datasource'](name, orgname, profile)
if not datasource:
ret['result'] = True
ret['comment'] = 'Data source {0} already absent'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Datasource {0} will be deleted'.format(name)
return ret
__salt__['grafana4.delete_datasource'](datasource['id'], profile=profile)
ret['result'] = True
ret['changes'][name] = 'Absent'
ret['comment'] = 'Data source {0} was deleted'.format(name)
    return ret
def plot(self,
resolution_constant_regions=20,
resolution_smooth_regions=200):
"""
Return arrays x, y for plotting the piecewise constant function.
Just the minimum number of straight lines are returned if
``eps=0``, otherwise `resolution_constant_regions` plotting intervals
    are inserted in the constant regions with `resolution_smooth_regions`
plotting intervals in the smoothed regions.
"""
if self.eps == 0:
x = []; y = []
for I, value in zip(self._indicator_functions, self._values):
x.append(I.L)
y.append(value)
x.append(I.R)
y.append(value)
return x, y
else:
n = float(resolution_smooth_regions)/self.eps
if len(self.data) == 1:
return [self.L, self.R], [self._values[0], self._values[0]]
else:
x = [np.linspace(self.data[0][0], self.data[1][0]-self.eps,
resolution_constant_regions+1)]
# Iterate over all internal discontinuities
for I in self._indicator_functions[1:]:
x.append(np.linspace(I.L-self.eps, I.L+self.eps,
resolution_smooth_regions+1))
x.append(np.linspace(I.L+self.eps, I.R-self.eps,
resolution_constant_regions+1))
# Last part
x.append(np.linspace(I.R-self.eps, I.R, 3))
x = np.concatenate(x)
y = self(x)
            return x, y
def trim_sparse(M, n_std=3, s_min=None, s_max=None):
"""Apply the trimming procedure to a sparse matrix.
"""
try:
from scipy.sparse import coo_matrix
except ImportError as e:
print(str(e))
print("I am peforming dense normalization by default.")
return trim_dense(M.todense())
r = M.tocoo()
sparsity = np.array(r.sum(axis=1)).flatten()
mean = np.mean(sparsity)
std = np.std(sparsity)
if s_min is None:
s_min = mean - n_std * std
if s_max is None:
s_max = mean + n_std * std
f = (sparsity > s_min) * (sparsity < s_max)
indices = [u for u in range(len(r.data)) if f[r.row[u]] and f[r.col[u]]]
rows = np.array([r.row[i] for i in indices])
cols = np.array([r.col[j] for j in indices])
data = np.array([r.data[k] for k in indices])
N = coo_matrix((data, (rows, cols)))
    return N
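# A small sketch of trim_sparse above with explicit bounds (scipy assumed):
# row/column 2 has a much larger row sum than the others, so every entry
# touching it is dropped from the returned COO matrix.
import numpy as np
from scipy.sparse import coo_matrix

_M = coo_matrix(np.array([[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, 9]]))
_trimmed = trim_sparse(_M, s_min=0, s_max=5)
assert np.array_equal(_trimmed.toarray(), [[1, 0], [0, 1]])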
def main():
"""Provide the entry point to the subreddit_stats command."""
parser = arg_parser(usage='usage: %prog [options] SUBREDDIT VIEW')
parser.add_option('-c', '--commenters', type='int', default=10,
help='Number of top commenters to display '
'[default %default]')
parser.add_option('-d', '--distinguished', action='store_true',
                      help=('Include distinguished submissions and '
'comments (default: False). Note that regular '
'comments of distinguished submissions will still '
'be included.'))
parser.add_option('-s', '--submitters', type='int', default=10,
help='Number of top submitters to display '
'[default %default]')
options, args = parser.parse_args()
if options.verbose == 1:
logger.setLevel(logging.INFO)
elif options.verbose > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.NOTSET)
logger.addHandler(logging.StreamHandler())
if len(args) != 2:
parser.error('SUBREDDIT and VIEW must be provided')
subreddit, view = args
check_for_updates(options)
srs = SubredditStats(subreddit, options.site, options.distinguished)
result = srs.run(view, options.submitters, options.commenters)
if result:
print(result.permalink)
    return 0
def create_encoder_config(args: argparse.Namespace,
max_seq_len_source: int,
max_seq_len_target: int,
config_conv: Optional[encoder.ConvolutionalEmbeddingConfig],
num_embed_source: int) -> Tuple[encoder.EncoderConfig, int]:
"""
Create the encoder config.
:param args: Arguments as returned by argparse.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:param config_conv: The config for the convolutional encoder (optional).
:param num_embed_source: The size of the source embedding.
:return: The encoder config and the number of hidden units of the encoder.
"""
encoder_num_layers, _ = args.num_layers
config_encoder = None # type: Optional[Config]
if args.decoder_only:
if args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
encoder_num_hidden = args.transformer_model_size[0]
elif args.encoder == C.CONVOLUTION_TYPE:
encoder_num_hidden = args.cnn_num_hidden
else:
encoder_num_hidden = args.rnn_num_hidden
config_encoder = encoder.EmptyEncoderConfig(num_embed=num_embed_source,
num_hidden=encoder_num_hidden)
elif args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
encoder_transformer_preprocess, _ = args.transformer_preprocess
encoder_transformer_postprocess, _ = args.transformer_postprocess
encoder_transformer_model_size = args.transformer_model_size[0]
total_source_factor_size = sum(args.source_factors_num_embed)
if args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT and total_source_factor_size > 0:
logger.info("Encoder transformer-model-size adjusted to account for source factor embeddings: %d -> %d" % (
encoder_transformer_model_size, num_embed_source + total_source_factor_size))
encoder_transformer_model_size = num_embed_source + total_source_factor_size
config_encoder = transformer.TransformerConfig(
model_size=encoder_transformer_model_size,
attention_heads=args.transformer_attention_heads[0],
feed_forward_num_hidden=args.transformer_feed_forward_num_hidden[0],
act_type=args.transformer_activation_type,
num_layers=encoder_num_layers,
dropout_attention=args.transformer_dropout_attention,
dropout_act=args.transformer_dropout_act,
dropout_prepost=args.transformer_dropout_prepost,
positional_embedding_type=args.transformer_positional_embedding_type,
preprocess_sequence=encoder_transformer_preprocess,
postprocess_sequence=encoder_transformer_postprocess,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
conv_config=config_conv,
lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc))
encoder_num_hidden = encoder_transformer_model_size
elif args.encoder == C.CONVOLUTION_TYPE:
cnn_kernel_width_encoder, _ = args.cnn_kernel_width
cnn_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_encoder,
num_hidden=args.cnn_num_hidden,
act_type=args.cnn_activation_type,
weight_normalization=args.weight_normalization)
cnn_num_embed = num_embed_source
if args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT:
cnn_num_embed += sum(args.source_factors_num_embed)
config_encoder = encoder.ConvolutionalEncoderConfig(num_embed=cnn_num_embed,
max_seq_len_source=max_seq_len_source,
cnn_config=cnn_config,
num_layers=encoder_num_layers,
positional_embedding_type=args.cnn_positional_embedding_type)
encoder_num_hidden = args.cnn_num_hidden
else:
encoder_rnn_dropout_inputs, _ = args.rnn_dropout_inputs
encoder_rnn_dropout_states, _ = args.rnn_dropout_states
encoder_rnn_dropout_recurrent, _ = args.rnn_dropout_recurrent
config_encoder = encoder.RecurrentEncoderConfig(
rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
num_hidden=args.rnn_num_hidden,
num_layers=encoder_num_layers,
dropout_inputs=encoder_rnn_dropout_inputs,
dropout_states=encoder_rnn_dropout_states,
dropout_recurrent=encoder_rnn_dropout_recurrent,
residual=args.rnn_residual_connections,
first_residual_layer=args.rnn_first_residual_layer,
forget_bias=args.rnn_forget_bias,
lhuc=args.lhuc is not None and (C.LHUC_ENCODER in args.lhuc or C.LHUC_ALL in args.lhuc)),
conv_config=config_conv,
reverse_input=args.rnn_encoder_reverse_input)
encoder_num_hidden = args.rnn_num_hidden
    return config_encoder, encoder_num_hidden
def adjustReplicas(self,
old_required_number_of_instances: int,
new_required_number_of_instances: int):
"""
Add or remove replicas depending on `f`
"""
# TODO: refactor this
replica_num = old_required_number_of_instances
while replica_num < new_required_number_of_instances:
self.replicas.add_replica(replica_num)
self.processStashedMsgsForReplica(replica_num)
replica_num += 1
while replica_num > new_required_number_of_instances:
replica_num -= 1
self.replicas.remove_replica(replica_num)
pop_keys(self.msgsForFutureReplicas, lambda inst_id: inst_id < new_required_number_of_instances)
if len(self.primaries_disconnection_times) < new_required_number_of_instances:
self.primaries_disconnection_times.extend(
[None] * (new_required_number_of_instances - len(self.primaries_disconnection_times)))
elif len(self.primaries_disconnection_times) > new_required_number_of_instances:
        self.primaries_disconnection_times = self.primaries_disconnection_times[:new_required_number_of_instances]
def InitFromHuntObject(self,
hunt_obj,
hunt_counters=None,
with_full_summary=False):
"""Initialize API hunt object from a database hunt object.
Args:
hunt_obj: rdf_hunt_objects.Hunt to read the data from.
hunt_counters: Optional db.HuntCounters object with counters information.
with_full_summary: if True, hunt_runner_args, completion counts and a few
other fields will be filled in. The way to think about it is that with
with_full_summary==True ApiHunt will have the data to render "Hunt
Overview" page and with with_full_summary==False it will have enough
data to be rendered as a hunts list row.
Returns:
Self.
"""
self.urn = rdfvalue.RDFURN("hunts").Add(str(hunt_obj.hunt_id))
self.hunt_id = hunt_obj.hunt_id
if (hunt_obj.args.hunt_type ==
rdf_hunt_objects.HuntArguments.HuntType.STANDARD):
self.name = "GenericHunt"
else:
self.name = "VariableGenericHunt"
self.state = str(hunt_obj.hunt_state)
self.crash_limit = hunt_obj.crash_limit
self.client_limit = hunt_obj.client_limit
self.client_rate = hunt_obj.client_rate
self.created = hunt_obj.create_time
self.duration = hunt_obj.duration
self.creator = hunt_obj.creator
self.description = hunt_obj.description
self.is_robot = hunt_obj.creator in ["GRRWorker", "Cron"]
if hunt_counters is not None:
self.results_count = hunt_counters.num_results
self.clients_with_results_count = hunt_counters.num_clients_with_results
self.clients_queued_count = (
hunt_counters.num_clients - hunt_counters.num_successful_clients -
hunt_counters.num_failed_clients - hunt_counters.num_crashed_clients)
# TODO(user): remove this hack when AFF4 is gone. For regression tests
# compatibility only.
self.total_cpu_usage = hunt_counters.total_cpu_seconds or 0
self.total_net_usage = hunt_counters.total_network_bytes_sent
if with_full_summary:
self.all_clients_count = hunt_counters.num_clients
self.completed_clients_count = (
hunt_counters.num_successful_clients +
hunt_counters.num_failed_clients)
self.remaining_clients_count = (
self.all_clients_count - self.completed_clients_count)
else:
self.results_count = 0
self.clients_with_results_count = 0
self.clients_queued_count = 0
self.total_cpu_usage = 0
self.total_net_usage = 0
if with_full_summary:
self.all_clients_count = 0
self.completed_clients_count = 0
self.remaining_clients_count = 0
if hunt_obj.original_object.object_type != "UNKNOWN":
ref = ApiFlowLikeObjectReference()
self.original_object = ref.FromFlowLikeObjectReference(
hunt_obj.original_object)
if with_full_summary:
hra = self.hunt_runner_args = rdf_hunts.HuntRunnerArgs(
hunt_name=self.name,
description=hunt_obj.description,
client_rule_set=hunt_obj.client_rule_set,
crash_limit=hunt_obj.crash_limit,
avg_results_per_client_limit=hunt_obj.avg_results_per_client_limit,
avg_cpu_seconds_per_client_limit=hunt_obj
.avg_cpu_seconds_per_client_limit,
avg_network_bytes_per_client_limit=hunt_obj
.avg_network_bytes_per_client_limit,
client_rate=hunt_obj.client_rate,
original_object=hunt_obj.original_object)
if hunt_obj.HasField("output_plugins"):
hra.output_plugins = hunt_obj.output_plugins
# TODO(user): This is a backwards compatibility code. Remove
# HuntRunnerArgs from ApiHunt.
if hunt_obj.client_limit != 100:
hra.client_limit = hunt_obj.client_limit
if hunt_obj.HasField("per_client_cpu_limit"):
hra.per_client_cpu_limit = hunt_obj.per_client_cpu_limit
if hunt_obj.HasField("per_client_network_limit_bytes"):
hra.per_client_network_limit_bytes = (
hunt_obj.per_client_network_bytes_limit)
if hunt_obj.HasField("total_network_bytes_limit"):
hra.network_bytes_limit = hunt_obj.total_network_bytes_limit
self.client_rule_set = hunt_obj.client_rule_set
if (hunt_obj.args.hunt_type ==
rdf_hunt_objects.HuntArguments.HuntType.STANDARD):
self.flow_name = hunt_obj.args.standard.flow_name
self.flow_args = hunt_obj.args.standard.flow_args
    return self
def run_single(workflow, *, registry, db_file, always_cache=True):
""""Run workflow in a single thread, storing results in a Sqlite3
database.
:param workflow: Workflow or PromisedObject to be evaluated.
:param registry: serialization Registry function.
:param db_file: filename of Sqlite3 database, give `':memory:'` to
keep the database in memory only.
:param always_cache: Currently ignored. always_cache is true.
:return: Evaluated result.
"""
with JobDB(db_file, registry) as db:
job_logger = make_logger("worker", push_map, db)
result_logger = make_logger("worker", pull_map, db)
@pull
def pass_job(source):
"""Receives jobs from source, passes back results."""
for msg in source():
key, job = msg
status, retrieved_result = db.add_job_to_db(key, job)
if status == 'retrieved':
yield retrieved_result
continue
elif status == 'attached':
continue
result = run_job(key, job)
attached = db.store_result_in_db(result, always_cache=True)
yield result
yield from (ResultMessage(key, 'attached', result.value, None)
for key in attached)
scheduler = Scheduler(job_keeper=db)
queue = Queue()
job_front_end = job_logger >> queue.sink
result_front_end = queue.source >> pass_job >> result_logger
single_worker = Connection(result_front_end, job_front_end)
        return scheduler.run(single_worker, get_workflow(workflow))
def shape(self):
"""Total spaces per axis, computed recursively.
    The recursion ends at the first level that does not have a shape.
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> pspace = odl.ProductSpace(r2, r3)
>>> pspace.shape
(2,)
>>> pspace2 = odl.ProductSpace(pspace, 3)
>>> pspace2.shape
(3, 2)
If the space is a "pure" product space, shape recurses all the way
into the components:
>>> r2_2 = odl.ProductSpace(r2, 3)
>>> r2_2.shape
(3, 2)
"""
if len(self) == 0:
return ()
elif self.is_power_space:
try:
sub_shape = self[0].shape
except AttributeError:
sub_shape = ()
else:
sub_shape = ()
    return (len(self),) + sub_shape
def close(self):
""" Close the object nicely and release all the data
arrays from memory YOU CANT GET IT BACK, the pointers
and data are gone so use the getData method to get
the data array returned for future use. You can use
putData to reattach a new data array to the imageObject.
"""
if self._image is None:
return
# mcara: I think the code below is not necessary but in order to
# preserve the same functionality as the code removed below,
# I make an empty copy of the image object:
empty_image = fits.HDUList()
for u in self._image:
empty_image.append(u.__class__(data=None, header=None))
# mcara: END unnecessary code
self._image.close() #calls fits.close()
    self._image = empty_image
def list_of_mined(cls):
"""
    Provide the list of mined domains or URLs so they can be added to the
    queue.
    :return: The list of mined domains or URLs.
:rtype: list
"""
# We initiate a variable which will return the result.
result = []
if PyFunceble.CONFIGURATION["mining"]:
# The mining is activated.
if PyFunceble.INTERN["file_to_test"] in PyFunceble.INTERN["mined"]:
# The file we are testing is into our mining database.
for element in PyFunceble.INTERN["mined"][
PyFunceble.INTERN["file_to_test"]
]:
# We loop through the list of index of the file we are testing.
# We append the element of the currently read index to our result.
result.extend(
PyFunceble.INTERN["mined"][PyFunceble.INTERN["file_to_test"]][
element
]
)
# We format our result.
result = List(result).format()
# We return the result.
    return result
def render(self, container, descender, state, space_below=0,
first_line_only=False):
"""Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
        descender (float or None): descender height of the preceding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line
"""
indent_first = (float(self.get_style('indent_first', container))
if state.initial else 0)
line_width = float(container.width)
line_spacing = self.get_style('line_spacing', container)
text_align = self.get_style('text_align', container)
tab_stops = self.get_style('tab_stops', container)
if not tab_stops:
tab_width = 2 * self.get_style('font_size', container)
tab_stops = DefaultTabStops(tab_width)
# `saved_state` is updated after successfully rendering each line, so
# that when `container` overflows on rendering a line, the words in that
# line are yielded again on the next typeset() call.
saved_state = copy(state)
prev_state = copy(state)
max_line_width = 0
def typeset_line(line, last_line=False):
"""Typeset `line` and, if no exception is raised, update the
paragraph's internal rendering state."""
nonlocal state, saved_state, max_line_width, descender, space_below
max_line_width = max(max_line_width, line.cursor)
advance = (line.ascender(container) if descender is None
else line_spacing.advance(line, descender, container))
descender = line.descender(container) # descender <= 0
line.advance = advance
total_advance = advance + (space_below if last_line else 0) - descender
if container.remaining_height < total_advance:
raise EndOfContainer(saved_state)
assert container.advance2(advance)
line.typeset(container, text_align, last_line)
assert container.advance2(- descender)
state.initial = False
saved_state = copy(state)
return Line(tab_stops, line_width, container,
significant_whitespace=self.significant_whitespace)
first_line = line = Line(tab_stops, line_width, container,
indent_first, self.significant_whitespace)
while True:
try:
word = state.next_word()
except StopIteration:
break
try:
if not line.append_word(word):
for first, second in word.hyphenate(container):
if line.append_word(first):
state.prepend_word(second) # prepend second part
break
else:
state = prev_state
line = typeset_line(line)
if first_line_only:
break
continue
except NewLineException:
line.append(word.glyphs_span)
line = typeset_line(line, last_line=True)
if first_line_only:
break
prev_state = copy(state)
if line:
typeset_line(line, last_line=True)
# Correct the horizontal text placement for auto-width paragraphs
if self._width(container) == FlowableWidth.AUTO:
if text_align == TextAlign.CENTER:
container.left -= float(container.width - max_line_width) / 2
if text_align == TextAlign.RIGHT:
container.left -= float(container.width - max_line_width)
    return max_line_width, first_line.advance, descender
def get_example(cls) -> list:
"""Returns an example value for the Array type.
If an example isn't a defined attribute on the class we return
a list of 1 item containing the example value of the `items` attribute.
If `items` is None we simply return a `[1]`.
"""
if cls.example is not None:
return cls.example
if cls.items is not None:
if isinstance(cls.items, list):
return [item.get_example() for item in cls.items]
else:
return [cls.items.get_example()]
    return [1]
def _flush(self, close=False):
"""Flushes remaining output records in the output queues to plasma.
    None is used as a special type of record that is propagated from sources
    to sinks to signal the end of data in a stream.
Attributes:
close (bool): A flag denoting whether the channel should be
also marked as 'closed' (True) or not (False) after flushing.
"""
for channel in self.forward_channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_key_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.round_robin_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
            channel.queue._flush_writes()
def put(self, message):
"""
    Simple test: PUT a string to the echo endpoint.
:param message: str of the message
:return: str of the message
"""
    return self.connection.put('echo/string', data=dict(message=message))
def expand(data):
'''Generates configuration sets based on the YAML input contents
For an introduction to the YAML mark-up, just search the net. Here is one of
its references: https://en.wikipedia.org/wiki/YAML
A configuration set corresponds to settings for **all** variables in the
input template that needs replacing. For example, if your template mentions
the variables ``name`` and ``version``, then each configuration set should
yield values for both ``name`` and ``version``.
For example:
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
    This should yield the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1'},
{'name': 'john', 'version': 'v2'},
{'name': 'lisa', 'version': 'v1'},
{'name': 'lisa', 'version': 'v2'},
]
Each key in the input file should correspond to either an object or a YAML
array. If the object is a list, then we'll iterate over it for every possible
combination of elements in the lists. If the element in question is not a
list, then it is considered unique and repeated for each yielded
configuration set. Example
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
text: >
hello,
world!
    Should yield the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1', 'text': 'hello, world!'},
{'name': 'john', 'version': 'v2', 'text': 'hello, world!'},
{'name': 'lisa', 'version': 'v1', 'text': 'hello, world!'},
{'name': 'lisa', 'version': 'v2', 'text': 'hello, world!'},
]
Keys starting with one `_` (underscore) are treated as "unique" objects as
well. Example:
.. code-block:: yaml
name: [john, lisa]
version: [v1, v2]
_unique: [i1, i2]
    Should yield the following configuration sets:
.. code-block:: python
[
{'name': 'john', 'version': 'v1', '_unique': ['i1', 'i2']},
{'name': 'john', 'version': 'v2', '_unique': ['i1', 'i2']},
{'name': 'lisa', 'version': 'v1', '_unique': ['i1', 'i2']},
{'name': 'lisa', 'version': 'v2', '_unique': ['i1', 'i2']},
]
Parameters:
data (str): YAML data to be parsed
Yields:
dict: A dictionary of key-value pairs for building the templates
'''
data = _ordered_load(data, yaml.SafeLoader)
# separates "unique" objects from the ones we have to iterate
# pre-assemble return dictionary
iterables = dict()
unique = dict()
for key, value in data.items():
if isinstance(value, list) and not key.startswith('_'):
iterables[key] = value
else:
unique[key] = value
# generates all possible combinations of iterables
for values in itertools.product(*iterables.values()):
retval = dict(unique)
keys = list(iterables.keys())
retval.update(dict(zip(keys, values)))
        yield retval
def diffusion_coeff_counts(self):
"""List of tuples of (diffusion coefficient, counts) pairs.
The order of the diffusion coefficients is as in self.diffusion_coeff.
"""
return [(key, len(list(group)))
            for key, group in itertools.groupby(self.diffusion_coeff)]
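# The groupby idiom used above, shown on a plain list: consecutive equal
# coefficients are collapsed into (value, count) pairs, so identical values
# are expected to be contiguous in self.diffusion_coeff.
import itertools

_coeffs = [1.0, 1.0, 1.0, 2.5, 2.5]
_pairs = [(key, len(list(group))) for key, group in itertools.groupby(_coeffs)]
assert _pairs == [(1.0, 3), (2.5, 2)]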
def getLayout(self, algorithmName, verbose=None):
"""
Returns all the details, including names, parameters, and compatible column types for the Layout algorithm specified by the `algorithmName` parameter.
:param algorithmName: Name of the Layout algorithm
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'apply/layouts/'+str(algorithmName)+'', method="H", verbose=verbose, parse_params=False)
    return response
def FromJsonString(self, value):
"""Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems.
"""
timezone_offset = value.find('Z')
if timezone_offset == -1:
timezone_offset = value.find('+')
if timezone_offset == -1:
timezone_offset = value.rfind('-')
if timezone_offset == -1:
raise ParseError(
'Failed to parse timestamp: missing valid timezone offset.')
time_value = value[0:timezone_offset]
# Parse datetime and nanos.
point_position = time_value.find('.')
if point_position == -1:
second_value = time_value
nano_value = ''
else:
second_value = time_value[:point_position]
nano_value = time_value[point_position + 1:]
date_object = datetime.strptime(second_value, _TIMESTAMPFOMAT)
td = date_object - datetime(1970, 1, 1)
seconds = td.seconds + td.days * _SECONDS_PER_DAY
if len(nano_value) > 9:
raise ParseError(
'Failed to parse Timestamp: nanos {0} more than '
'9 fractional digits.'.format(nano_value))
if nano_value:
nanos = round(float('0.' + nano_value) * 1e9)
else:
nanos = 0
# Parse timezone offsets.
if value[timezone_offset] == 'Z':
if len(value) != timezone_offset + 1:
raise ParseError('Failed to parse timestamp: invalid trailing'
' data {0}.'.format(value))
else:
timezone = value[timezone_offset:]
pos = timezone.find(':')
if pos == -1:
raise ParseError(
'Invalid timezone offset value: {0}.'.format(timezone))
if timezone[0] == '+':
seconds -= (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
else:
seconds += (int(timezone[1:pos])*60+int(timezone[pos+1:]))*60
# Set seconds and nanos
self.seconds = int(seconds)
self.nanos = int(nanos) | Parse a RFC 3339 date string format to Timestamp.
Args:
value: A date string. Any fractional digits (or none) and any offset are
accepted as long as they fit into nano-seconds precision.
Example of accepted format: '1972-01-01T10:00:20.021-05:00'
Raises:
ParseError: On parsing problems. |
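A brief usage sketch, assuming the standard protobuf Timestamp well-known type is available; the offset in the string is folded into the seconds field while the fractional part goes into nanos:
from google.protobuf.timestamp_pb2 import Timestamp

t = Timestamp()
t.FromJsonString('1972-01-01T10:00:20.021-05:00')
print(t.seconds)  # epoch seconds with the -05:00 offset applied
print(t.nanos)    # 21000000, i.e. 0.021 s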
def cli(ctx, resource):
"""
Displays all locally cached <resource> versions available for installation.
\b
Available resources:
ips (default)
dev_tools
"""
log = logging.getLogger('ipsv.setup')
assert isinstance(ctx, Context)
resource = str(resource).lower()
if resource == 'ips':
resource = IpsManager(ctx)
for r in resource.versions.values():
click.secho(r.version.vstring, bold=True)
return
if resource in ('dev_tools', 'dev tools'):
resource = DevToolsManager(ctx)
for r in resource.versions.values():
click.secho('{v} ({id})'.format(v=r.version.vstring, id=r.version.vid), bold=True)
return | Displays all locally cached <resource> versions available for installation.
\b
Available resources:
ips (default)
dev_tools |
def update_user_type(self):
"""Return either 'tutor' or 'student' based on which radio
button is selected.
"""
if self.rb_tutor.isChecked():
self.user_type = 'tutor'
elif self.rb_student.isChecked():
self.user_type = 'student'
self.accept() | Return either 'tutor' or 'student' based on which radio
button is selected. |
def preferred_height(self, cli, width, max_available_height, wrap_lines):
"""
Preferred height: as much as needed in order to display all the completions.
"""
complete_state = cli.current_buffer.complete_state
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.current_completions) / float(column_count))) | Preferred height: as much as needed in order to display all the completions. |
def _get_default(self, obj):
''' Internal implementation of instance attribute access for default
values.
Handles bookkeeping around |PropertyContainer| value, etc.
'''
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
default = self.instance_default(obj)
if is_themed:
unstable_dict = obj._unstable_themed_values
else:
unstable_dict = obj._unstable_default_values
if self.name in unstable_dict:
return unstable_dict[self.name]
if self.property._may_have_unstable_default():
if isinstance(default, PropertyValueContainer):
default._register_owner(obj, self)
unstable_dict[self.name] = default
return default | Internal implementation of instance attribute access for default
values.
Handles bookkeeping around |PropertyContainer| value, etc.
def doDirectPayment(self, params):
"""Call PayPal DoDirectPayment method."""
defaults = {"method": "DoDirectPayment", "paymentaction": "Sale"}
required = ["creditcardtype",
"acct",
"expdate",
"cvv2",
"ipaddress",
"firstname",
"lastname",
"street",
"city",
"state",
"countrycode",
"zip",
"amt",
]
nvp_obj = self._fetch(params, required, defaults)
if nvp_obj.flag:
raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj)
# @@@ Could check cvv2match / avscode are both 'X' or '0'
# qd = django.http.QueryDict(nvp_obj.response)
# if qd.get('cvv2match') not in ['X', '0']:
# nvp_obj.set_flag("Invalid cvv2match: %s" % qd.get('cvv2match'))
# if qd.get('avscode') not in ['X', '0']:
# nvp_obj.set_flag("Invalid avscode: %s" % qd.get('avscode'))
return nvp_obj | Call PayPal DoDirectPayment method. |
def process(self):
"""Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Hunt to collect {0:d} items'.format(len(self.file_path_list)))
print('Files to be collected: {0!s}'.format(self.file_path_list))
hunt_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
hunt_args = flows_pb2.FileFinderArgs(
paths=self.file_path_list, action=hunt_action)
return self._create_hunt('FileFinder', hunt_args) | Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection. |
def get_poll(self, arg, *, request_policy=None):
"""Retrieves a poll from strawpoll.
:param arg: Either the ID of the poll or its strawpoll url.
:param request_policy: Overrides :attr:`API.requests_policy` for that \
request.
:type request_policy: Optional[:class:`RequestsPolicy`]
:raises HTTPException: Requesting the poll failed.
:returns: A poll constructed with the requested data.
:rtype: :class:`Poll`
"""
if isinstance(arg, str):
# Maybe we received an url to parse
match = self._url_re.match(arg)
if match:
arg = match.group('id')
return self._http_client.get('{}/{}'.format(self._POLLS, arg),
request_policy=request_policy,
cls=strawpoll.Poll) | Retrieves a poll from strawpoll.
:param arg: Either the ID of the poll or its strawpoll url.
:param request_policy: Overrides :attr:`API.requests_policy` for that \
request.
:type request_policy: Optional[:class:`RequestsPolicy`]
:raises HTTPException: Requesting the poll failed.
:returns: A poll constructed with the requested data.
:rtype: :class:`Poll` |
def get_last_config_update_time_output_last_config_update_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_last_config_update_time = ET.Element("get_last_config_update_time")
config = get_last_config_update_time
output = ET.SubElement(get_last_config_update_time, "output")
last_config_update_time = ET.SubElement(output, "last-config-update-time")
last_config_update_time.text = kwargs.pop('last_config_update_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def h_kinetic(T, P, MW, Hvap, f=1):
r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h = \left(\frac{2f}{2-f}\right)\left(\frac{MW}{1000\cdot 2\pi R T}
\right)^{0.5}\left(\frac{H_{vap}^2 P \cdot MW}{1000\cdot RT^2}\right)
Parameters
----------
T : float
Vapor temperature, [K]
P : float
Vapor pressure, [Pa]
MW : float
Molecular weight of the gas, [g/mol]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
f : float
Correction factor, [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
f is a correction factor for how the removal of gas particles affects the
behavior of the ideal gas in diffusing to the condensing surface. It is
quite close to one, and has not been well explored in the literature due
to the rarity of the importance of the kinetic resistance.
Examples
--------
Water at 1 bar and 300 K:
>>> h_kinetic(300, 1E5, 18.02, 2441674)
30788845.562480535
References
----------
.. [1] Berman, L. D. "On the Effect of Molecular-Kinetic Resistance upon
Heat Transfer with Condensation." International Journal of Heat and Mass
Transfer 10, no. 10 (October 1, 1967): 1463.
doi:10.1016/0017-9310(67)90033-6.
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1 edition.
Wiley-Interscience, 1991.
.. [3] Stephan, Karl. Heat Transfer in Condensation and Boiling. Translated
by C. V. Green. Softcover reprint of the original 1st ed. 1992 edition.
Berlin; New York: Springer, 2013.
'''
return (2*f)/(2-f)*(MW/(1000*2*pi*R*T))**0.5*(Hvap**2*P*MW)/(1000*R*T**2) | r'''Calculates heat transfer coefficient for condensation
of a pure chemical inside a vertical tube or tube bundle, as presented in
[2]_ according to [1]_.
.. math::
h = \left(\frac{2f}{2-f}\right)\left(\frac{MW}{1000\cdot 2\pi R T}
\right)^{0.5}\left(\frac{H_{vap}^2 P \cdot MW}{1000\cdot RT^2}\right)
Parameters
----------
T : float
Vapor temperature, [K]
P : float
Vapor pressure, [Pa]
MW : float
Molecular weight of the gas, [g/mol]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
f : float
Correction factor, [-]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
f is a correction factor for how the removal of gas particles affects the
behavior of the ideal gas in diffusing to the condensing surface. It is
quite close to one, and has not been well explored in the literature due
to the rarity of the importance of the kinetic resistance.
Examples
--------
Water at 1 bar and 300 K:
>>> h_kinetic(300, 1E5, 18.02, 2441674)
30788845.562480535
References
----------
.. [1] Berman, L. D. "On the Effect of Molecular-Kinetic Resistance upon
Heat Transfer with Condensation." International Journal of Heat and Mass
Transfer 10, no. 10 (October 1, 1967): 1463.
doi:10.1016/0017-9310(67)90033-6.
.. [2] Kakaç, Sadik, ed. Boilers, Evaporators, and Condensers. 1 edition.
Wiley-Interscience, 1991.
.. [3] Stephan, Karl. Heat Transfer in Condensation and Boiling. Translated
by C. V. Green. Softcover reprint of the original 1st ed. 1992 edition.
Berlin; New York: Springer, 2013. |
def graph_from_voxels(fg_markers,
bg_markers,
regional_term = False,
boundary_term = False,
regional_term_args = False,
boundary_term_args = False):
"""
Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood.
Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a
:math:`ndim * 2` neighbourhood.
Every voxel of the image is regarded as a node. They are connected to their immediate
neighbours via arcs. Whether two voxels are neighbours is determined using
:math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arc weights
(n-weights) are computed using the supplied ``boundary_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All voxels that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered voxels receive a maximum
(`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
All other t-weights are set using the supplied ``regional_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Parameters
----------
fg_markers : ndarray
The foreground markers as binary array of the same shape as the original image.
bg_markers : ndarray
The background markers as binary array of the same shape as the original image.
regional_term : function
This can be either `False`, in which case all t-weights are set to 0, except for
the nodes that are directly connected to the source or sink; or a function, in
which case the supplied function is used to compute the t_edges. It has to
have the following signature *regional_term(graph, regional_term_args)*, and is
supposed to compute (source_t_weight, sink_t_weight) for all voxels of the image
and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
have only to be computed for nodes where they do not equal zero. Additional
parameters can be passed to the function via the ``regional_term_args`` parameter.
boundary_term : function
This can be either `False`, in which case all n-edges, i.e. between all nodes
that are not source or sink, are set to 0; or a function, in which case the
supplied function is used to compute the edge weights. It has to have the
following signature *boundary_term(graph, boundary_term_args)*, and is supposed
to compute the edges between the graphs nodes and to add them to the supplied
`~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to
the function via the ``boundary_term_args`` parameter.
regional_term_args : tuple
Use this to pass some additional parameters to the ``regional_term`` function.
boundary_term_args : tuple
Use this to pass some additional parameters to the ``boundary_term`` function.
Returns
-------
graph : `~medpy.graphcut.maxflow.GraphDouble`
The created graph, ready to execute the graph-cut.
Raises
------
AttributeError
If an argument is malformed.
FunctionError
If one of the supplied functions returns unexpected results.
Notes
-----
If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
"""
# prepare logger
logger = Logger.getInstance()
# prepare result graph
logger.debug('Assuming {} nodes and {} edges for image of shape {}'.format(fg_markers.size, __voxel_4conectedness(fg_markers.shape), fg_markers.shape))
graph = GCGraph(fg_markers.size, __voxel_4conectedness(fg_markers.shape))
logger.info('Performing attribute tests...')
# check, set and convert all supplied parameters
fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
# set dummy functions if not supplied
if not regional_term: regional_term = __regional_term_voxel
if not boundary_term: boundary_term = __boundary_term_voxel
# check supplied functions and their signature
if not hasattr(regional_term, '__call__') or not 2 == len(inspect.getargspec(regional_term)[0]):
raise AttributeError('regional_term has to be a callable object which takes two parameters.')
if not hasattr(boundary_term, '__call__') or not 2 == len(inspect.getargspec(boundary_term)[0]):
raise AttributeError('boundary_term has to be a callable object which takes two parameters.')
logger.debug('#nodes={}, #hardwired-nodes source/sink={}/{}'.format(fg_markers.size,
len(fg_markers.ravel().nonzero()[0]),
len(bg_markers.ravel().nonzero()[0])))
# compute the weights of all edges from the source and to the sink i.e.
# compute the weights of the t_edges Wt
logger.info('Computing and adding terminal edge weights...')
regional_term(graph, regional_term_args)
# compute the weights of the edges between the neighbouring nodes i.e.
# compute the weights of the n_edges Wr
logger.info('Computing and adding inter-node edge weights...')
boundary_term(graph, boundary_term_args)
# collect all voxels that are under the foreground resp. background markers i.e.
# collect all nodes that are connected to the source resp. sink
logger.info('Setting terminal weights for the markers...')
if not 0 == scipy.count_nonzero(fg_markers):
graph.set_source_nodes(fg_markers.ravel().nonzero()[0])
if not 0 == scipy.count_nonzero(bg_markers):
graph.set_sink_nodes(bg_markers.ravel().nonzero()[0])
return graph.get_graph() | Create a graph-cut ready graph to segment a nD image using the voxel neighbourhood.
Create a `~medpy.graphcut.maxflow.GraphDouble` object for all voxels of an image with a
:math:`ndim * 2` neighbourhood.
Every voxel of the image is regarded as a node. They are connected to their immediate
neighbours via arcs. Whether two voxels are neighbours is determined using
:math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). In the next step the arc weights
(n-weights) are computed using the supplied ``boundary_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All voxels that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered voxels receive a maximum
(`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
All other t-weights are set using the supplied ``regional_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Parameters
----------
fg_markers : ndarray
The foreground markers as binary array of the same shape as the original image.
bg_markers : ndarray
The background markers as binary array of the same shape as the original image.
regional_term : function
This can be either `False`, in which case all t-weights are set to 0, except for
the nodes that are directly connected to the source or sink; or a function, in
which case the supplied function is used to compute the t_edges. It has to
have the following signature *regional_term(graph, regional_term_args)*, and is
supposed to compute (source_t_weight, sink_t_weight) for all voxels of the image
and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
have only to be computed for nodes where they do not equal zero. Additional
parameters can be passed to the function via the ``regional_term_args`` parameter.
boundary_term : function
This can be either `False`, in which case all n-edges, i.e. between all nodes
that are not source or sink, are set to 0; or a function, in which case the
supplied function is used to compute the edge weights. It has to have the
following signature *boundary_term(graph, boundary_term_args)*, and is supposed
to compute the edges between the graphs nodes and to add them to the supplied
`~medpy.graphcut.graph.GCGraph` object. Additional parameters can be passed to
the function via the ``boundary_term_args`` parameter.
regional_term_args : tuple
Use this to pass some additional parameters to the ``regional_term`` function.
boundary_term_args : tuple
Use this to pass some additional parameters to the ``boundary_term`` function.
Returns
-------
graph : `~medpy.graphcut.maxflow.GraphDouble`
The created graph, ready to execute the graph-cut.
Raises
------
AttributeError
If an argument is malformed.
FunctionError
If one of the supplied functions returns unexpected results.
Notes
-----
If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
All arcs whose weight is not explicitly set are assumed to carry a weight of zero. |
def source_filename(self, docname: str, srcdir: str):
""" Get the full filename to referenced image """
docpath = Path(srcdir, docname)
parent = docpath.parent
imgpath = parent.joinpath(self.filename)
# Does this exist?
if not imgpath.exists():
msg = f'Image does not exist at "{imgpath}"'
raise SphinxError(msg)
return imgpath | Get the full filename to referenced image |
def settings_system_update(self, data):
"""
Set system settings. Uses PUT to /settings/system interface
:Args:
* *data*: (dict) Settings dictionary as specified `here <https://cloud.knuverse.com/docs/api/#api-System_Settings-Set_System_Settings>`_.
:Returns: None
"""
data["auth_password"] = self._password
response = self._put(url.settings_system, body=data)
self._check_response(response, 200) | Set system settings. Uses PUT to /settings/system interface
:Args:
* *data*: (dict) Settings dictionary as specified `here <https://cloud.knuverse.com/docs/api/#api-System_Settings-Set_System_Settings>`_.
:Returns: None |
def state_by_node2state_by_state(tpm):
"""Convert a state-by-node TPM to a state-by-state TPM.
.. important::
A nondeterministic state-by-node TPM can have more than one
representation as a state-by-state TPM. However, the mapping can be
made to be one-to-one if we assume the TPMs to be conditionally
independent. Therefore, **this function returns the corresponding
conditionally independent state-by-state TPM.**
.. note::
The indices of the rows of the state-by-node TPM are assumed to follow
the little-endian convention, while the indices of the columns follow
the big-endian convention. The indices of the rows and columns of the
resulting state-by-state TPM both follow the big-endian convention. See
the documentation on PyPhi :ref:`tpm-conventions` for more info.
Args:
tpm (list[list] or np.ndarray): A state-by-node TPM with row indices
following the little-endian convention and column indices following
the big-endian convention.
Returns:
np.ndarray: A state-by-state TPM, with both row and column indices
following the big-endian convention.
>>> tpm = np.array([[1, 1, 0],
... [0, 0, 1],
... [0, 1, 1],
... [1, 0, 0],
... [0, 0, 1],
... [1, 0, 0],
... [1, 1, 1],
... [1, 0, 1]])
>>> state_by_node2state_by_state(tpm)
array([[0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 1., 0., 0.]])
"""
# Cast to np.array.
tpm = np.array(tpm)
# Convert to multidimensional form.
tpm = to_multidimensional(tpm)
# Get the number of nodes from the last dimension of the TPM.
N = tpm.shape[-1]
# Get the number of states.
S = 2**N
# Initialize the state-by-state TPM.
sbs_tpm = np.zeros((S, S))
if not np.any(np.logical_and(tpm < 1, tpm > 0)):
# TPM is deterministic.
for previous_state_index in range(S):
# Use the little-endian convention to get the row and column
# indices.
previous_state = le_index2state(previous_state_index, N)
current_state_index = state2le_index(tpm[previous_state])
sbs_tpm[previous_state_index, current_state_index] = 1
else:
# TPM is nondeterministic.
for previous_state_index in range(S):
# Use the little-endian convention to get the row and column
# indices.
previous_state = le_index2state(previous_state_index, N)
marginal_tpm = tpm[previous_state]
for current_state_index in range(S):
current_state = np.array(
[i for i in le_index2state(current_state_index, N)])
sbs_tpm[previous_state_index, current_state_index] = (
np.prod(marginal_tpm[current_state == 1]) *
np.prod(1 - marginal_tpm[current_state == 0]))
return sbs_tpm | Convert a state-by-node TPM to a state-by-state TPM.
.. important::
A nondeterministic state-by-node TPM can have more than one
representation as a state-by-state TPM. However, the mapping can be
made to be one-to-one if we assume the TPMs to be conditionally
independent. Therefore, **this function returns the corresponding
conditionally independent state-by-state TPM.**
.. note::
The indices of the rows of the state-by-node TPM are assumed to follow
the little-endian convention, while the indices of the columns follow
the big-endian convention. The indices of the rows and columns of the
resulting state-by-state TPM both follow the big-endian convention. See
the documentation on PyPhi :ref:`tpm-conventions` for more info.
Args:
tpm (list[list] or np.ndarray): A state-by-node TPM with row indices
following the little-endian convention and column indices following
the big-endian convention.
Returns:
np.ndarray: A state-by-state TPM, with both row and column indices
following the big-endian convention.
>>> tpm = np.array([[1, 1, 0],
... [0, 0, 1],
... [0, 1, 1],
... [1, 0, 0],
... [0, 0, 1],
... [1, 0, 0],
... [1, 1, 1],
... [1, 0, 1]])
>>> state_by_node2state_by_state(tpm)
array([[0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0., 1., 0., 0.]]) |
def exception(self, e):
"""Log an error message.
:param e: Exception to log.
"""
self.logged_exception(e)
self.logger.exception(e) | Log an error message.
:param e: Exception to log. |
def sync_header_chain(cls, path, bitcoind_server, last_block_id ):
"""
Synchronize our local block headers up to the last block ID given.
@last_block_id is *inclusive*
@bitcoind_server is host:port or just host
"""
current_block_id = SPVClient.height( path )
if current_block_id is None:
assert USE_TESTNET
current_block_id = -1
assert (current_block_id >= 0 and USE_MAINNET) or USE_TESTNET
if current_block_id < last_block_id:
if USE_MAINNET:
log.debug("Synchronize %s to %s" % (current_block_id, last_block_id))
else:
log.debug("Synchronize testnet %s to %s" % (current_block_id + 1, last_block_id ))
# need to sync
if current_block_id >= 0:
prev_block_header = SPVClient.read_header( path, current_block_id )
prev_block_hash = prev_block_header['hash']
else:
# can only happen when in testnet
prev_block_hash = GENESIS_BLOCK_HASH_TESTNET
# connect
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# timeout (10 min)
sock.settimeout(600)
bitcoind_port = 8333
if ":" in bitcoind_server:
p = bitcoind_server.split(":")
bitcoind_server = p[0]
bitcoind_port = int(p[1])
log.debug("connect to %s:%s" % (bitcoind_server, bitcoind_port))
sock.connect( (bitcoind_server, bitcoind_port) )
client = BlockHeaderClient( sock, path, prev_block_hash, last_block_id )
# get headers
client.run()
# verify headers
if SPVClient.height(path) < last_block_id:
raise Exception("Did not receive all headers up to %s (only got %s)" % (last_block_id, SPVClient.height(path)))
# defensive: make sure it's *exactly* that many blocks
rc = SPVClient.verify_header_chain( path )
if not rc:
raise Exception("Failed to verify headers (stored in '%s')" % path)
log.debug("synced headers from %s to %s in %s" % (current_block_id, last_block_id, path))
return True | Synchronize our local block headers up to the last block ID given.
@last_block_id is *inclusive*
@bitcoind_server is host:port or just host |
def _set_time_property(self, v, load=False):
"""
Setter method for time_property, mapped from YANG variable /ptp_state/time_property (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_time_property is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_time_property() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=time_property.time_property, is_container='container', presence=False, yang_name="time-property", rest_name="time-property", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-time-property', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """time_property must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=time_property.time_property, is_container='container', presence=False, yang_name="time-property", rest_name="time-property", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-time-property', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)""",
})
self.__time_property = t
if hasattr(self, '_set'):
self._set() | Setter method for time_property, mapped from YANG variable /ptp_state/time_property (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_time_property is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_time_property() directly. |
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
#print "delta,rotation,rate",delta,rotation,rate
step = rate*float(rotation)/delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self,'_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt) | Translate mouse wheel events into matplotlib events |
def get_all_chains(self):
"""Assemble and return a list of all chains for all leaf nodes to the merkle root.
"""
return [self.get_chain(i) for i in range(len(self.leaves))] | Assemble and return a list of all chains for all leaf nodes to the merkle root. |
def from_string(cls, string, *, default_func=None):
'''Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
'''
if not isinstance(string, str):
raise TypeError(f'service must be a string: {string}')
parts = string.split('://', 1)
if len(parts) == 2:
protocol, address = parts
else:
item, = parts
protocol = None
if default_func:
if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):
protocol, address = item, ''
else:
protocol, address = default_func(None, ServicePart.PROTOCOL), item
if not protocol:
raise ValueError(f'invalid service string: {string}')
if default_func:
default_func = partial(default_func, protocol.lower())
address = NetAddress.from_string(address, default_func=default_func)
return cls(protocol, address) | Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part. |
def asset_view_atype(self, ):
"""View the atype of the current asset
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_asset:
return
atype = self.cur_asset.atype
self.view_atype(atype) | View the atype of the current asset
:returns: None
:rtype: None
:raises: None |
def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Delete the DCNM OUT network and update the result. """
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS
LOG.info("out Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL
LOG.info("out Service network deletion failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | Delete the DCNM OUT network and update the result. |
def __add_item(self, item, keys=None):
""" Internal method to add an item to the multi-key dictionary"""
if(not keys or not len(keys)):
raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'
% (self.__class__.__name__, str(item)))
direct_key = tuple(keys) # put all keys in a tuple, and use it as a key
for key in keys:
key_type = str(type(key))
# store direct key as a value in an intermediate dictionary
if(not key_type in self.__dict__):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
# store the value in the actual dictionary
if(not 'items_dict' in self.__dict__):
self.items_dict = dict()
self.items_dict[direct_key] = item | Internal method to add an item to the multi-key dictionary |
def get_plugin(cls, name=None, **kwargs):
"""Returns the first format plugin whose attributes match kwargs.
For example::
get_plugin(extension="scratch14")
Will return the :class:`KurtPlugin` whose :attr:`extension
<KurtPlugin.extension>` attribute is ``"scratch14"``.
The :attr:`name <KurtPlugin.name>` is used as the ``format`` parameter
to :attr:`Project.load` and :attr:`Project.save`.
:raises: :class:`ValueError` if the format doesn't exist.
:returns: :class:`KurtPlugin`
"""
if isinstance(name, KurtPlugin):
return name
if 'extension' in kwargs:
kwargs['extension'] = kwargs['extension'].lower()
if name:
kwargs["name"] = name
if not kwargs:
raise ValueError, "No arguments"
for plugin in cls.plugins.values():
for name in kwargs:
if getattr(plugin, name) != kwargs[name]:
break
else:
return plugin
raise ValueError, "Unknown format %r" % kwargs | Returns the first format plugin whose attributes match kwargs.
For example::
get_plugin(extension="scratch14")
Will return the :class:`KurtPlugin` whose :attr:`extension
<KurtPlugin.extension>` attribute is ``"scratch14"``.
The :attr:`name <KurtPlugin.name>` is used as the ``format`` parameter
to :attr:`Project.load` and :attr:`Project.save`.
:raises: :class:`ValueError` if the format doesn't exist.
:returns: :class:`KurtPlugin` |
def getid(self, idtype):
'''
idtype in Uniq constants
'''
memorable_id = None
while memorable_id in self._ids:
l=[]
for _ in range(4):
l.append(str(randint(0, 19)))
memorable_id = ''.join(l)
self._ids.append(memorable_id)
return idtype + '-' + memorable_id | idtype in Uniq constants |
def gather_layer_info(self):
"""Extracts the tagged coiled-coil parameters for each layer."""
for i in range(len(self.cc[0])):
layer_radii = [x[i].tags['distance_to_ref_axis'] for x in self.cc]
self.radii_layers.append(layer_radii)
layer_alpha = [x[i].tags['alpha_angle_ref_axis'] for x in self.cc]
self.alpha_layers.append(layer_alpha)
layer_ca = [x[i].tags['crick_angle_ref_axis'] for x in self.cc]
self.ca_layers.append(layer_ca)
return | Extracts the tagged coiled-coil parameters for each layer. |
def authentication_required(meth):
"""Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method
"""
def check(cls, *args, **kwargs):
if cls.authenticated:
return meth(cls, *args, **kwargs)
raise Error("Authentication required")
return check | Simple class method decorator.
Checks if the client is currently connected.
:param meth: the original called method |
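A hedged usage sketch; the Client class below is illustrative, and the raised exception is the Error class defined in the same module as the decorator:
class Client:
    def __init__(self):
        self.authenticated = False

    @authentication_required
    def fetch_data(self):
        return 'secret payload'

client = Client()
try:
    client.fetch_data()          # not authenticated yet
except Exception as exc:         # Error("Authentication required") from the decorator's module
    print(exc)

client.authenticated = True
print(client.fetch_data())       # 'secret payload'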
def make_data():
"""creates example data set"""
a = { (1,1):.25, (1,2):.15, (1,3):.2,
(2,1):.3, (2,2):.3, (2,3):.1,
(3,1):.15, (3,2):.65, (3,3):.05,
(4,1):.1, (4,2):.05, (4,3):.8
}
epsilon = 0.01
I,p = multidict({1:5, 2:6, 3:8, 4:20})
K,LB = multidict({1:.2, 2:.3, 3:.2})
return I,K,a,p,epsilon,LB | creates example data set |
def apply(self, data_source):
"""
Called with the predict data (new information).
@param data_source: Either a pandas.DataFrame or a file-like object.
"""
dataframe = self.__get_dataframe(data_source, use_target=False)
dataframe = self.__cleaner.apply(dataframe)
dataframe = self.__transformer.apply(dataframe)
return dataframe | Called with the predict data (new information).
@param data_source: Either a pandas.DataFrame or a file-like object. |
def implemented(cls, for_type):
"""Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions.
"""
for function in cls.required():
if not function.implemented_for_type(for_type):
raise TypeError(
"%r doesn't implement %r so it cannot participate in "
"the protocol %r." %
(for_type, function.func.__name__, cls))
cls.register(for_type) | Assert that protocol 'cls' is implemented for type 'for_type'.
This will cause 'for_type' to be registered with the protocol 'cls'.
Subsequently, protocol.isa(for_type, cls) will return True, as will
isinstance, issubclass and others.
Raises:
TypeError if 'for_type' doesn't implement all required functions. |
def get_api_link(self):
"""
Adds a query string to the api url. At minimum adds the type=choices
argument so that the return format is json. Any other filtering
arguments calculated by the `get_qs` method are then added to the
url. It is up to the destination url to respect them as filters.
"""
url = self._api_link
if url:
qs = self.get_qs()
url = "%s?type=choices" % url
if qs:
url = "%s&%s" % (url, u'&'.join([u'%s=%s' % (k, urllib.quote(unicode(v).encode('utf8'))) \
for k, v in qs.items()]))
url = "%s&%s" % (url, u'&'.join([u'exclude=%s' % x \
for x in qs.keys()]))
return url | Adds a query string to the api url. At minimum adds the type=choices
argument so that the return format is json. Any other filtering
arguments calculated by the `get_qs` method are then added to the
url. It is up to the destination url to respect them as filters. |
def get_tac_permissions(calendar_id):
"""
Return a list of sorted Permission objects representing
the user permissions of a given Tacoma calendar.
:return: a list of trumba.Permission objects
corresponding to the given campus calendar.
None if error, [] if not exists
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
"""
return _process_get_perm_resp(
get_permissions_url,
post_tac_resource(get_permissions_url,
_create_get_perm_body(calendar_id)),
TrumbaCalendar.TAC_CAMPUS_CODE,
calendar_id) | Return a list of sorted Permission objects representing
the user permissions of a given Tacoma calendar.
:return: a list of trumba.Permission objects
corresponding to the given campus calendar.
None if error, [] if not exists
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned. |
def _adjacent_tri(self, edge, i):
"""
Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle.
"""
if not np.isscalar(i):
i = [x for x in i if x not in edge][0]
try:
pt1 = self._edges_lookup[edge]
pt2 = self._edges_lookup[(edge[1], edge[0])]
except KeyError:
return None
if pt1 == i:
return (edge[1], edge[0], pt2)
elif pt2 == i:
return (edge[1], edge[0], pt1)
else:
raise RuntimeError("Edge %s and point %d do not form a triangle "
"in this mesh." % (edge, i)) | Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle. |
def injector_gear_2_json(self):
"""
transform this local object to JSON.
:return: the JSON from this local object
"""
LOGGER.debug("InjectorCachedGear.injector_gear_2_json")
json_obj = {
'gearId': self.id,
'gearName': self.name,
'gearAdminQueue': self.admin_queue,
'gearDescription': self.description,
'running': 'true' if self.running else 'false'
}
return json_obj | transform this local object to JSON.
:return: the JSON from this local object |
def get_property(obj, name):
"""
Gets value of object property specified by its name.
:param obj: an object to read property from.
:param name: a name of the property to get.
:return: the property value or null if property doesn't exist or introspection failed.
"""
if obj == None:
raise Exception("Object cannot be null")
if name == None:
raise Exception("Property name cannot be null")
name = name.lower()
try:
for property_name in dir(obj):
if property_name.lower() != name:
continue
property = getattr(obj, property_name)
if PropertyReflector._is_property(property, property_name):
return property
except:
pass
return None | Gets value of object property specified by its name.
:param obj: an object to read property from.
:param name: a name of the property to get.
:return: the property value or null if property doesn't exist or introspection failed. |
def apply_filters(self, filters):
"""
Applies the specified filters. The filters are used to reduce the control groups
which are accessed by the get_configs, get_stats, and get_defaults methods.
"""
_configs = self.configs
_stats = self.stats
self.configs = {}
self.stats = {}
for f in filters:
if f in _configs:
self.configs[f] = _configs[f]
elif f in _stats:
self.stats[f] = _stats[f]
else:
raise NoSuchControlFileError("%s for %s" % (f, self.subsystem.name)) | Applies the specified filters. The filters are used to reduce the control groups
which are accessed by the get_configs, get_stats, and get_defaults methods.
def sun_declination(day):
"""Compute the declination angle of the sun for the given date.
Uses the Spencer Formula
(found at http://www.illustratingshadows.com/www-formulae-collection.pdf)
:param day: The datetime.date to compute the declination angle for
:returns: The angle, in degrees, of the angle of declination
"""
day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal()
day_angle = 2 * pi * day_of_year / 365
declination_radians = sum([
0.006918,
0.001480*sin(3*day_angle),
0.070257*sin(day_angle),
0.000907*sin(2*day_angle),
-0.399912*cos(day_angle),
-0.006758*cos(2*day_angle),
-0.002697*cos(3*day_angle),
])
return degrees(declination_radians) | Compute the declination angle of the sun for the given date.
Uses the Spencer Formula
(found at http://www.illustratingshadows.com/www-formulae-collection.pdf)
:param day: The datetime.date to compute the declination angle for
:returns: The angle, in degrees, of the angle of declination |
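A short usage sketch, assuming sun_declination is imported together with datetime.date; the Spencer formula peaks near +23.4 degrees around the June solstice and near -23.4 degrees around the December solstice:
from datetime import date

for d in (date(2020, 3, 20), date(2020, 6, 21), date(2020, 12, 21)):
    # Roughly 0, +23.4 and -23.4 degrees respectively.
    print(d.isoformat(), round(sun_declination(d), 1))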
def send_video_note(chat_id, video_note,
duration=None, length=None, reply_to_message_id=None, reply_markup=None, disable_notification=False,
**kwargs):
"""
Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document).
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param video_note: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended),
pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data.
:param duration: Duration of sent video in seconds
:param length: Video width and height
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type video_note: InputFile or str
:type duration: int
:type length: int
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
"""
files = None
if isinstance(video_note, InputFile):
files = [video_note]
video_note = None
elif not isinstance(video_note, str):
raise Exception('video_note must be instance of InputFile or str')
# required args
params = dict(
chat_id=chat_id,
video_note=video_note
)
# optional args
params.update(
_clean_params(
duration=duration,
length=length,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
disable_notification=disable_notification,
)
)
return TelegramBotRPCRequest('sendVideoNote', params=params, files=files, on_result=Message.from_result, **kwargs) | Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document).
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param video_note: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended),
pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data.
:param duration: Duration of sent video in seconds
:param length: Video width and height
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type video_note: InputFile or str
:type duration: int
:type length: int
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest |
def read_hotkey(suppress=True):
"""
Similar to `read_key()`, but blocks until the user presses and releases a
hotkey (or single key), then returns a string representing the hotkey
pressed.
Example:
read_hotkey()
# "ctrl+shift+p"
"""
queue = _queue.Queue()
fn = lambda e: queue.put(e) or e.event_type == KEY_DOWN
hooked = hook(fn, suppress=suppress)
while True:
event = queue.get()
if event.event_type == KEY_UP:
unhook(hooked)
with _pressed_events_lock:
names = [e.name for e in _pressed_events.values()] + [event.name]
return get_hotkey_name(names) | Similar to `read_key()`, but blocks until the user presses and releases a
hotkey (or single key), then returns a string representing the hotkey
pressed.
Example:
read_hotkey()
# "ctrl+shift+p" |
def get_mimetype(self):
"""
Mimetype is calculated based on the file's content. If ``_mimetype``
attribute is available, it will be returned (backends which store
mimetypes or can easily recognize them, should set this private
attribute to indicate that type should *NOT* be calculated).
"""
if hasattr(self, '_mimetype'):
if (isinstance(self._mimetype, (tuple, list,)) and
len(self._mimetype) == 2):
return self._mimetype
else:
raise NodeError('given _mimetype attribute must be a 2 '
'element list or tuple')
mtype, encoding = mimetypes.guess_type(self.name)
if mtype is None:
if self.is_binary:
mtype = 'application/octet-stream'
encoding = None
else:
mtype = 'text/plain'
encoding = None
return mtype, encoding | Mimetype is calculated based on the file's content. If ``_mimetype``
attribute is available, it will be returned (backends which store
mimetypes or can easily recognize them, should set this private
attribute to indicate that type should *NOT* be calculated). |
def timeit(output):
"""
If output is string, then print the string and also time used
"""
b = time.time()
yield
print output, 'time used: %.3fs' % (time.time()-b) | If output is string, then print the string and also time used |
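A small usage sketch; the yield-based body suggests the original is meant to be wrapped with contextlib.contextmanager, which is assumed here (and the print is written for Python 3):
import time
from contextlib import contextmanager

@contextmanager
def timeit(output):
    b = time.time()
    yield
    print(output, 'time used: %.3fs' % (time.time() - b))

with timeit('summing a million integers'):
    total = sum(range(10**6))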
def gps_rtk_send(self, time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses, force_mavlink1=False):
'''
RTK GPS data. Gives information on the relative baseline calculation
the GPS is reporting
time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t)
rtk_receiver_id : Identification of connected RTK receiver. (uint8_t)
wn : GPS Week Number of last baseline (uint16_t)
tow : GPS Time of Week of last baseline (uint32_t)
rtk_health : GPS-specific health report for RTK data. (uint8_t)
rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t)
nsats : Current number of sats used for RTK calculation. (uint8_t)
baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t)
baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t)
baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t)
baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t)
accuracy : Current estimate of baseline accuracy. (uint32_t)
iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t)
'''
return self.send(self.gps_rtk_encode(time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses), force_mavlink1=force_mavlink1) | RTK GPS data. Gives information on the relative baseline calculation
the GPS is reporting
time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t)
rtk_receiver_id : Identification of connected RTK receiver. (uint8_t)
wn : GPS Week Number of last baseline (uint16_t)
tow : GPS Time of Week of last baseline (uint32_t)
rtk_health : GPS-specific health report for RTK data. (uint8_t)
rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t)
nsats : Current number of sats used for RTK calculation. (uint8_t)
baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t)
baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t)
baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t)
baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t)
accuracy : Current estimate of baseline accuracy. (uint32_t)
iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t) |
def init_db_conn(connection_name, HOSTS=None):
"""
Initialize an Elasticsearch connection for each connection string
defined in the configuration file
"""
el = elasticsearch.Elasticsearch(hosts=HOSTS)
el_pool.connections[connection_name] = ElasticSearchClient(el) | Initialize an Elasticsearch connection for each connection string
defined in the configuration file |
def executor(self) -> "ThreadPoolExecutor":
"""Executor instance.
:rtype: ThreadPoolExecutor
"""
if not isinstance(self.__executor, ThreadPoolExecutor) or self.__executor.is_shutdown:
self.configure()
return self.__executor | Executor instance.
:rtype: ThreadPoolExecutor |
def has_stack(self, stack_name):
"""
Checks if a CloudFormation stack with given name exists
:param stack_name: Name or ID of the stack
:return: True if stack exists. False otherwise
"""
cf = self.cf_client
try:
resp = cf.describe_stacks(StackName=stack_name)
if len(resp["Stacks"]) != 1:
return False
# When you run CreateChangeSet on a stack that does not exist,
# CloudFormation will create a stack and set its status to
# REVIEW_IN_PROGRESS. However, this stack cannot be manipulated
# by "update" commands. Under these circumstances, we treat the
# stack as if it does not exist and call CreateChangeSet with
# ChangeSetType set to CREATE and not UPDATE.
stack = resp["Stacks"][0]
return stack["StackStatus"] != "REVIEW_IN_PROGRESS"
except botocore.exceptions.ClientError as e:
# If a stack does not exist, describe_stacks will throw an
# exception. Unfortunately we don't have a better way than parsing
# the exception msg to understand the nature of this exception.
msg = str(e)
if "Stack with id {0} does not exist".format(stack_name) in msg:
LOG.debug("Stack with id {0} does not exist".format(
stack_name))
return False
else:
# We don't know anything about this exception. Don't handle
LOG.debug("Unable to get stack details.", exc_info=e)
raise e | Checks if a CloudFormation stack with given name exists
:param stack_name: Name or ID of the stack
:return: True if stack exists. False otherwise |
def update_hosts(self, host_names):
"""Primarily for puppet-unity use.
Update the hosts for the lun if needed.
:param host_names: specify the new hosts which access the LUN.
"""
if self.host_access:
curr_hosts = [access.host.name for access in self.host_access]
else:
curr_hosts = []
if set(curr_hosts) == set(host_names):
log.info('Hosts for updating are equal to current hosts, '
'skip modification.')
return None
new_hosts = [UnityHostList.get(cli=self._cli, name=host_name)[0]
for host_name in host_names]
new_access = [{'host': item,
'accessMask': HostLUNAccessEnum.PRODUCTION}
for item in new_hosts]
resp = self.modify(host_access=new_access)
resp.raise_if_err()
return resp | Primarily for puppet-unity use.
Update the hosts for the lun if needed.
:param host_names: specify the new hosts which access the LUN. |
def getMachine(self, machineName):
"""returns a machine object for a given machine
Input:
machineName - name of the box ex: SERVER.DOMAIN.COM
"""
url = self._url + "/%s" % machineName
return Machine(url=url,
securityHandler=self._securityHandler,
initialize=True,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | returns a machine object for a given machine
Input:
machineName - name of the box ex: SERVER.DOMAIN.COM |
def pairs(args):
"""
See __doc__ for OptionParser.set_pairs().
"""
import jcvi.formats.bed
p = OptionParser(pairs.__doc__)
p.set_pairs()
opts, targs = p.parse_args(args)
if len(targs) != 1:
sys.exit(not p.print_help())
samfile, = targs
bedfile = samfile.rsplit(".", 1)[0] + ".bed"
if need_update(samfile, bedfile):
cmd = "bamToBed -i {0}".format(samfile)
sh(cmd, outfile=bedfile)
args[args.index(samfile)] = bedfile
return jcvi.formats.bed.pairs(args) | See __doc__ for OptionParser.set_pairs(). |
def svg(self, value):
"""
Set SVG field value.
If the SVG has embedded base64 elements, we extract them
to disk in order to avoid duplicating content
"""
if len(value) < 500:
self._svg = value
return
try:
root = ET.fromstring(value)
except ET.ParseError as e:
log.error("Can't parse SVG: {}".format(e))
return
# SVG is the default namespace no need to prefix it
ET.register_namespace('xmlns', "http://www.w3.org/2000/svg")
ET.register_namespace('xmlns:xlink', "http://www.w3.org/1999/xlink")
if len(root.findall("{http://www.w3.org/2000/svg}image")) == 1:
href = "{http://www.w3.org/1999/xlink}href"
elem = root.find("{http://www.w3.org/2000/svg}image")
if elem.get(href, "").startswith("data:image/"):
changed = True
data = elem.get(href, "")
extension = re.sub(r"[^a-z0-9]", "", data.split(";")[0].split("/")[1].lower())
data = base64.decodebytes(data.split(",", 1)[1].encode())
# We compute a hash of the image file to avoid duplication
filename = hashlib.md5(data).hexdigest() + "." + extension
elem.set(href, filename)
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, "wb") as f:
f.write(data)
value = filename
# We dump also large svg on disk to keep .gns3 small
if len(value) > 1000:
filename = hashlib.md5(value.encode()).hexdigest() + ".svg"
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
with open(file_path, "w+", encoding="utf-8") as f:
f.write(value)
self._svg = filename
else:
self._svg = value | Set SVG field value.
If the SVG has embedded base64 elements, we extract them
to disk in order to avoid duplicating content
def job_file(self):
"""The path to the submit description file representing this job.
"""
job_file_name = '%s.job' % (self.name)
job_file_path = os.path.join(self.initial_dir, job_file_name)
self._job_file = job_file_path
return self._job_file | The path to the submit description file representing this job. |
def add_permission(self, perm):
"""
Enables defining a Permission object for the abstract Role.
Args:
perm (object):
"""
self.Permissions(permission=perm)
PermissionCache.flush()
self.save() | Enables defining a Permission object for the abstract Role.
Args:
perm (object): |
def start_record():
"""
Install an httplib wrapper that records but does not modify calls.
"""
global record, playback, current
if record:
raise StateError("Already recording.")
if playback:
raise StateError("Currently playing back.")
record = True
current = ReplayData()
install(RecordingHTTPConnection, RecordingHTTPSConnection) | Install an httplib wrapper that records but does not modify calls. |
def zoomedHealpixMap(title, map, lon, lat, radius,
xsize=1000, **kwargs):
"""
Inputs: lon (deg), lat (deg), radius (deg)
"""
reso = 60. * 2. * radius / xsize # Deg to arcmin
hp.gnomview(map=map, rot=[lon, lat, 0], title=title, xsize=xsize, reso=reso, degree=False, **kwargs) | Inputs: lon (deg), lat (deg), radius (deg) |
def _make_sql_params(self,kw):
"""Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types"""
return ['%s=?' % k for k in kw.keys()] | Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types |
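For illustration, the placeholder list built above can feed a parametrized WHERE clause (table and column names below are made up):
kw = {'name': 'john', 'age': 33}
clauses = ['%s=?' % k for k in kw.keys()]
sql = 'SELECT * FROM people WHERE ' + ' AND '.join(clauses)
args = list(kw.values())
# sql  -> 'SELECT * FROM people WHERE name=? AND age=?'
# args -> ['john', 33]  (order matches the clause order)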
def dasopr(fname):
"""
Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dasopr_c(fname, ctypes.byref(handle))
return handle.value | Open a DAS file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
:rtype: int |
def _execute_cell(args, cell_body):
"""Implements the BigQuery cell magic used to execute BQ queries.
The supported syntax is:
%%bq execute <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq execute'.
cell_body: optional contents of the cell
Returns:
QueryResultsTable containing query result
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters') or []
if parameters:
jsonschema.validate({'parameters': parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
table_name = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
query = google.datalab.utils.commands.get_notebook_item(args['query'])
if args['verbose']:
print(query.sql)
query_params = get_query_parameters(args, cell_body)
if args['to_dataframe']:
# re-parse the int arguments because they're passed as strings
start_row = int(args['dataframe_start_row']) if args['dataframe_start_row'] else None
max_rows = int(args['dataframe_max_rows']) if args['dataframe_max_rows'] else None
output_options = QueryOutput.dataframe(start_row=start_row, max_rows=max_rows,
use_cache=not args['nocache'])
else:
output_options = QueryOutput.table(
name=table_name, mode=args['mode'], use_cache=not args['nocache'],
allow_large_results=args['large'])
context = google.datalab.utils._utils._construct_context_for_args(args)
r = query.execute(output_options, context=context, query_params=query_params)
return r.result() | Implements the BigQuery cell magic used to execute BQ queries.
The supported syntax is:
%%bq execute <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq execute'.
cell_body: optional contents of the cell
Returns:
QueryResultsTable containing query result |