code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
---|---
def ret(f, *args, **kwargs):
"""Automatically log progress on function entry and exit. Default logging
value: info. The function's return value will be included in the logs.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: INFO
*Exceptions:*
- IndexError and ValueError
- will be raised if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args.
"""
kwargs.update({'print_return': True})
return _stump(f, *args, **kwargs) | Automatically log progress on function entry and exit. Default logging
value: info. The function's return value will be included in the logs.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: INFO
*Exceptions:*
- IndexError and ValueError
- will be raised if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args. |
def get_yaml_parser_roundtrip_for_context():
"""Create a yaml parser that can serialize the pypyr Context.
Create yaml parser with get_yaml_parser_roundtrip, adding Context.
This allows the yaml parser to serialize the pypyr Context.
"""
yaml_writer = get_yaml_parser_roundtrip()
# Context is a dict data structure, so can just use a dict representer
yaml_writer.Representer.add_representer(
Context,
yamler.representer.RoundTripRepresenter.represent_dict)
return yaml_writer | Create a yaml parser that can serialize the pypyr Context.
Create yaml parser with get_yaml_parser_roundtrip, adding Context.
This allows the yaml parser to serialize the pypyr Context. |
def read_from_bpch(filename, file_position, shape, dtype, endian,
use_mmap=False):
""" Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`.
"""
offset = file_position + 4
if use_mmap:
d = np.memmap(filename, dtype=dtype, mode='r', shape=shape,
offset=offset, order='F')
else:
with FortranFile(filename, 'rb', endian) as ff:
ff.seek(file_position)
d = np.array(ff.readline('*f'))
d = d.reshape(shape, order='F')
# As a sanity check, *be sure* that the resulting data block has the
# correct shape, and fail early if it doesn't.
if (d.shape != shape):
raise IOError("Data chunk read from {} does not have the right shape,"
" (expected {} but got {})"
.format(filename, shape, d.shape))
return d | Read a chunk of data from a bpch output file.
Parameters
----------
filename : str
Path to file on disk containing the data
file_position : int
Position (bytes) where desired data chunk begins
shape : tuple of ints
Resultant (n-dimensional) shape of requested data; the chunk
will be read sequentially from disk and then re-shaped
dtype : dtype
Dtype of data; for best results, pass a dtype which includes
an endian indicator, e.g. `dtype = np.dtype('>f4')`
endian : str
Endianness of data; should be consistent with `dtype`
use_mmap : bool
Memory map the chunk of data to the file on disk, else read
immediately
Returns
-------
Array with shape `shape` and dtype `dtype` containing the requested
chunk of data from `filename`. |
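A minimal usage sketch for the reader; the file name, byte offset and block shape below are placeholders (real values come from the parsed bpch index), so this is illustrative rather than runnable as-is.
import numpy as np

# Placeholder values: in practice file_position and shape come from the bpch index.
chunk = read_from_bpch('ctm.bpch', file_position=136, shape=(72, 46, 1),
                       dtype=np.dtype('>f4'), endian='>', use_mmap=True)
print(chunk.shape)  # (72, 46, 1)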
def consolidate_output(job, config, mutect, pindel, muse):
"""
Combine the contents of separate tarball outputs into one via streaming
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str mutect: MuTect tarball FileStoreID
:param str pindel: Pindel tarball FileStoreID
:param str muse: MuSe tarball FileStoreID
"""
work_dir = job.fileStore.getLocalTempDir()
mutect_tar, pindel_tar, muse_tar = None, None, None
if mutect:
mutect_tar = job.fileStore.readGlobalFile(mutect, os.path.join(work_dir, 'mutect.tar.gz'))
if pindel:
pindel_tar = job.fileStore.readGlobalFile(pindel, os.path.join(work_dir, 'pindel.tar.gz'))
if muse:
muse_tar = job.fileStore.readGlobalFile(muse, os.path.join(work_dir, 'muse.tar.gz'))
out_tar = os.path.join(work_dir, config.uuid + '.tar.gz')
# Consolidate separate tarballs into one as streams (avoids unnecessary untarring)
tar_list = [x for x in [mutect_tar, pindel_tar, muse_tar] if x is not None]
with tarfile.open(out_tar, 'w:gz') as f_out:
for tar in tar_list:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
if tar is mutect_tar:
tarinfo.name = os.path.join(config.uuid, 'mutect', os.path.basename(tarinfo.name))
elif tar is pindel_tar:
tarinfo.name = os.path.join(config.uuid, 'pindel', os.path.basename(tarinfo.name))
else:
tarinfo.name = os.path.join(config.uuid, 'muse', os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
# Move to output location
if urlparse(config.output_dir).scheme == 's3':
job.fileStore.logToMaster('Uploading {} to S3: {}'.format(config.uuid, config.output_dir))
s3am_upload(job=job, fpath=out_tar, s3_dir=config.output_dir, num_cores=config.cores)
else:
job.fileStore.logToMaster('Moving {} to output dir: {}'.format(config.uuid, config.output_dir))
mkdir_p(config.output_dir)
copy_files(file_paths=[out_tar], output_dir=config.output_dir) | Combine the contents of separate tarball outputs into one via streaming
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str mutect: MuTect tarball FileStoreID
:param str pindel: Pindel tarball FileStoreID
:param str muse: MuSe tarball FileStoreID |
def display(self, image):
"""
Takes a :py:mod:`PIL.Image` and dumps it to a numbered PNG file.
"""
assert(image.size == self.size)
self._last_image = image
self._count += 1
filename = self._file_template.format(self._count)
image = self.preprocess(image)
surface = self.to_surface(image, alpha=self._contrast)
logger.debug("Writing: {0}".format(filename))
self._pygame.image.save(surface, filename) | Takes a :py:mod:`PIL.Image` and dumps it to a numbered PNG file. |
def crypto_sign_open(signed, pk):
"""
Verifies the signature of the signed message ``signed`` using the public
key ``pk`` and returns the unsigned message.
:param signed: bytes
:param pk: bytes
:rtype: bytes
"""
message = ffi.new("unsigned char[]", len(signed))
message_len = ffi.new("unsigned long long *")
if lib.crypto_sign_open(
message, message_len, signed, len(signed), pk) != 0:
raise exc.BadSignatureError("Signature was forged or corrupt")
return ffi.buffer(message, message_len[0])[:] | Verifies the signature of the signed message ``signed`` using the public
key ``pk`` and returns the unsigned message.
:param signed: bytes
:param pk: bytes
:rtype: bytes |
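A sign/verify round trip as a sketch, assuming the companion `crypto_sign_keypair` and `crypto_sign` bindings from PyNaCl's `nacl.bindings` are available alongside the function above.
# Assumes the sibling signing bindings; crypto_sign_open is the function shown above.
from nacl.bindings import crypto_sign, crypto_sign_keypair

pk, sk = crypto_sign_keypair()
signed = crypto_sign(b'attack at dawn', sk)
assert crypto_sign_open(signed, pk) == b'attack at dawn'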
def _prepare_load_balancers(self):
"""
Prepare load balancer variables
"""
stack = {
A.NAME: self[A.NAME],
A.VERSION: self[A.VERSION],
}
for load_balancer in self.get(R.LOAD_BALANCERS, []):
svars = {A.STACK: stack}
load_balancer[A.loadbalancer.VARS] = svars | Prepare load balancer variables |
def get_best_ip_by_real_data_fetch(_type='stock'):
"""
Time data retrieval with a specific fetch function and pick the server IP that downloads data fastest.
By default the 1-minute fetch for a specific instrument is used.
"""
from QUANTAXIS.QAUtil.QADate import QA_util_today_str
import time
# find a valid (real) trading date from two days back
pre_trade_date=QA_util_get_real_date(QA_util_today_str())
pre_trade_date=QA_util_get_real_date(pre_trade_date)
# time how long a given fetch function takes
def get_stock_data_by_ip(ips):
start=time.time()
try:
QA_fetch_get_stock_transaction('000001',pre_trade_date,pre_trade_date,2,ips['ip'],ips['port'])
end=time.time()
return end-start
except Exception:
return 9999
def get_future_data_by_ip(ips):
start=time.time()
try:
QA_fetch_get_future_transaction('RBL8',pre_trade_date,pre_trade_date,2,ips['ip'],ips['port'])
end=time.time()
return end-start
except Exception:
return 9999
func,ip_list=0,0
if _type=='stock':
func,ip_list=get_stock_data_by_ip,stock_ip_list
else:
func,ip_list=get_future_data_by_ip,future_ip_list
from pathos.multiprocessing import Pool
def multiMap(func,sequence):
res=[]
pool=Pool(4)
for i in sequence:
res.append(pool.apply_async(func,(i,)))
pool.close()
pool.join()
return list(map(lambda x:x.get(),res))
res=multiMap(func,ip_list)
index=res.index(min(res))
return ip_list[index] | Time data retrieval with a specific fetch function and pick the server IP that downloads data fastest.
By default the 1-minute fetch for a specific instrument is used. |
def cublasStbmv(handle, uplo, trans, diag, n, k, A, lda, x, incx):
"""
Matrix-vector product for real triangular-banded matrix.
"""
status = _libcublas.cublasStbmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, k, int(A), lda, int(x), incx)
cublasCheckStatus(status) | Matrix-vector product for real triangular-banded matrix. |
def matches_all_rules(self, target_filename):
"""
Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean
"""
for rule in self.match_rules:
if rule.test(target_filename) is False:
return False
self.logger.debug('{0}: {1} - {2}'.format(self.name,
os.path.basename(target_filename),
'Match'))
return True | Returns true if the given file matches all the rules in this ruleset.
:param target_filename:
:return: boolean |
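A standalone sketch of the same all-rules-must-pass logic; `RegexRule` and the file names are hypothetical stand-ins for the real rule objects this method iterates over.
import re

class RegexRule:
    """Hypothetical rule object exposing the same test() interface."""
    def __init__(self, pattern):
        self.pattern = re.compile(pattern)

    def test(self, filename):
        return bool(self.pattern.search(filename))

rules = [RegexRule(r'^sample_'), RegexRule(r'\.fastq$')]
print(all(rule.test('sample_001.fastq') for rule in rules))  # True
print(all(rule.test('sample_001.bam') for rule in rules))    # False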
def get_command(self, ctx, cmd_name):
"""Get command for click."""
path = "%s.%s" % (__name__, cmd_name)
path = path.replace("-", "_")
module = importlib.import_module(path)
return getattr(module, 'cli') | Get command for click. |
def get(obj: JsonObj, item: str, default: JsonObjTypes=None) -> JsonObjTypes:
""" Dictionary get routine """
return obj._get(item, default) | Dictionary get routine |
def SURFstar_compute_scores(inst, attr, nan_entries, num_attributes, mcmap, NN_near, NN_far, headers, class_type, X, y, labels_std, data_type):
""" Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as
'anti-scoring' of far instances outside of radius of current target instance"""
scores = np.zeros(num_attributes)
for feature_num in range(num_attributes):
if len(NN_near) > 0:
scores[feature_num] += compute_score(attr, mcmap, NN_near, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
# Note that we are using the near scoring loop in 'compute_score' and then just subtracting it here, in line with original SURF* paper.
if len(NN_far) > 0:
scores[feature_num] -= compute_score(attr, mcmap, NN_far, feature_num, inst,
nan_entries, headers, class_type, X, y, labels_std, data_type)
return scores | Unique scoring procedure for SURFstar algorithm. Scoring based on nearest neighbors within defined radius, as well as
'anti-scoring' of far instances outside of radius of current target instance |
def init_group(self, group, chunk_size,
compression=None, compression_opts=None):
"""Initializes a HDF5 group compliant with the stored data.
This method creates the datasets 'items', 'labels', 'features'
and 'index' and leaves them empty.
:param h5py.Group group: The group to initialize.
:param float chunk_size: The size of a chunk in the file (in MB).
:param str compression: Optional compression, see
:class:`h5features.writer` for details
:param str compression_opts: Optional compression options, see
:class:`h5features.writer` for details
"""
create_index(group, chunk_size)
self._entries['items'].create_dataset(
group, chunk_size, compression=compression,
compression_opts=compression_opts)
self._entries['features'].create_dataset(
group, chunk_size, compression=compression,
compression_opts=compression_opts)
# chunking the labels depends on features chunks
self._entries['labels'].create_dataset(
group, self._entries['features'].nb_per_chunk,
compression=compression,
compression_opts=compression_opts)
if self.has_properties():
self._entries['properties'].create_dataset(
group, compression=compression,
compression_opts=compression_opts) | Initializes a HDF5 group compliant with the stored data.
This method creates the datasets 'items', 'labels', 'features'
and 'index' and leaves them empty.
:param h5py.Group group: The group to initialize.
:param float chunk_size: The size of a chunk in the file (in MB).
:param str compression: Optional compression, see
:class:`h5features.writer` for details
:param str compression_opts: Optional compression options, see
:class:`h5features.writer` for details |
def pilatus_description_metadata(description):
"""Return metatata from Pilatus image description as dict.
Return metadata from Pilatus pixel array detectors by Dectris, created
by camserver or TVX software.
>>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
{'Pixel_size': (0.000172, 0.000172)}
"""
result = {}
if not description.startswith('# '):
return result
for c in '#:=,()':
description = description.replace(c, ' ')
for line in description.split('\n'):
if line[:2] != ' ':
continue
line = line.split()
name = line[0]
if line[0] not in TIFF.PILATUS_HEADER:
try:
result['DateTime'] = datetime.datetime.strptime(
' '.join(line), '%Y-%m-%dT%H %M %S.%f')
except Exception:
result[name] = ' '.join(line[1:])
continue
indices, dtype = TIFF.PILATUS_HEADER[line[0]]
if isinstance(indices[0], slice):
# assumes one slice
values = line[indices[0]]
else:
values = [line[i] for i in indices]
if dtype is float and values[0] == 'not':
values = ['NaN']
values = tuple(dtype(v) for v in values)
if dtype == str:
values = ' '.join(values)
elif len(values) == 1:
values = values[0]
result[name] = values
return result | Return metadata from Pilatus image description as dict.
Return metadata from Pilatus pixel array detectors by Dectris, created
by camserver or TVX software.
>>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
{'Pixel_size': (0.000172, 0.000172)} |
def _get_public_room(self, room_name, invitees: List[User]):
""" Obtain a public, canonically named (if possible) room and invite peers """
room_name_full = f'#{room_name}:{self._server_name}'
invitees_uids = [user.user_id for user in invitees]
for _ in range(JOIN_RETRIES):
# try joining room
try:
room = self._client.join_room(room_name_full)
except MatrixRequestError as error:
if error.code == 404:
self.log.debug(
f'No room for peer, trying to create',
room_name=room_name_full,
error=error,
)
else:
self.log.debug(
f'Error joining room',
room_name=room_name,
error=error.content,
error_code=error.code,
)
else:
# Invite users to existing room
member_ids = {user.user_id for user in room.get_joined_members(force_resync=True)}
users_to_invite = set(invitees_uids) - member_ids
self.log.debug('Inviting users', room=room, invitee_ids=users_to_invite)
for invitee_id in users_to_invite:
room.invite_user(invitee_id)
self.log.debug('Room joined successfully', room=room)
break
# if can't, try creating it
try:
room = self._client.create_room(
room_name,
invitees=invitees_uids,
is_public=True,
)
except MatrixRequestError as error:
if error.code == 409:
msg = (
'Error creating room, '
'seems to have been created by peer meanwhile, retrying.'
)
else:
msg = 'Error creating room, retrying.'
self.log.debug(
msg,
room_name=room_name,
error=error.content,
error_code=error.code,
)
else:
self.log.debug('Room created successfully', room=room, invitees=invitees)
break
else:
# if can't join nor create, create an unnamed one
room = self._client.create_room(
None,
invitees=invitees_uids,
is_public=True,
)
self.log.warning(
'Could not create nor join a named room. Successfully created an unnamed one',
room=room,
invitees=invitees,
)
return room | Obtain a public, canonically named (if possible) room and invite peers |
def import_module(module_fqname, superclasses=None):
"""Imports the module module_fqname and returns a list of defined classes
from that module. If superclasses is defined then the classes returned will
be subclasses of the specified superclass or superclasses. If superclasses
is plural it must be a tuple of classes."""
module_name = module_fqname.rpartition(".")[-1]
module = __import__(module_fqname, globals(), locals(), [module_name])
modules = [class_ for cname, class_ in
inspect.getmembers(module, inspect.isclass)
if class_.__module__ == module_fqname]
if superclasses:
modules = [m for m in modules if issubclass(m, superclasses)]
return modules | Imports the module module_fqname and returns a list of defined classes
from that module. If superclasses is defined then the classes returned will
be subclasses of the specified superclass or superclasses. If superclasses
is plural it must be a tuple of classes. |
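A runnable illustration against the standard library; exact class lists can vary slightly between Python versions.
# Classes defined directly in json.decoder (typically JSONDecodeError and JSONDecoder).
classes = import_module('json.decoder')
print(sorted(c.__name__ for c in classes))

# Restrict to subclasses of ValueError (typically just JSONDecodeError).
print(import_module('json.decoder', superclasses=ValueError))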
def queryset(self, request, queryset):
"""
That's the trick - we create self.form when Django tries to get our queryset.
This allows us to create the unbound and bound form in a single place.
"""
form = self.get_form(request)
self.form = form
start_date = form.start_date()
end_date = form.end_date()
if form.is_valid() and (start_date or end_date):
args = self.__get_filterargs(
start=start_date,
end=end_date,
)
return queryset.filter(**args) | That's the trick - we create self.form when Django tries to get our queryset.
This allows us to create the unbound and bound form in a single place. |
def crop_to_extents(img1, img2, padding):
"""Crop the images to ensure both fit within the bounding box"""
beg_coords1, end_coords1 = crop_coords(img1, padding)
beg_coords2, end_coords2 = crop_coords(img2, padding)
beg_coords = np.fmin(beg_coords1, beg_coords2)
end_coords = np.fmax(end_coords1, end_coords2)
img1 = crop_3dimage(img1, beg_coords, end_coords)
img2 = crop_3dimage(img2, beg_coords, end_coords)
return img1, img2 | Crop the images to ensure both fit within the bounding box |
def sign_url_path(url, secret_key, expire_in=None, digest=None):
# type: (str, bytes, int, Callable) -> str
"""
Sign a URL (excluding the domain and scheme).
:param url: URL to sign
:param secret_key: Secret key
:param expire_in: Expiry time.
:param digest: Specify the digest function to use; default is sha256 from hashlib
:return: Signed URL
"""
result = urlparse(url)
query_args = MultiValueDict(parse_qs(result.query))
query_args['_'] = token()
if expire_in is not None:
query_args['expires'] = int(time() + expire_in)
query_args['signature'] = _generate_signature(result.path, secret_key, query_args, digest)
return "%s?%s" % (result.path, urlencode(list(query_args.sorteditems(True)))) | Sign a URL (excluding the domain and scheme).
:param url: URL to sign
:param secret_key: Secret key
:param expire_in: Expiry time.
:param digest: Specify the digest function to use; default is sha256 from hashlib
:return: Signed URL |
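A usage sketch; the path and secret are placeholders, and the exact query-string layout depends on `token()` and the chosen digest.
# Sign a relative URL with a one-hour expiry (placeholder path and key).
signed = sign_url_path('/downloads/report.pdf?version=2', b'server-side-secret', expire_in=3600)
# Roughly: '/downloads/report.pdf?_=<nonce>&expires=<ts>&signature=<hex>&version=2'
print(signed)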
def prepare_actions(self, obs):
"""Keep a list of the past actions so they can be drawn."""
now = time.time()
while self._past_actions and self._past_actions[0].deadline < now:
self._past_actions.pop(0)
def add_act(ability_id, color, pos, timeout=1):
if ability_id:
ability = self._static_data.abilities[ability_id]
if ability.remaps_to_ability_id: # Prefer general abilities.
ability_id = ability.remaps_to_ability_id
self._past_actions.append(
PastAction(ability_id, color, pos, now, now + timeout))
for act in obs.actions:
if (act.HasField("action_raw") and
act.action_raw.HasField("unit_command") and
act.action_raw.unit_command.HasField("target_world_space_pos")):
pos = point.Point.build(
act.action_raw.unit_command.target_world_space_pos)
add_act(act.action_raw.unit_command.ability_id, colors.yellow, pos)
if act.HasField("action_feature_layer"):
act_fl = act.action_feature_layer
if act_fl.HasField("unit_command"):
if act_fl.unit_command.HasField("target_screen_coord"):
pos = self._world_to_feature_screen_px.back_pt(
point.Point.build(act_fl.unit_command.target_screen_coord))
add_act(act_fl.unit_command.ability_id, colors.cyan, pos)
elif act_fl.unit_command.HasField("target_minimap_coord"):
pos = self._world_to_feature_minimap_px.back_pt(
point.Point.build(act_fl.unit_command.target_minimap_coord))
add_act(act_fl.unit_command.ability_id, colors.cyan, pos)
else:
add_act(act_fl.unit_command.ability_id, None, None)
if (act_fl.HasField("unit_selection_point") and
act_fl.unit_selection_point.HasField("selection_screen_coord")):
pos = self._world_to_feature_screen_px.back_pt(point.Point.build(
act_fl.unit_selection_point.selection_screen_coord))
add_act(None, colors.cyan, pos)
if act_fl.HasField("unit_selection_rect"):
for r in act_fl.unit_selection_rect.selection_screen_coord:
rect = point.Rect(
self._world_to_feature_screen_px.back_pt(
point.Point.build(r.p0)),
self._world_to_feature_screen_px.back_pt(
point.Point.build(r.p1)))
add_act(None, colors.cyan, rect, 0.3)
if act.HasField("action_render"):
act_rgb = act.action_render
if act_rgb.HasField("unit_command"):
if act_rgb.unit_command.HasField("target_screen_coord"):
pos = self._world_to_rgb_screen_px.back_pt(
point.Point.build(act_rgb.unit_command.target_screen_coord))
add_act(act_rgb.unit_command.ability_id, colors.red, pos)
elif act_rgb.unit_command.HasField("target_minimap_coord"):
pos = self._world_to_rgb_minimap_px.back_pt(
point.Point.build(act_rgb.unit_command.target_minimap_coord))
add_act(act_rgb.unit_command.ability_id, colors.red, pos)
else:
add_act(act_rgb.unit_command.ability_id, None, None)
if (act_rgb.HasField("unit_selection_point") and
act_rgb.unit_selection_point.HasField("selection_screen_coord")):
pos = self._world_to_rgb_screen_px.back_pt(point.Point.build(
act_rgb.unit_selection_point.selection_screen_coord))
add_act(None, colors.red, pos)
if act_rgb.HasField("unit_selection_rect"):
for r in act_rgb.unit_selection_rect.selection_screen_coord:
rect = point.Rect(
self._world_to_rgb_screen_px.back_pt(
point.Point.build(r.p0)),
self._world_to_rgb_screen_px.back_pt(
point.Point.build(r.p1)))
add_act(None, colors.red, rect, 0.3) | Keep a list of the past actions so they can be drawn. |
def _read_n_samples(channel_file):
"""Calculate the number of samples based on the file size
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
int
number of blocks (i.e. records, in which the data is cut)
int
number of samples
"""
n_blocks = int((channel_file.stat().st_size - HDR_LENGTH) / BLK_SIZE)
n_samples = n_blocks * BLK_LENGTH
return n_blocks, n_samples | Calculate the number of samples based on the file size
Parameters
----------
channel_file : Path
path to single filename with the header
Returns
-------
int
number of blocks (i.e. records, in which the data is cut)
int
number of samples |
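The same arithmetic as a standalone sketch; `HDR_LENGTH`, `BLK_SIZE` and `BLK_LENGTH` are format constants defined elsewhere, so the numbers below are made up purely for illustration.
# Made-up constants to illustrate the size arithmetic only.
HDR_LENGTH, BLK_SIZE, BLK_LENGTH = 256, 4096, 1024

file_size = HDR_LENGTH + 10 * BLK_SIZE                 # pretend on-disk size
n_blocks = int((file_size - HDR_LENGTH) / BLK_SIZE)    # 10
n_samples = n_blocks * BLK_LENGTH                      # 10240
print(n_blocks, n_samples)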
def pattern_to_str(pattern):
"""Convert regex pattern to string.
If pattern is a string, its repr is returned;
if pattern is a compiled pattern (SRE_Pattern), the repr of its ``pattern`` attribute is returned.
:param pattern: pattern object or string
:return: str: pattern string
"""
if isinstance(pattern, str):
return repr(pattern)
else:
return repr(pattern.pattern) if pattern else None | Convert regex pattern to string.
If pattern is a string, its repr is returned;
if pattern is a compiled pattern (SRE_Pattern), the repr of its ``pattern`` attribute is returned.
:param pattern: pattern object or string
:return: str: pattern string |
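A quick check of both branches:
import re

print(pattern_to_str('abc'))               # 'abc'
print(pattern_to_str(re.compile(r'\d+')))  # '\\d+'
print(pattern_to_str(None))                # None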
def dense_to_deeper_block(dense_layer, weighted=True):
'''deeper dense layer.
'''
units = dense_layer.units
weight = np.eye(units)
bias = np.zeros(units)
new_dense_layer = StubDense(units, units)
if weighted:
new_dense_layer.set_weights(
(add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
)
return [StubReLU(), new_dense_layer] | deeper dense layer. |
def phone_numbers(self):
"""
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList
"""
if self._phone_numbers is None:
self._phone_numbers = PhoneNumberList(self)
return self._phone_numbers | :rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList |
def format(self):
"""Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
dict
A paginated output of the help command.
"""
values = {}
title = "Description"
description = self.command.description + "\n\n" + self.get_ending_note() if not self.is_cog() else inspect.getdoc(self.command)
sections = []
if isinstance(self.command, Command):
description = self.command.short_doc
sections = [{"name": "Usage", "value": self.get_command_signature()},
{"name": "More Info", "value": self.command.help.replace(self.command.short_doc, "").format(prefix=self.clean_prefix),
"inline": False}]
def category(tup):
cog = tup[1].cog_name
return cog + ':' if cog is not None else '\u200bNo Category:'
if self.is_bot():
title = self.bot.user.display_name + " Help"
data = sorted(self.filter_command_list(), key=category)
for category, commands in itertools.groupby(data, key=category):
section = {}
commands = list(commands)
if len(commands) > 0:
section['name'] = category
section['value'] = self.add_commands(commands)
section['inline'] = False
sections.append(section)
elif not sections or self.has_subcommands():
section = {"name": "Commands:", "inline": False, "value": self.add_commands(self.filter_command_list())}
sections.append(section)
values['title'] = title
values['description'] = description
values['sections'] = sections
return values | Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
dict
A paginated output of the help command. |
def crypto_aead_chacha20poly1305_ietf_encrypt(message, aad, nonce, key):
"""
Encrypt the given ``message`` using the IETF ratified chacha20poly1305
construction described in RFC7539.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes
"""
ensure(isinstance(message, bytes), 'Input message type must be bytes',
raising=exc.TypeError)
mlen = len(message)
ensure(mlen <= crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX,
'Message must be at most {0} bytes long'.format(
crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX),
raising=exc.ValueError)
ensure(isinstance(aad, bytes) or (aad is None),
'Additional data must be bytes or None',
raising=exc.TypeError)
ensure(isinstance(nonce, bytes) and
len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
'Nonce must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_NPUBBYTES),
raising=exc.TypeError)
ensure(isinstance(key, bytes) and
len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES,
'Key must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_KEYBYTES),
raising=exc.TypeError)
if aad:
_aad = aad
aalen = len(aad)
else:
_aad = ffi.NULL
aalen = 0
mxout = mlen + crypto_aead_chacha20poly1305_ietf_ABYTES
clen = ffi.new("unsigned long long *")
ciphertext = ffi.new("unsigned char[]", mxout)
res = lib.crypto_aead_chacha20poly1305_ietf_encrypt(ciphertext,
clen,
message,
mlen,
_aad,
aalen,
ffi.NULL,
nonce,
key)
ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
return ffi.buffer(ciphertext, clen[0])[:] | Encrypt the given ``message`` using the IETF ratified chacha20poly1305
construction described in RFC7539.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes |
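A hedged encrypt/decrypt round trip, assuming the matching decrypt binding and size constants are importable from `nacl.bindings` (and `nacl.utils.random`) as in PyNaCl.
# Round-trip sketch; names assumed to be exported by nacl.bindings / nacl.utils.
from nacl.bindings import (
    crypto_aead_chacha20poly1305_ietf_decrypt,
    crypto_aead_chacha20poly1305_ietf_KEYBYTES,
    crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
)
from nacl.utils import random

key = random(crypto_aead_chacha20poly1305_ietf_KEYBYTES)
nonce = random(crypto_aead_chacha20poly1305_ietf_NPUBBYTES)
ct = crypto_aead_chacha20poly1305_ietf_encrypt(b'secret', b'header', nonce, key)
assert crypto_aead_chacha20poly1305_ietf_decrypt(ct, b'header', nonce, key) == b'secret'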
def _build_tree(self):
"""
Build a full or a partial tree, depending on the groups/sub-groups specified.
"""
groups = self._groups or self.get_children_paths(self.root_path)
for group in groups:
node = Node(name=group, parent=self.root)
self.root.children.append(node)
self._init_sub_groups(node) | Build a full or a partial tree, depending on the groups/sub-groups specified. |
def execute_notebook(npth, dpth, timeout=1200, kernel='python3'):
"""
Execute the notebook at `npth` using `dpth` as the execution
directory. The execution timeout and kernel are `timeout` and
`kernel` respectively.
"""
ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel)
nb = nbformat.read(npth, as_version=4)
t0 = timer()
ep.preprocess(nb, {'metadata': {'path': dpth}})
t1 = timer()
with open(npth, 'wt') as f:
nbformat.write(nb, f)
return t1 - t0 | Execute the notebook at `npth` using `dpth` as the execution
directory. The execution timeout and kernel are `timeout` and
`kernel` respectively. |
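A usage sketch; the notebook path is a placeholder, and note that the notebook file is rewritten in place with its outputs.
# Execute a notebook in the current directory and report wall-clock time.
elapsed = execute_notebook('example.ipynb', '.', timeout=600, kernel='python3')
print('executed in {:.1f}s'.format(elapsed))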
def clear(self):
'Clear tracks in memory - all zero'
for track in self._tracks:
self._tracks[track].setall(False) | Clear tracks in memory - all zero |
def _finish_futures(self, responses):
"""Apply all the batch responses to the futures created.
:type responses: list of (headers, payload) tuples.
:param responses: List of headers and payloads from each response in
the batch.
:raises: :class:`ValueError` if no requests have been deferred.
"""
# If a bad status occurs, we track it, but don't raise an exception
# until all futures have been populated.
exception_args = None
if len(self._target_objects) != len(responses):
raise ValueError("Expected a response for every request.")
for target_object, subresponse in zip(self._target_objects, responses):
if not 200 <= subresponse.status_code < 300:
exception_args = exception_args or subresponse
elif target_object is not None:
try:
target_object._properties = subresponse.json()
except ValueError:
target_object._properties = subresponse.content
if exception_args is not None:
raise exceptions.from_http_response(exception_args) | Apply all the batch responses to the futures created.
:type responses: list of (headers, payload) tuples.
:param responses: List of headers and payloads from each response in
the batch.
:raises: :class:`ValueError` if no requests have been deferred. |
def record(self, pipeline_name, from_study):
"""
Returns the provenance record for a given pipeline
Parameters
----------
pipeline_name : str
The name of the pipeline that generated the record
from_study : str
The name of the study that the pipeline was generated from
Returns
-------
record : arcana.provenance.Record
The provenance record generated by the specified pipeline
"""
try:
return self._records[(pipeline_name, from_study)]
except KeyError:
found = []
for sname, pnames in groupby(sorted(self._records,
key=itemgetter(1)),
key=itemgetter(1)):
found.append(
"'{}' for '{}'".format("', '".join(p for p, _ in pnames),
sname))
raise ArcanaNameError(
(pipeline_name, from_study),
("{} doesn't have a provenance record for pipeline '{}' "
"for '{}' study (found {})".format(
self, pipeline_name, from_study,
'; '.join(found)))) | Returns the provenance record for a given pipeline
Parameters
----------
pipeline_name : str
The name of the pipeline that generated the record
from_study : str
The name of the study that the pipeline was generated from
Returns
-------
record : arcana.provenance.Record
The provenance record generated by the specified pipeline |
def rpm(self, vol_per_rev):
"""Return the pump speed required for the reactor's stock of material
given the volume of fluid output per revolution by the stock's pump.
:param vol_per_rev: Volume of fluid pumped per revolution (dependent on pump and tubing)
:type vol_per_rev: float
:return: Pump speed for the material stock, in revolutions per minute
:rtype: float
"""
return Stock.rpm(self, vol_per_rev, self.Q_stock()).to(u.rev/u.min) | Return the pump speed required for the reactor's stock of material
given the volume of fluid output per revolution by the stock's pump.
:param vol_per_rev: Volume of fluid pumped per revolution (dependent on pump and tubing)
:type vol_per_rev: float
:return: Pump speed for the material stock, in revolutions per minute
:rtype: float |
def parse(yaml, validate=True):
"""
Parse the given YAML data into a `Config` object, optionally validating it first.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param validate: Whether to validate the data before attempting to parse it.
:type validate: bool
:return: Config object
:rtype: valohai_yaml.objs.Config
"""
data = read_yaml(yaml)
if validate: # pragma: no branch
from .validation import validate
validate(data, raise_exc=True)
return Config.parse(data) | Parse the given YAML data into a `Config` object, optionally validating it first.
:param yaml: YAML data (either a string, a stream, or pre-parsed Python dict/list)
:type yaml: list|dict|str|file
:param validate: Whether to validate the data before attempting to parse it.
:type validate: bool
:return: Config object
:rtype: valohai_yaml.objs.Config |
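A usage sketch; the file name is illustrative.
# Parse (and validate) a valohai.yaml-style config from an open file handle.
with open('valohai.yaml') as f:
    config = parse(f)
print(type(config).__name__)  # Config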
def build_ellipse_model(shape, isolist, fill=0., high_harmonics=False):
"""
Build an elliptical model galaxy image from a list of isophotes.
For each ellipse in the input isophote list the algorithm fills the
output image array with the corresponding isophotal intensity.
Pixels in the output array are in general only partially covered by
the isophote "pixel". The algorithm takes care of this partial
pixel coverage by keeping track of how much intensity was added to
each pixel by storing the partial area information in an auxiliary
array. The information in this array is then used to normalize the
pixel intensities.
Parameters
----------
shape : 2-tuple
The (ny, nx) shape of the array used to generate the input
``isolist``.
isolist : `~photutils.isophote.IsophoteList` instance
The isophote list created by the `~photutils.isophote.Ellipse`
class.
fill : float, optional
The constant value to fill empty pixels. If an output pixel has
no contribution from any isophote, it will be assigned this
value. The default is 0.
high_harmonics : bool, optional
Whether to add the higher-order harmonics (i.e. ``a3``, ``b3``,
``a4``, and ``b4``; see `~photutils.isophote.Isophote` for
details) to the result.
Returns
-------
result : 2D `~numpy.ndarray`
The image with the model galaxy.
"""
from scipy.interpolate import LSQUnivariateSpline
# the target grid is spaced in 0.1 pixel intervals so as
# to ensure no gaps will result on the output array.
finely_spaced_sma = np.arange(isolist[0].sma, isolist[-1].sma, 0.1)
# interpolate ellipse parameters
# End points must be discarded, but how many?
# This seems to work so far
nodes = isolist.sma[2:-2]
intens_array = LSQUnivariateSpline(
isolist.sma, isolist.intens, nodes)(finely_spaced_sma)
eps_array = LSQUnivariateSpline(
isolist.sma, isolist.eps, nodes)(finely_spaced_sma)
pa_array = LSQUnivariateSpline(
isolist.sma, isolist.pa, nodes)(finely_spaced_sma)
x0_array = LSQUnivariateSpline(
isolist.sma, isolist.x0, nodes)(finely_spaced_sma)
y0_array = LSQUnivariateSpline(
isolist.sma, isolist.y0, nodes)(finely_spaced_sma)
grad_array = LSQUnivariateSpline(
isolist.sma, isolist.grad, nodes)(finely_spaced_sma)
a3_array = LSQUnivariateSpline(
isolist.sma, isolist.a3, nodes)(finely_spaced_sma)
b3_array = LSQUnivariateSpline(
isolist.sma, isolist.b3, nodes)(finely_spaced_sma)
a4_array = LSQUnivariateSpline(
isolist.sma, isolist.a4, nodes)(finely_spaced_sma)
b4_array = LSQUnivariateSpline(
isolist.sma, isolist.b4, nodes)(finely_spaced_sma)
# Return deviations from ellipticity to their original amplitude meaning
a3_array = -a3_array * grad_array * finely_spaced_sma
b3_array = -b3_array * grad_array * finely_spaced_sma
a4_array = -a4_array * grad_array * finely_spaced_sma
b4_array = -b4_array * grad_array * finely_spaced_sma
# correct deviations caused by fluctuations in spline solution
eps_array[np.where(eps_array < 0.)] = 0.
result = np.zeros(shape=shape)
weight = np.zeros(shape=shape)
eps_array[np.where(eps_array < 0.)] = 0.05
# for each interpolated isophote, generate intensity values on the
# output image array
# for index in range(len(finely_spaced_sma)):
for index in range(1, len(finely_spaced_sma)):
sma0 = finely_spaced_sma[index]
eps = eps_array[index]
pa = pa_array[index]
x0 = x0_array[index]
y0 = y0_array[index]
geometry = EllipseGeometry(x0, y0, sma0, eps, pa)
intens = intens_array[index]
# scan angles. Need to go a bit beyond full circle to ensure
# full coverage.
r = sma0
phi = 0.
while (phi <= 2*np.pi + geometry._phi_min):
# we might want to add the third and fourth harmonics
# to the basic isophotal intensity.
harm = 0.
if high_harmonics:
harm = (a3_array[index] * np.sin(3.*phi) +
b3_array[index] * np.cos(3.*phi) +
a4_array[index] * np.sin(4.*phi) +
b4_array[index] * np.cos(4.*phi) / 4.)
# get image coordinates of (r, phi) pixel
x = r * np.cos(phi + pa) + x0
y = r * np.sin(phi + pa) + y0
i = int(x)
j = int(y)
# if outside image boundaries, ignore.
if (i > 0 and i < shape[1] - 1 and j > 0 and j < shape[0] - 1):
# get fractional deviations relative to target array
fx = x - float(i)
fy = y - float(j)
# add up the isophote contribution to the overlapping pixels
result[j, i] += (intens + harm) * (1. - fy) * (1. - fx)
result[j, i + 1] += (intens + harm) * (1. - fy) * fx
result[j + 1, i] += (intens + harm) * fy * (1. - fx)
result[j + 1, i + 1] += (intens + harm) * fy * fx
# add up the fractional area contribution to the
# overlapping pixels
weight[j, i] += (1. - fy) * (1. - fx)
weight[j, i + 1] += (1. - fy) * fx
weight[j + 1, i] += fy * (1. - fx)
weight[j + 1, i + 1] += fy * fx
# step towards next pixel on ellipse
phi = max((phi + 0.75 / r), geometry._phi_min)
r = geometry.radius(phi)
# zero weight values must be set to 1.
weight[np.where(weight <= 0.)] = 1.
# normalize
result /= weight
# fill value
result[np.where(result == 0.)] = fill
return result | Build an elliptical model galaxy image from a list of isophotes.
For each ellipse in the input isophote list the algorithm fills the
output image array with the corresponding isophotal intensity.
Pixels in the output array are in general only partially covered by
the isophote "pixel". The algorithm takes care of this partial
pixel coverage by keeping track of how much intensity was added to
each pixel by storing the partial area information in an auxiliary
array. The information in this array is then used to normalize the
pixel intensities.
Parameters
----------
shape : 2-tuple
The (ny, nx) shape of the array used to generate the input
``isolist``.
isolist : `~photutils.isophote.IsophoteList` instance
The isophote list created by the `~photutils.isophote.Ellipse`
class.
fill : float, optional
The constant value to fill empty pixels. If an output pixel has
no contribution from any isophote, it will be assigned this
value. The default is 0.
high_harmonics : bool, optional
Whether to add the higher-order harmonics (i.e. ``a3``, ``b3``,
``a4``, and ``b4``; see `~photutils.isophote.Isophote` for
details) to the result.
Returns
-------
result : 2D `~numpy.ndarray`
The image with the model galaxy. |
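A hedged end-to-end sketch assuming the public `photutils.isophote` API (`EllipseGeometry`, `Ellipse.fit_image`); the synthetic Gaussian image and fit parameters are illustrative, and convergence depends on the data.
import numpy as np
from photutils.isophote import Ellipse, EllipseGeometry

# Synthetic elliptical source: a 2-D Gaussian with axis ratio ~0.8.
y, x = np.mgrid[0:256, 0:256]
data = 100.0 * np.exp(-(((x - 128) / 40.0) ** 2 + ((y - 128) / 32.0) ** 2))

geometry = EllipseGeometry(x0=128, y0=128, sma=20, eps=0.2, pa=0.0)
isolist = Ellipse(data, geometry).fit_image()
model = build_ellipse_model(data.shape, isolist)
residual = data - model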
def run_strelka(job, tumor_bam, normal_bam, univ_options, strelka_options, split=True):
"""
Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome
vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running strelka
on every chromosome
perchrom_strelka:
|- 'chr1':
| |-'snvs': fsID
| +-'indels': fsID
|- 'chr2':
| |-'snvs': fsID
| +-'indels': fsID
|-...
|
+- 'chrM':
|-'snvs': fsID
+-'indels': fsID
:rtype: toil.fileStore.FileID|dict
"""
if strelka_options['chromosomes']:
chromosomes = strelka_options['chromosomes']
else:
chromosomes = sample_chromosomes(job, strelka_options['genome_fai'])
num_cores = min(len(chromosomes), univ_options['max_cores'])
strelka = job.wrapJobFn(run_strelka_full, tumor_bam, normal_bam, univ_options,
strelka_options,
disk=PromisedRequirement(strelka_disk,
tumor_bam['tumor_dna_fix_pg_sorted.bam'],
normal_bam['normal_dna_fix_pg_sorted.bam'],
strelka_options['genome_fasta']),
memory='6G',
cores=num_cores)
job.addChild(strelka)
if split:
unmerge_strelka = job.wrapJobFn(wrap_unmerge, strelka.rv(), chromosomes, strelka_options,
univ_options).encapsulate()
strelka.addChild(unmerge_strelka)
return unmerge_strelka.rv()
else:
return strelka.rv() | Run the strelka subgraph on the DNA bams. Optionally split the results into per-chromosome
vcfs.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict strelka_options: Options specific to strelka
:param bool split: Should the results be split into perchrom vcfs?
:return: Either the fsID to the genome-level vcf or a dict of results from running strelka
on every chromosome
perchrom_strelka:
|- 'chr1':
| |-'snvs': fsID
| +-'indels': fsID
|- 'chr2':
| |-'snvs': fsID
| +-'indels': fsID
|-...
|
+- 'chrM':
|-'snvs': fsID
+-'indels': fsID
:rtype: toil.fileStore.FileID|dict |
def type_stmt(self, stmt, p_elem, pset):
"""Handle ``type`` statement.
Built-in types are handled by one of the specific type
callback methods defined below.
"""
typedef = stmt.i_typedef
if typedef and not stmt.i_is_derived: # just ref
uname, dic = self.unique_def_name(typedef)
if uname not in dic:
self.install_def(uname, typedef, dic)
SchemaNode("ref", p_elem).set_attr("name", uname)
defst = typedef.search_one("default")
if defst:
dic[uname].default = defst.arg
occur = 1
else:
occur = dic[uname].occur
if occur > 0: self.propagate_occur(p_elem, occur)
return
chain = [stmt]
tdefault = None
while typedef:
type_ = typedef.search_one("type")
chain.insert(0, type_)
if tdefault is None:
tdef = typedef.search_one("default")
if tdef:
tdefault = tdef.arg
typedef = type_.i_typedef
if tdefault and p_elem.occur == 0:
p_elem.default = tdefault
self.propagate_occur(p_elem, 1)
self.type_handler[chain[0].arg](chain, p_elem) | Handle ``type`` statement.
Built-in types are handled by one of the specific type
callback methods defined below. |
def daily_from_hourly(df):
"""Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily)
"""
df_daily = pd.DataFrame()
if 'temp' in df:
df_daily['temp'] = df.temp.resample('D').mean()
df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()
if 'precip' in df:
df_daily['precip'] = df.precip.resample('D').sum()
if 'glob' in df:
df_daily['glob'] = df.glob.resample('D').mean()
if 'hum' in df:
    df_daily['hum'] = df.hum.resample('D').mean()
    df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()
    df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()
if 'wind' in df:
df_daily['wind'] = df.wind.resample('D').mean()
if 'ssd' in df:
df_daily['ssd'] = df.ssd.resample('D').sum() / 60 # minutes to hours
df_daily.index.name = None
return df_daily | Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily) |
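A runnable sketch with two days of synthetic hourly data; only the columns present ('temp', 'precip') are aggregated.
import numpy as np
import pandas as pd

idx = pd.date_range('2019-01-01', periods=48, freq='H')
df = pd.DataFrame({'temp': 10 + 5 * np.sin(np.linspace(0, 4 * np.pi, 48)),
                   'precip': np.random.rand(48)},
                  index=idx)
print(daily_from_hourly(df))  # two daily rows aggregated from the hourly input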
def _create_create_tracking_event(instance):
"""
Create a TrackingEvent and TrackedFieldModification for a CREATE event.
"""
event = _create_event(instance, CREATE)
for field in instance._tracked_fields:
if not isinstance(instance._meta.get_field(field), ManyToManyField):
_create_tracked_field(event, instance, field) | Create a TrackingEvent and TrackedFieldModification for a CREATE event. |
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, character)`
"""
for text_idx, text in enumerate(texts):
if self.lower:
text = text.lower()
for char in text:
yield text_idx, char | Yields tokens from texts as `(text_idx, character)` |
def stringize(
self,
rnf_profile,
):
"""Create RNF representation of this segment.
Args:
rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
"""
coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))
return "({},{},{},{},{})".format(
str(self.genome_id).zfill(rnf_profile.genome_id_width),
str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction,
str(self.left).zfill(coor_width),
str(self.right).zfill(coor_width)
) | Create RNF representation of this segment.
Args:
rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). |
def _initial_guess(self, countsmat):
"""Generate an initial guess for \theta.
"""
if self.theta_ is not None:
return self.theta_
if self.guess == 'log':
transmat, pi = _transmat_mle_prinz(countsmat)
K = np.real(scipy.linalg.logm(transmat)) / self.lag_time
elif self.guess == 'pseudo':
transmat, pi = _transmat_mle_prinz(countsmat)
K = (transmat - np.eye(self.n_states_)) / self.lag_time
elif isinstance(self.guess, np.ndarray):
pi = _solve_ratemat_eigensystem(self.guess)[1][:, 0]
K = self.guess
S = np.multiply(np.sqrt(np.outer(pi, 1/pi)), K)
sflat = np.maximum(S[np.triu_indices_from(countsmat, k=1)], 0)
theta0 = np.concatenate((sflat, np.log(pi)))
return theta0 | Generate an initial guess for \theta. |
def get_stp_mst_detail_output_msti_port_transmitted_stp_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
transmitted_stp_type = ET.SubElement(port, "transmitted-stp-type")
transmitted_stp_type.text = kwargs.pop('transmitted_stp_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def number_peaks(self, x, n=None):
"""
As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L1003>`_
Calculates the number of peaks of at least support n in the time series x. A peak of support n is defined \
as a subsequence of x where a value occurs, which is bigger than its n neighbours to the left and to the right.
Hence in the sequence
>>> x = [3, 0, 0, 4, 0, 0, 13]
4 is a peak of support 1 and 2 because in the subsequences
>>> [0, 4, 0]
>>> [0, 0, 4, 0, 0]
4 is still the highest value. Here, 4 is not a peak of support 3 because 13 is the 3rd neighbour to the \
right of 4 and it is bigger than 4.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param n: the support of the peak
:type n: int
:return: the value of this feature
:rtype: float
"""
if n is None:
n = 5
peaks = feature_calculators.number_peaks(x, n)
logging.debug("agg linear trend by tsfresh calculated")
return peaks | As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\
feature_calculators.py#L1003>`_
Calculates the number of peaks of at least support n in the time series x. A peak of support n is defined \
as a subsequence of x where a value occurs, which is bigger than its n neighbours to the left and to the right.
Hence in the sequence
>>> x = [3, 0, 0, 4, 0, 0, 13]
4 is a peak of support 1 and 2 because in the subsequences
>>> [0, 4, 0]
>>> [0, 0, 4, 0, 0]
4 is still the highest value. Here, 4 is not a peak of support 3 because 13 is the 3rd neighbour to the \
right of 4 and it is bigger than 4.
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param n: the support of the peak
:type n: int
:return: the value of this feature
:rtype: float |
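The underlying tsfresh calculator applied to the docstring's example series; the wrapper above simply defaults `n` to 5 and logs.
import numpy as np
from tsfresh.feature_extraction import feature_calculators

x = np.array([3, 0, 0, 4, 0, 0, 13])
print(feature_calculators.number_peaks(x, 1))  # 1  (only the 4 beats both of its neighbours)
print(feature_calculators.number_peaks(x, 3))  # 0  (13, the 3rd right neighbour, exceeds 4)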
def file(cls, path, encoding=None, parser=None):
"""Set a file as a source.
Files are parsed as literal Python dicts by default; this behaviour
can be configured.
Args:
path: The path to the file to be parsed
encoding: The encoding of the file.
Defaults to 'raw'. Available built-in values: 'ini', 'json', 'yaml'.
Custom value can be used in conjunction with parser.
parser: A parser function for a custom encoder.
It is expected to return a dict containing the parsed values
when called with the contents of the file as an argument.
"""
cls.__hierarchy.append(file.File(path, encoding, parser)) | Set a file as a source.
Files are parsed as literal Python dicts by default; this behaviour
can be configured.
Args:
path: The path to the file to be parsed
encoding: The encoding of the file.
Defaults to 'raw'. Available built-in values: 'ini', 'json', 'yaml'.
Custom value can be used in conjunction with parser.
parser: A parser function for a custom encoder.
It is expected to return a dict containing the parsed values
when called with the contents of the file as an argument. |
def make_sshable(c):
"""
Set up passwordless SSH keypair & authorized_hosts access to localhost.
"""
user = c.travis.sudo.user
home = "~{0}".format(user)
# Run sudo() as the new sudo user; means less chown'ing, etc.
c.config.sudo.user = user
ssh_dir = "{0}/.ssh".format(home)
# TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
for cmd in ("mkdir {0}", "chmod 0700 {0}"):
c.sudo(cmd.format(ssh_dir, user))
c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir)) | Set up passwordless SSH keypair & authorized_hosts access to localhost. |
def flush(self, objects=None, batch_size=None, **kwargs):
''' flush objects stored in self.store or those passed in'''
batch_size = batch_size or self.config.get('batch_size')
# if we're flushing these from self.store, we'll want to
# pop them later.
if objects:
from_store = False
else:
from_store = True
objects = self.itervalues()
# sort by _oid for grouping by _oid below
objects = sorted(objects, key=lambda x: x['_oid'])
batch, _ids = [], []
# batch in groups with _oid, since upsert's delete
# all _oid rows when autosnap=False!
for key, group in groupby(objects, lambda x: x['_oid']):
_grouped = list(group)
if len(batch) + len(_grouped) > batch_size:
logger.debug("Upserting %s objects" % len(batch))
_ = self.upsert(objects=batch, **kwargs)
logger.debug("... done upserting %s objects" % len(batch))
_ids.extend(_)
# start a new batch
batch = _grouped
else:
# extend existing batch, since still will be < batch_size
batch.extend(_grouped)
else:
if batch:
# get the last batch too
logger.debug("Upserting last batch of %s objects" % len(batch))
_ = self.upsert(objects=batch, **kwargs)
_ids.extend(_)
logger.debug("... Finished upserting all objects!")
if from_store:
for _id in _ids:
# try to pop the _id's flushed from store; warn / ignore
# the KeyError if they're not there
try:
self.store.pop(_id)
except KeyError:
logger.warn(
"failed to pop {} from self.store!".format(_id))
return sorted(_ids) | flush objects stored in self.store or those passed in |
def registration_options(self):
"""Gathers values for common attributes between the
registration model and this instance.
"""
registration_options = {}
rs = self.registration_model()
for k, v in self.__dict__.items():
if k not in DEFAULT_BASE_FIELDS + ['_state']:
try:
getattr(rs, k)
registration_options.update({k: v})
except AttributeError:
pass
registration_identifier = registration_options.get(
'registration_identifier')
if registration_identifier:
registration_options['registration_identifier'] = self.to_string(
registration_identifier)
return registration_options | Gathers values for common attributes between the
registration model and this instance. |
def point_dist(pt1, pt2):
""" Calculate the Euclidean distance between two n-D points.
|pt1 - pt2|
.. todo:: Complete point_dist docstring
"""
# Imports
from scipy import linalg as spla
dist = spla.norm(point_displ(pt1, pt2))
return dist | Calculate the Euclidean distance between two n-D points.
|pt1 - pt2|
.. todo:: Complete point_dist docstring |
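For reference, the same distance computed directly, assuming `point_displ(pt1, pt2)` is an element-wise difference of the coordinate arrays.
import numpy as np
from scipy import linalg as spla

pt1 = np.array([0.0, 0.0, 0.0])
pt2 = np.array([1.0, 2.0, 2.0])
print(spla.norm(pt1 - pt2))  # 3.0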
def teardown_logical_port_connectivity(self, context, port_db,
hosting_device_id):
"""Removes connectivity for a logical port.
Unplugs the corresponding data interface from the VM.
"""
if port_db is None or port_db.get('id') is None:
LOG.warning("Port id is None! Cannot remove port "
"from hosting_device:%s", hosting_device_id)
return
hosting_port_id = port_db.hosting_info.hosting_port.id
try:
self._dev_mgr.svc_vm_mgr.interface_detach(hosting_device_id,
hosting_port_id)
self._gt_pool.spawn_n(self._cleanup_hosting_port, context,
hosting_port_id)
LOG.debug("Teardown logicalport completed for port:%s", port_db.id)
except Exception as e:
LOG.error("Failed to detach interface corresponding to port:"
"%(p_id)s on hosting device:%(hd_id)s due to "
"error %(error)s", {'p_id': hosting_port_id,
'hd_id': hosting_device_id,
'error': str(e)}) | Removes connectivity for a logical port.
Unplugs the corresponding data interface from the VM. |
def _gcs_get_key_names(bucket, pattern):
""" Get names of all Google Cloud Storage keys in a specified bucket that match a pattern. """
return [obj.metadata.name for obj in _gcs_get_keys(bucket, pattern)] | Get names of all Google Cloud Storage keys in a specified bucket that match a pattern. |
def close(self):
"""Close the pooled connection."""
# Instead of actually closing the connection,
# return it to the pool so it can be reused.
if self._con is not None:
self._pool.cache(self._con)
self._con = None | Close the pooled connection. |
def getratio(self, code) :
""" Get ratio of code and pattern matched
"""
if len(code) == 0 : return 0
code_replaced = self.prog.sub('', code)
return (len(code) - len(code_replaced)) / len(code) | Get ratio of code and pattern matched |
def convert(cls, **kwargsql):
"""
:param dict kwargsql:
Kwargsql expression to convert
:return:
filter to be used in :py:method:`pymongo.collection.find`
:rtype: dict
"""
filters = []
for k, v in kwargsql.items():
terms = k.split('__')
if terms[-1] in cls.KWARGQL_SUPPORTED_MONGO_OPS:
v = {
'$' + terms[-1]: v
}
if terms[-1] == 'exists':
v['$exists'] = bool(v['$exists'])
terms = terms[:-1]
elif terms[-1] in cls.KWARGSQL_REGEX_OPS:
config = cls.KWARGSQL_REGEX_OPS[terms[-1]]
pattern = '^{prefix}{pattern}{suffix}$'.format(
prefix=config.get('prefix', ''),
pattern=re.escape(v),
suffix=config.get('suffix', '')
)
v = {
'$regex': pattern,
'$options': config.get('options', ''),
}
terms = terms[:-1]
k = '.'.join(terms)
filters.append({k: v})
if len(filters) == 0:
return {}
if len(filters) == 1:
return filters[0]
else:
return {
'$and': filters
} | :param dict kwargsql:
Kwargsql expression to convert
:return:
filter to be used in :py:method:`pymongo.collection.find`
:rtype: dict |
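A hedged illustration of the mapping, assuming 'gte' and 'in' appear in `KWARGQL_SUPPORTED_MONGO_OPS`; `KwargSQL` is a placeholder name for the class that defines this method.
# Placeholder class name; operator support is assumed, output order follows keyword order.
mongo_filter = KwargSQL.convert(age__gte=18, tags__in=['a', 'b'])
# Expected shape: {'$and': [{'age': {'$gte': 18}}, {'tags': {'$in': ['a', 'b']}}]}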
def load(cls, config: Optional[Config] = None):
"""Load a DataFlowKernel.
Args:
- config (Config) : Configuration to load. This config will be passed to a
new DataFlowKernel instantiation which will be set as the active DataFlowKernel.
Returns:
- DataFlowKernel : The loaded DataFlowKernel object.
"""
if cls._dfk is not None:
raise RuntimeError('Config has already been loaded')
if config is None:
cls._dfk = DataFlowKernel(Config())
else:
cls._dfk = DataFlowKernel(config)
return cls._dfk | Load a DataFlowKernel.
Args:
- config (Config) : Configuration to load. This config will be passed to a
new DataFlowKernel instantiation which will be set as the active DataFlowKernel.
Returns:
- DataFlowKernel : The loaded DataFlowKernel object. |
def switch_training(self, flag):
"""
Switch training mode.
:param flag: switch on training mode when flag is True.
"""
if self._is_training == flag: return
self._is_training = flag
if flag:
self._training_flag.set_value(1)
else:
self._training_flag.set_value(0) | Switch training mode.
:param flag: switch on training mode when flag is True. |
def export_coreml(self, filename):
"""
Save the model in Core ML format.
See Also
--------
save
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
import coremltools
# First define three internal helper functions
# Internal helper function
def _create_vision_feature_print_scene():
prob_name = self.target + 'Probability'
#
# Setup the top level (pipeline classifier) spec
#
top_spec = coremltools.proto.Model_pb2.Model()
top_spec.specificationVersion = 3
desc = top_spec.description
desc.output.add().name = prob_name
desc.output.add().name = self.target
desc.predictedFeatureName = self.target
desc.predictedProbabilitiesName = prob_name
input = desc.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
input.type.imageType.colorSpace = BGR_VALUE
#
# VisionFeaturePrint extractor
#
pipelineClassifier = top_spec.pipelineClassifier
scene_print = pipelineClassifier.pipeline.models.add()
scene_print.specificationVersion = 3
scene_print.visionFeaturePrint.scene.version = 1
input = scene_print.description.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
input.type.imageType.colorSpace = BGR_VALUE
output = scene_print.description.output.add()
output.name = "output_name"
DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
output.type.multiArrayType.shape.append(2048)
#
# Neural Network Classifier, which is just logistic regression, in order to use GPUs
#
temp = top_spec.pipelineClassifier.pipeline.models.add()
temp.specificationVersion = 3
# Empty inner product layer
nn_spec = temp.neuralNetworkClassifier
feature_layer = nn_spec.layers.add()
feature_layer.name = "feature_layer"
feature_layer.input.append("output_name")
feature_layer.output.append("softmax_input")
fc_layer_params = feature_layer.innerProduct
fc_layer_params.inputChannels = 2048
# Softmax layer
softmax = nn_spec.layers.add()
softmax.name = "softmax"
softmax.softmax.MergeFromString(b'')
softmax.input.append("softmax_input")
softmax.output.append(prob_name)
input = temp.description.input.add()
input.name = "output_name"
input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
input.type.multiArrayType.shape.append(2048)
# Set outputs
desc = temp.description
prob_output = desc.output.add()
prob_output.name = prob_name
label_output = desc.output.add()
label_output.name = self.target
if type(self.classifier.classes[0]) == int:
prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')
label_output.type.int64Type.MergeFromString(b'')
else:
prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')
label_output.type.stringType.MergeFromString(b'')
temp.description.predictedFeatureName = self.target
temp.description.predictedProbabilitiesName = prob_name
return top_spec
# Internal helper function
def _update_last_two_layers(nn_spec):
# Replace the softmax layer with new coeffients
num_classes = self.num_classes
fc_layer = nn_spec.layers[-2]
fc_layer_params = fc_layer.innerProduct
fc_layer_params.outputChannels = self.classifier.num_classes
inputChannels = fc_layer_params.inputChannels
fc_layer_params.hasBias = True
coefs = self.classifier.coefficients
weights = fc_layer_params.weights
bias = fc_layer_params.bias
del weights.floatValue[:]
del bias.floatValue[:]
import numpy as np
W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(
inputChannels, num_classes - 1, order = 'F')
b = coefs[coefs['index'] == None]['value']
Wa = np.hstack((np.zeros((inputChannels, 1)), W))
weights.floatValue.extend(Wa.flatten(order = 'F'))
bias.floatValue.extend([0.0] + list(b))
# Internal helper function
def _set_inputs_outputs_and_metadata(spec, nn_spec):
# Replace the classifier with the new classes
class_labels = self.classifier.classes
probOutput = spec.description.output[0]
classLabel = spec.description.output[1]
probOutput.type.dictionaryType.MergeFromString(b'')
if type(class_labels[0]) == int:
nn_spec.ClearField('int64ClassLabels')
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
classLabel.type.int64Type.MergeFromString(b'')
del nn_spec.int64ClassLabels.vector[:]
for c in class_labels:
nn_spec.int64ClassLabels.vector.append(c)
else:
nn_spec.ClearField('stringClassLabels')
probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
classLabel.type.stringType.MergeFromString(b'')
del nn_spec.stringClassLabels.vector[:]
for c in class_labels:
nn_spec.stringClassLabels.vector.append(c)
prob_name = self.target + 'Probability'
label_name = self.target
old_output_name = nn_spec.layers[-1].name
coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)
coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)
if nn_spec.layers[-1].name == old_output_name:
nn_spec.layers[-1].name = prob_name
if nn_spec.labelProbabilityLayerName == old_output_name:
nn_spec.labelProbabilityLayerName = prob_name
coremltools.models.utils.rename_feature(spec, 'data', self.feature)
if len(nn_spec.preprocessing) > 0:
nn_spec.preprocessing[0].featureName = self.feature
mlmodel = coremltools.models.MLModel(spec)
model_type = 'image classifier (%s)' % self.model
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
mlmodel.input_description[self.feature] = u'Input image'
mlmodel.output_description[prob_name] = 'Prediction probabilities'
mlmodel.output_description[label_name] = 'Class label of top prediction'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
'model': self.model,
'target': self.target,
'features': self.feature,
'max_iterations': str(self.max_iterations),
}, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)
return mlmodel
# main part of the export_coreml function
if self.model in _pre_trained_models.MODELS:
ptModel = _pre_trained_models.MODELS[self.model]()
feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)
coreml_model = feature_extractor.get_coreml_model()
spec = coreml_model.get_spec()
nn_spec = spec.neuralNetworkClassifier
else: # model == VisionFeaturePrint_Scene
spec = _create_vision_feature_print_scene()
nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier
_update_last_two_layers(nn_spec)
mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)
mlmodel.save(filename) | Save the model in Core ML format.
See Also
--------
save
Examples
--------
>>> model.export_coreml('myModel.mlmodel') |
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable') | Checks what gets registered as an unpacker. |
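A minimal standalone sketch of the extension-conflict check, assuming a plain dict registry shaped like _UNPACK_FORMATS; ValueError stands in for the RegistryError raised above, and the built-in callable() replaces the older collections.Callable test.
UNPACK_FORMATS = {
    'gztar': (['.tar.gz', '.tgz'], lambda path: None, []),
}

def check_unpack_options(extensions, function):
    # Map every already-registered extension back to its format name.
    existing = {}
    for name, (exts, _func, _extra) in UNPACK_FORMATS.items():
        for ext in exts:
            existing[ext] = name
    for extension in extensions:
        if extension in existing:
            raise ValueError('%s is already registered for "%s"'
                             % (extension, existing[extension]))
    if not callable(function):
        raise TypeError('The registered function must be a callable')

check_unpack_options(['.zip'], lambda path: None)        # passes silently
try:
    check_unpack_options(['.tgz'], lambda path: None)
except ValueError as exc:
    print(exc)                                           # .tgz is already registered for "gztar"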
def get_sites(self, filter_func=lambda x: True):
"""
Returns a list of TSquareSite objects that represent the sites available
to a user.
@param filter_func - A function taking in a Site object as a parameter
that returns a True or False, depending on whether
or not that site should be returned by this
function. Filter_func should be used to create
filters on the list of sites (i.e. user's
preferences on what sites to display by default).
If not specified, no filter is applied.
@returns - A list of TSquareSite objects encapsulating t-square's JSON
response.
"""
response = self._session.get(BASE_URL_TSQUARE + 'site.json')
response.raise_for_status() # raise an exception if not 200: OK
site_list = response.json()['site_collection']
if not site_list:
# this means that this t-square session expired. It's up
# to the user to re-authenticate.
self._authenticated = False
raise SessionExpiredException('The session has expired')
result_list = []
for site in site_list:
t_site = TSquareSite(**site)
if not hasattr(t_site, "props"):
t_site.props = {}
if not 'banner-crn' in t_site.props:
t_site.props['banner-crn'] = None
if not 'term' in t_site.props:
t_site.props['term'] = None
if not 'term_eid' in t_site.props:
t_site.props['term_eid'] = None
if filter_func(t_site):
result_list.append(t_site)
return result_list | Returns a list of TSquareSite objects that represent the sites available
to a user.
@param filter_func - A function taking in a Site object as a parameter
that returns a True or False, depending on whether
or not that site should be returned by this
function. Filter_func should be used to create
filters on the list of sites (i.e. user's
preferences on what sites to display by default).
If not specified, no filter is applied.
@returns - A list of TSquareSite objects encapsulating t-square's JSON
response. |
def _handle_struct_ref(self, node, scope, ctxt, stream):
"""TODO: Docstring for _handle_struct_ref.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling struct ref")
# name
# field
struct = self._handle_node(node.name, scope, ctxt, stream)
try:
sub_field = getattr(struct, node.field.name)
except AttributeError as e:
# should be able to access implicit array items by index OR
# access the last one's members directly without index
#
# E.g.:
#
# local int total_length = 0;
# while(!FEof()) {
# HEADER header;
# total_length += header.length;
# }
if isinstance(struct, fields.Array) and struct.implicit:
last_item = struct[-1]
sub_field = getattr(last_item, node.field.name)
else:
raise
return sub_field | TODO: Docstring for _handle_struct_ref.
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO |
def delete_api_model(restApiId, modelName, region=None, key=None, keyid=None, profile=None):
'''
Delete a model identified by name in a given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_model restApiId modelName
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_model(restApiId=restApiId, modelName=modelName)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | Delete a model identified by name in a given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.delete_api_model restApiId modelName |
def quote_value(value):
"""
convert values to mysql code for the same
mostly delegate directly to the mysql lib, but some exceptions exist
"""
try:
if value == None:
return SQL_NULL
elif isinstance(value, SQL):
return quote_sql(value.template, value.param)
elif is_text(value):
return SQL("'" + "".join(ESCAPE_DCT.get(c, c) for c in value) + "'")
elif is_data(value):
return quote_value(json_encode(value))
elif is_number(value):
return SQL(text_type(value))
elif isinstance(value, datetime):
return SQL("str_to_date('" + value.strftime("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
elif isinstance(value, Date):
return SQL("str_to_date('" + value.format("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
elif hasattr(value, '__iter__'):
return quote_value(json_encode(value))
else:
return quote_value(text_type(value))
except Exception as e:
Log.error("problem quoting SQL {{value}}", value=repr(value), cause=e) | convert values to mysql code for the same
mostly delegate directly to the mysql lib, but some exceptions exist |
def _watch(inotify, watchers, watch_flags, s3_uploader):
"""As soon as a user is done with a file under `/opt/ml/output/intermediate`
we would get notified by using inotify. We would copy this file under
`/opt/ml/output/intermediate/.tmp.sagemaker_s3_sync` folder preserving
the same folder structure to prevent it from being further modified.
As we copy the file we would add timestamp with microseconds precision
to avoid modification during s3 upload.
After that we copy the file to s3 in a separate Thread.
We keep the queue of the files we need to move as FIFO.
"""
# initialize a thread pool with 1 worker
# to be used for uploading files to s3 in a separate thread
executor = futures.ThreadPoolExecutor(max_workers=1)
last_pass_done = False
stop_file_exists = False
# after we see stop file do one additional pass to make sure we didn't miss anything
while not last_pass_done:
# wait for any events in the directory for 1 sec and then re-check exit conditions
for event in inotify.read(timeout=1000):
for flag in inotify_simple.flags.from_mask(event.mask):
# if new directory was created traverse the directory tree to recursively add all
# created folders to the watchers list.
                # Upload files to s3 if there are any files.
                # There is a potential race condition if we upload the file and then see a
                # notification for it, but that should not cause any problems because when
                # we copy files to the temp dir we add a unique timestamp with microsecond
                # precision.
if flag is inotify_simple.flags.ISDIR and inotify_simple.flags.CREATE & event.mask:
path = os.path.join(intermediate_path, watchers[event.wd], event.name)
for folder, dirs, files in os.walk(path):
wd = inotify.add_watch(folder, watch_flags)
relative_path = os.path.relpath(folder, intermediate_path)
watchers[wd] = relative_path
tmp_sub_folder = os.path.join(tmp_dir_path, relative_path)
if not os.path.exists(tmp_sub_folder):
os.makedirs(tmp_sub_folder)
for file in files:
_copy_file(executor, s3_uploader, relative_path, file)
elif flag is inotify_simple.flags.CLOSE_WRITE:
_copy_file(executor, s3_uploader, watchers[event.wd], event.name)
last_pass_done = stop_file_exists
stop_file_exists = os.path.exists(success_file_path) or os.path.exists(failure_file_path)
# wait for all the s3 upload tasks to finish and shutdown the executor
executor.shutdown(wait=True) | As soon as a user is done with a file under `/opt/ml/output/intermediate`
we would get notified by using inotify. We would copy this file under
`/opt/ml/output/intermediate/.tmp.sagemaker_s3_sync` folder preserving
the same folder structure to prevent it from being further modified.
As we copy the file we would add timestamp with microseconds precision
to avoid modification during s3 upload.
After that we copy the file to s3 in a separate Thread.
We keep the queue of the files we need to move as FIFO. |
def ssh_invite(ctx, code_length, user, **kwargs):
"""
Add a public-key to a ~/.ssh/authorized_keys file
"""
for name, value in kwargs.items():
setattr(ctx.obj, name, value)
from . import cmd_ssh
ctx.obj.code_length = code_length
ctx.obj.ssh_user = user
return go(cmd_ssh.invite, ctx.obj) | Add a public-key to a ~/.ssh/authorized_keys file |
def identity_to_string(identity_dict):
"""Dump Identity dictionary into its string representation."""
result = []
if identity_dict.get('proto'):
result.append(identity_dict['proto'] + '://')
if identity_dict.get('user'):
result.append(identity_dict['user'] + '@')
result.append(identity_dict['host'])
if identity_dict.get('port'):
result.append(':' + identity_dict['port'])
if identity_dict.get('path'):
result.append(identity_dict['path'])
log.debug('identity parts: %s', result)
return ''.join(result) | Dump Identity dictionary into its string representation. |
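Because the function only concatenates optional dictionary fields, it is easy to exercise standalone; the sample identity below is invented for illustration and the log.debug call is dropped.
def identity_to_string(identity_dict):
    # Rebuild '<proto>://<user>@<host>:<port><path>' from the parsed parts.
    parts = []
    if identity_dict.get('proto'):
        parts.append(identity_dict['proto'] + '://')
    if identity_dict.get('user'):
        parts.append(identity_dict['user'] + '@')
    parts.append(identity_dict['host'])
    if identity_dict.get('port'):
        parts.append(':' + identity_dict['port'])
    if identity_dict.get('path'):
        parts.append(identity_dict['path'])
    return ''.join(parts)

print(identity_to_string({'proto': 'ssh', 'user': 'git', 'host': 'github.com',
                          'port': '22', 'path': '/repo.git'}))
# ssh://git@github.com:22/repo.git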
def add_item(self, key, value, cache_name=None):
"""Add an item into the given cache.
This is a commodity option (mainly useful for testing) allowing you
to store an item in a uWSGI cache during startup.
:param str|unicode key:
:param value:
:param str|unicode cache_name: If not set, default will be used.
"""
cache_name = cache_name or ''
value = '%s %s=%s' % (cache_name, key, value)
self._set('add-cache-item', value.strip(), multi=True)
return self._section | Add an item into the given cache.
This is a commodity option (mainly useful for testing) allowing you
to store an item in a uWSGI cache during startup.
:param str|unicode key:
:param value:
:param str|unicode cache_name: If not set, default will be used. |
def contract_multiplier(self):
"""
        [float] Contract multiplier, e.g. the multiplier for CSI 300 index futures is 300.0 (futures only)
"""
try:
return self.__dict__["contract_multiplier"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'contract_multiplier' ".format(self.order_book_id)
            ) | [float] Contract multiplier, e.g. the multiplier for CSI 300 index futures is 300.0 (futures only) |
def get_params(self, deep=False):
"""Get parameters."""
params = super(XGBModel, self).get_params(deep=deep)
if isinstance(self.kwargs, dict): # if kwargs is a dict, update params accordingly
params.update(self.kwargs)
if params['missing'] is np.nan:
params['missing'] = None # sklearn doesn't handle nan. see #4725
if not params.get('eval_metric', True):
del params['eval_metric'] # don't give as None param to Booster
return params | Get parameters. |
def create(self, name, script, params=None):
''' /v1/startupscript/create
POST - account
Create a startup script
Link: https://www.vultr.com/api/#startupscript_create
'''
params = update_params(params, {
'name': name,
'script': script
})
return self.request('/v1/startupscript/create', params, 'POST') | /v1/startupscript/create
POST - account
Create a startup script
Link: https://www.vultr.com/api/#startupscript_create |
def thaw_from_args(parser):
"""Adds command line options for things related to inline thawing
of icefiles"""
parser.add_argument('--thaw-from',
dest='thaw_from',
help='Thaw an ICE file containing secrets')
parser.add_argument('--gpg-password-path',
dest='gpg_pass_path',
help='Vault path of GPG passphrase location') | Adds command line options for things related to inline thawing
of icefiles |
def get_language():
"""Create or retrieve the parse tree for defining a sensor graph."""
global sensor_graph, statement
if sensor_graph is not None:
return sensor_graph
_create_primitives()
_create_simple_statements()
_create_block_bnf()
sensor_graph = ZeroOrMore(statement) + StringEnd()
sensor_graph.ignore(comment)
return sensor_graph | Create or retrieve the parse tree for defining a sensor graph. |
def invertible_1x1_conv(name, x, reverse=False):
"""1X1 convolution on x.
The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where
1. P is a permutation matrix.
2. L is a lower triangular matrix with diagonal entries unity.
  3. U is an upper triangular matrix whose diagonal entries are zero.
4. s is a vector.
sign(s) and P are fixed and the remaining are optimized. P, L, U and s are
initialized by the PLU decomposition of a random rotation matrix.
Args:
name: scope
x: Input Tensor.
reverse: whether the pass is from z -> x or x -> z.
Returns:
x_conv: x after a 1X1 convolution is applied on x.
objective: sum(log(s))
"""
_, height, width, channels = common_layers.shape_list(x)
w_shape = [channels, channels]
# Random rotation-matrix Q
random_matrix = np.random.rand(channels, channels)
np_w = scipy.linalg.qr(random_matrix)[0].astype("float32")
# Initialize P,L,U and s from the LU decomposition of a random rotation matrix
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
p = tf.get_variable("P", initializer=np_p, trainable=False)
l = tf.get_variable("L", initializer=np_l)
sign_s = tf.get_variable(
"sign_S", initializer=np_sign_s, trainable=False)
log_s = tf.get_variable("log_S", initializer=np_log_s)
u = tf.get_variable("U", initializer=np_u)
# W = P * L * (U + sign_s * exp(log_s))
l_mask = np.tril(np.ones([channels, channels], dtype=np.float32), -1)
l = l * l_mask + tf.eye(channels, channels)
u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s))
w = tf.matmul(p, tf.matmul(l, u))
# If height or width cannot be statically determined then they end up as
# tf.int32 tensors, which cannot be directly multiplied with a floating
# point tensor without a cast.
objective = tf.reduce_sum(log_s) * tf.cast(height * width, log_s.dtype)
if not reverse:
w = tf.reshape(w, [1, 1] + w_shape)
x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC")
else:
# TODO(b/111271662): Remove when supported.
def tpu_inv(m):
"""tf.linalg.inv workaround until it is supported on TPU."""
q, r = tf.linalg.qr(m)
return tf.linalg.triangular_solve(r, tf.transpose(q), lower=False)
w_inv = tf.reshape(tpu_inv(w), [1, 1]+w_shape)
x = tf.nn.conv2d(
x, w_inv, [1, 1, 1, 1], "SAME", data_format="NHWC")
objective *= -1
return x, objective | 1X1 convolution on x.
The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where
1. P is a permutation matrix.
2. L is a lower triangular matrix with diagonal entries unity.
3. U is an upper triangular matrix whose diagonal entries are zero.
4. s is a vector.
sign(s) and P are fixed and the remaining are optimized. P, L, U and s are
initialized by the PLU decomposition of a random rotation matrix.
Args:
name: scope
x: Input Tensor.
reverse: whether the pass is from z -> x or x -> z.
Returns:
x_conv: x after a 1X1 convolution is applied on x.
objective: sum(log(s)) |
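A small NumPy/SciPy sketch of the PLU parametrization used above: decompose a random orthogonal matrix and check that P @ L @ (U + diag(sign(s) * exp(log|s|))) rebuilds it, and that sum(log|s|) equals log|det W| (zero for a rotation). This only illustrates the decomposition, not the TensorFlow layer itself.
import numpy as np
import scipy.linalg

channels = 4
rng = np.random.RandomState(0)
w = scipy.linalg.qr(rng.rand(channels, channels))[0]      # random rotation matrix

p, l, u = scipy.linalg.lu(w)                              # W = P @ L @ U
s = np.diag(u)
sign_s, log_s = np.sign(s), np.log(np.abs(s))
u = np.triu(u, k=1)                                       # keep only the strictly upper part

w_rebuilt = p @ l @ (u + np.diag(sign_s * np.exp(log_s)))
print(np.allclose(w, w_rebuilt))                          # True
print(np.sum(log_s))                                      # ~0.0, i.e. log|det W| of a rotation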
def _handle_start_center(self, attrs):
"""
Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict
"""
center_lat = attrs.get("lat")
center_lon = attrs.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
self._curr["center_lat"] = Decimal(center_lat)
self._curr["center_lon"] = Decimal(center_lon) | Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict |
def consolidate_tarballs_job(job, fname_to_id):
"""
Combine the contents of separate tarballs into one.
Subdirs within the tarball will be named the keys in **fname_to_id
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
:return: The file store ID of the generated tarball
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
# Retrieve output file paths to consolidate
tar_paths = []
for fname, file_store_id in fname_to_id.iteritems():
p = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, fname + '.tar.gz'))
tar_paths.append((p, fname))
# I/O
# output_name is arbitrary as this job function returns a FileStoreId
output_name = 'foo.tar.gz'
out_tar = os.path.join(work_dir, output_name)
# Consolidate separate tarballs into one
with tarfile.open(os.path.join(work_dir, out_tar), 'w:gz') as f_out:
for tar, fname in tar_paths:
with tarfile.open(tar, 'r') as f_in:
for tarinfo in f_in:
with closing(f_in.extractfile(tarinfo)) as f_in_file:
tarinfo.name = os.path.join(output_name, fname, os.path.basename(tarinfo.name))
f_out.addfile(tarinfo, fileobj=f_in_file)
return job.fileStore.writeGlobalFile(out_tar) | Combine the contents of separate tarballs into one.
Subdirs within the tarball will be named the keys in **fname_to_id
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict[str,str] fname_to_id: Dictionary of the form: file-name-prefix=FileStoreID
:return: The file store ID of the generated tarball
:rtype: str |
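A hedged, Toil-free sketch of the consolidation step: build two tiny tarballs and merge their members under <output>/<prefix>/, mirroring the inner loop above; all file names here are invented for the demo.
import io
import os
import tarfile
import tempfile
from contextlib import closing

work_dir = tempfile.mkdtemp()

def make_tarball(path, member_name, payload):
    # Create a one-member gzipped tarball for the demo.
    with tarfile.open(path, 'w:gz') as tar:
        data = payload.encode()
        info = tarfile.TarInfo(member_name)
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))

a = os.path.join(work_dir, 'a.tar.gz'); make_tarball(a, 'a.txt', 'alpha')
b = os.path.join(work_dir, 'b.tar.gz'); make_tarball(b, 'b.txt', 'beta')

out_tar = os.path.join(work_dir, 'combined.tar.gz')
with tarfile.open(out_tar, 'w:gz') as f_out:
    for tar_path, prefix in [(a, 'sample_a'), (b, 'sample_b')]:
        with tarfile.open(tar_path, 'r') as f_in:
            for tarinfo in f_in:
                with closing(f_in.extractfile(tarinfo)) as f_in_file:
                    tarinfo.name = os.path.join('combined', prefix,
                                                os.path.basename(tarinfo.name))
                    f_out.addfile(tarinfo, fileobj=f_in_file)

with tarfile.open(out_tar) as tar:
    print(tar.getnames())   # ['combined/sample_a/a.txt', 'combined/sample_b/b.txt']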
def colless(self, normalize='leaves'):
'''Compute the Colless balance index of this ``Tree``. If the tree has polytomies, they will be randomly resolved
Args:
``normalize`` (``str``): How to normalize the Colless index (if at all)
* ``None`` to not normalize
* ``"leaves"`` to normalize by the number of leaves
* ``"yule"`` to normalize to the Yule model
* ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model
Returns:
``float``: Colless index (either normalized or not)
'''
t_res = copy(self); t_res.resolve_polytomies(); leaves_below = dict(); n = 0; I = 0
for node in t_res.traverse_postorder():
if node.is_leaf():
leaves_below[node] = 1; n += 1
else:
cl,cr = node.children; nl = leaves_below[cl]; nr = leaves_below[cr]
leaves_below[node] = nl+nr; I += abs(nl-nr)
if normalize is None or normalize is False:
return I
elif not isinstance(normalize,str):
raise TypeError("normalize must be None or a string")
normalize = normalize.lower()
if normalize == 'leaves':
return (2.*I)/((n-1)*(n-2))
elif normalize == 'yule':
return (I - n*log(n) - n*(EULER_GAMMA-1-log(2)))/n
elif normalize == 'pda':
return I/(n**1.5)
else:
raise RuntimeError("normalize must be None, 'leaves', 'yule', or 'pda'") | Compute the Colless balance index of this ``Tree``. If the tree has polytomies, they will be randomly resolved
Args:
``normalize`` (``str``): How to normalize the Colless index (if at all)
* ``None`` to not normalize
* ``"leaves"`` to normalize by the number of leaves
* ``"yule"`` to normalize to the Yule model
* ``"pda"`` to normalize to the Proportional to Distinguishable Arrangements model
Returns:
``float``: Colless index (either normalized or not) |
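A minimal sketch of the unnormalized Colless computation on a binary tree encoded as nested 2-tuples; it mirrors the leaves_below bookkeeping above without the Tree class or polytomy resolution.
def colless_index(tree):
    # tree: a leaf (any non-tuple) or a 2-tuple (left, right) of subtrees.
    # Returns (number of leaves, sum over internal nodes of |n_left - n_right|).
    if not isinstance(tree, tuple):
        return 1, 0
    nl, il = colless_index(tree[0])
    nr, ir = colless_index(tree[1])
    return nl + nr, il + ir + abs(nl - nr)

tree = ((('a', 'b'), 'c'), 'd')                 # caterpillar tree on 4 leaves
n, I = colless_index(tree)
print(n, I)                                     # 4 3
print(2.0 * I / ((n - 1) * (n - 2)))            # 1.0 under the 'leaves' normalization above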
def __check_success(resp):
""" Check a JSON server response to see if it was successful
:type resp: Dictionary (parsed JSON from response)
:param resp: the response string
:rtype: String
:returns: the success message, if it exists
:raises: APIError if the success message is not present
"""
if "success" not in resp.keys():
try:
raise APIError('200', 'Operation Failed', resp["error"])
except KeyError:
raise APIError('200', 'Operation Failed', str(resp))
return resp["success"] | Check a JSON server response to see if it was successful
:type resp: Dictionary (parsed JSON from response)
:param resp: the response string
:rtype: String
:returns: the success message, if it exists
:raises: APIError if the success message is not present |
def vm_netstats(vm_=None, **kwargs):
'''
Return combined network counters used by the vms on this hyper in a
list of dicts:
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
[
'your-vm': {
'rx_bytes' : 0,
'rx_packets' : 0,
'rx_errs' : 0,
'rx_drop' : 0,
'tx_bytes' : 0,
'tx_packets' : 0,
'tx_errs' : 0,
'tx_drop' : 0
},
...
]
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_netstats
'''
def _info(dom):
'''
Compute network stats of a domain
'''
nics = _get_nics(dom)
ret = {
'rx_bytes': 0,
'rx_packets': 0,
'rx_errs': 0,
'rx_drop': 0,
'tx_bytes': 0,
'tx_packets': 0,
'tx_errs': 0,
'tx_drop': 0
}
for attrs in six.itervalues(nics):
if 'target' in attrs:
dev = attrs['target']
stats = dom.interfaceStats(dev)
ret['rx_bytes'] += stats[0]
ret['rx_packets'] += stats[1]
ret['rx_errs'] += stats[2]
ret['rx_drop'] += stats[3]
ret['tx_bytes'] += stats[4]
ret['tx_packets'] += stats[5]
ret['tx_errs'] += stats[6]
ret['tx_drop'] += stats[7]
return ret
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info | Return combined network counters used by the vms on this hyper in a
list of dicts:
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
.. code-block:: python
[
'your-vm': {
'rx_bytes' : 0,
'rx_packets' : 0,
'rx_errs' : 0,
'rx_drop' : 0,
'tx_bytes' : 0,
'tx_packets' : 0,
'tx_errs' : 0,
'tx_drop' : 0
},
...
]
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_netstats |
def reMutualReceptions(self, idA, idB):
""" Returns ruler and exaltation mutual receptions. """
mr = self.mutualReceptions(idA, idB)
filter_ = ['ruler', 'exalt']
# Each pair of dignities must be 'ruler' or 'exalt'
return [(a,b) for (a,b) in mr if (a in filter_ and b in filter_)] | Returns ruler and exaltation mutual receptions. |
def getTargetNamespace(self):
"""return targetNamespace
"""
parent = self
targetNamespace = 'targetNamespace'
tns = self.attributes.get(targetNamespace)
while not tns and parent and parent._parent is not None:
parent = parent._parent()
tns = parent.attributes.get(targetNamespace)
return tns or '' | return targetNamespace |
def obspy_3d_plot(inventory, catalog, size=(10.5, 7.5), **kwargs):
"""
Plot obspy Inventory and obspy Catalog classes in three dimensions.
:type inventory: obspy.core.inventory.inventory.Inventory
:param inventory: Obspy inventory class containing station metadata
:type catalog: obspy.core.event.catalog.Catalog
:param catalog: Obspy catalog class containing event metadata
:type save: bool
:param save: False will plot to screen, true will save plot and not show \
to screen.
:type savefile: str
:param savefile: Filename to save to, required for save=True
:type size: tuple
:param size: Size of figure in inches.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example:
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.plotting import obspy_3d_plot
>>> client = Client('IRIS')
>>> t1 = UTCDateTime(2012, 3, 26)
>>> t2 = t1 + 86400
>>> catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43,
... longitude=170, maxradius=5)
>>> inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43,
... longitude=170, maxradius=10)
>>> obspy_3d_plot(inventory=inventory, catalog=catalog) # doctest: +SKIP
.. plot::
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from eqcorrscan.utils.plotting import obspy_3d_plot
client = Client('IRIS')
t1 = UTCDateTime(2012, 3, 26)
t2 = t1 + 86400
catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43,
longitude=170, maxradius=5)
inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43,
longitude=170, maxradius=10)
obspy_3d_plot(inventory=inventory, catalog=catalog)
"""
nodes = []
for ev in catalog:
nodes.append((ev.preferred_origin().latitude,
ev.preferred_origin().longitude,
ev.preferred_origin().depth / 1000))
# Will plot borehole instruments at elevation - depth if provided
all_stas = []
for net in inventory:
for sta in net:
if len(sta.channels) > 0:
all_stas.append((sta.latitude, sta.longitude,
sta.elevation / 1000 -
sta.channels[0].depth / 1000))
else:
warnings.warn('No channel information attached, '
'setting elevation without depth')
all_stas.append((sta.latitude, sta.longitude,
sta.elevation / 1000))
fig = threeD_seismplot(
stations=all_stas, nodes=nodes, size=size, **kwargs)
return fig | Plot obspy Inventory and obspy Catalog classes in three dimensions.
:type inventory: obspy.core.inventory.inventory.Inventory
:param inventory: Obspy inventory class containing station metadata
:type catalog: obspy.core.event.catalog.Catalog
:param catalog: Obspy catalog class containing event metadata
:type save: bool
:param save: False will plot to screen, true will save plot and not show \
to screen.
:type savefile: str
:param savefile: Filename to save to, required for save=True
:type size: tuple
:param size: Size of figure in inches.
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example:
>>> from obspy.clients.fdsn import Client
>>> from obspy import UTCDateTime
>>> from eqcorrscan.utils.plotting import obspy_3d_plot
>>> client = Client('IRIS')
>>> t1 = UTCDateTime(2012, 3, 26)
>>> t2 = t1 + 86400
>>> catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43,
... longitude=170, maxradius=5)
>>> inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43,
... longitude=170, maxradius=10)
>>> obspy_3d_plot(inventory=inventory, catalog=catalog) # doctest: +SKIP
.. plot::
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from eqcorrscan.utils.plotting import obspy_3d_plot
client = Client('IRIS')
t1 = UTCDateTime(2012, 3, 26)
t2 = t1 + 86400
catalog = client.get_events(starttime=t1, endtime=t2, latitude=-43,
longitude=170, maxradius=5)
inventory = client.get_stations(starttime=t1, endtime=t2, latitude=-43,
longitude=170, maxradius=10)
obspy_3d_plot(inventory=inventory, catalog=catalog) |
def save_svg(string, parent=None):
""" Prompts the user to save an SVG document to disk.
Parameters:
-----------
string : basestring
A Python string containing a SVG document.
parent : QWidget, optional
The parent to use for the file dialog.
Returns:
--------
The name of the file to which the document was saved, or None if the save
was cancelled.
"""
if isinstance(string, unicode):
string = string.encode('utf-8')
dialog = QtGui.QFileDialog(parent, 'Save SVG Document')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix('svg')
dialog.setNameFilter('SVG document (*.svg)')
if dialog.exec_():
filename = dialog.selectedFiles()[0]
f = open(filename, 'w')
try:
f.write(string)
finally:
f.close()
return filename
return None | Prompts the user to save an SVG document to disk.
Parameters:
-----------
string : basestring
A Python string containing a SVG document.
parent : QWidget, optional
The parent to use for the file dialog.
Returns:
--------
The name of the file to which the document was saved, or None if the save
was cancelled. |
def dict_to_numpy_dict(obj_dict):
"""
Convert a dictionary of lists into a dictionary of numpy arrays
"""
return {key: np.asarray(value) if value is not None else None for key, value in obj_dict.items()} | Convert a dictionary of lists into a dictionary of numpy arrays |
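For illustration, the same one-liner exercised on a toy dict; None values pass through untouched.
import numpy as np

def dict_to_numpy_dict(obj_dict):
    return {key: np.asarray(value) if value is not None else None
            for key, value in obj_dict.items()}

out = dict_to_numpy_dict({'x': [1, 2, 3], 'mask': None})
print(type(out['x']).__name__, out['mask'])   # ndarray None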
def all_files_exist(file_list):
"""Check if all files exist.
:param file_list: the names of files to check.
:type file_list: list
:returns: ``True`` if all files exist, ``False`` otherwise.
"""
all_exist = True
for filename in file_list:
all_exist = all_exist and os.path.isfile(filename)
return all_exist | Check if all files exist.
:param file_list: the names of files to check.
:type file_list: list
:returns: ``True`` if all files exist, ``False`` otherwise. |
def format_usage_masks(self, V_usage_mask_in, J_usage_mask_in, print_warnings = True):
"""Format raw usage masks into lists of indices.
        Usage masks allow the Pgen computation to be conditioned on the V and J
gene/allele identities. The inputted masks are lists of strings, or a
single string, of the names of the genes or alleles to be conditioned on.
The default mask includes all productive V or J genes.
Parameters
----------
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
V_usage_mask : list of integers
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list of integers
Indices of the J alleles to be considered in the Pgen computation
Examples
--------
>>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01')
([34], [0])
>>> generation_probability.format_usage_masks('TRBV27*01', '')
([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13])
>>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01')
([34, 18], [0])
"""
#Format the V usage mask
if V_usage_mask_in is None: #Default case, use all productive V genes with non-zero probability
#V_usage_mask = [v for v, V in enumerate(ppp['cutV_genomic_CDR3_segs']) if len(V) > 0]
V_usage_mask = self.d_V_usage_mask
elif isinstance(V_usage_mask_in, list):
e_V_usage_mask = set()
for v in V_usage_mask_in:
try:
e_V_usage_mask = e_V_usage_mask.union(self.V_mask_mapping[v])
except KeyError:
if print_warnings:
print 'Unfamiliar V gene/allele: ' + v
pass
if len(e_V_usage_mask) == 0:
if print_warnings:
print 'No recognized V genes/alleles. Using default V_usage_mask'
V_usage_mask = self.d_V_usage_mask
else:
V_usage_mask = list(e_V_usage_mask)
else:
try:
V_usage_mask = self.V_mask_mapping[V_usage_mask_in]
except KeyError:
#Do raise error here as the mask will be empty
if print_warnings:
print 'Unfamiliar V usage mask: ' + str(V_usage_mask_in) + ', please check the allowed V alleles. Using default V_usage_mask'
V_usage_mask = self.d_V_usage_mask
#Format the J usage mask
if J_usage_mask_in is None: #Default case, use all productive J genes with non-zero probability
#J_usage_mask = [j for j, J in enumerate(ppp['cutJ_genomic_CDR3_segs']) if len(J) > 0]
J_usage_mask = self.d_J_usage_mask
elif isinstance(J_usage_mask_in, list):
e_J_usage_mask = set()
for j in J_usage_mask_in:
try:
e_J_usage_mask = e_J_usage_mask.union(self.J_mask_mapping[j])
except KeyError:
if print_warnings:
print 'Unfamiliar J gene/allele: ' + j
pass
if len(e_J_usage_mask) == 0:
if print_warnings:
print 'No recognized J genes/alleles. Using default J_usage_mask'
J_usage_mask = self.d_J_usage_mask
else:
J_usage_mask = list(e_J_usage_mask)
else:
try:
J_usage_mask = self.J_mask_mapping[J_usage_mask_in]
except KeyError:
#Do raise error here as the mask will be empty
if print_warnings:
print 'Unfamiliar J usage mask: ' + str(J_usage_mask_in) + ', please check the allowed J alleles. Using default J_usage_mask'
J_usage_mask = self.d_J_usage_mask
return V_usage_mask, J_usage_mask | Format raw usage masks into lists of indices.
Usage masks allow the Pgen computation to be conditioned on the V and J
gene/allele identities. The inputted masks are lists of strings, or a
single string, of the names of the genes or alleles to be conditioned on.
The default mask includes all productive V or J genes.
Parameters
----------
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
V_usage_mask : list of integers
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list of integers
Indices of the J alleles to be considered in the Pgen computation
Examples
--------
>>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01')
([34], [0])
>>> generation_probability.format_usage_masks('TRBV27*01', '')
([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13])
>>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01')
([34, 18], [0]) |
def simplex_find_cycle(self):
'''
API:
simplex_find_cycle(self)
Description:
Returns a cycle (list of nodes) if the graph has one, returns None
otherwise. Uses DFS. During DFS checks existence of arcs to lower
depth regions. Note that direction of the arcs are not important.
Return:
Returns list of nodes that represents cycle. Returns None if the
graph does not have any cycle.
'''
# make a dfs, if you identify an arc to a lower depth node we have a
# cycle
nl = self.get_node_list()
q = [nl[0]]
visited = []
depth = {nl[0]:0}
pred = {nl[0]:None}
for n in nl:
self.get_node(n).set_attr('component', None)
component_nr = int(nl[0])
self.get_node(nl[0]).set_attr('component', component_nr)
while True:
while q:
current = q.pop()
visited.append(current)
neighbors = self.in_neighbors[current] +\
self.neighbors[current]
for n in neighbors:
if n==pred[current]:
continue
self.get_node(n).set_attr('component', component_nr)
if n in depth:
# we have a cycle
cycle1 = []
cycle2 = []
temp = n
while temp is not None:
cycle1.append(temp)
temp = pred[temp]
temp = current
while temp is not None:
cycle2.append(temp)
temp = pred[temp]
cycle1.pop()
cycle1.reverse()
cycle2.extend(cycle1)
return cycle2
else:
pred[n] = current
depth[n] = depth[current] + 1
if n not in visited:
q.append(n)
flag = False
for n in nl:
if self.get_node(n).get_attr('component') is None:
q.append(n)
depth = {n:0}
pred = {n:None}
visited = []
component_nr = int(n)
self.get_node(n).set_attr('component', component_nr)
flag = True
break
if not flag:
break
return None | API:
simplex_find_cycle(self)
Description:
Returns a cycle (list of nodes) if the graph has one, returns None
otherwise. Uses DFS. During DFS checks existence of arcs to lower
depth regions. Note that direction of the arcs are not important.
Return:
Returns list of nodes that represents cycle. Returns None if the
graph does not have any cycle. |
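A simplified, self-contained variant of the cycle search on a plain adjacency dict: it uses recursive DFS and recovers the cycle by slicing the current path at the back edge, rather than the predecessor/depth bookkeeping of the original.
def find_cycle(adj):
    # adj: undirected graph as {node: [neighbours, ...]}.
    # Returns a list of nodes forming a cycle, or None if the graph is acyclic.
    visited = set()

    def dfs(node, parent, path):
        visited.add(node)
        path.append(node)
        for n in adj[node]:
            if n == parent:
                continue
            if n in path:                        # back edge closes a cycle
                return path[path.index(n):]
            if n not in visited:
                cycle = dfs(n, node, path)
                if cycle is not None:
                    return cycle
        path.pop()
        return None

    for start in adj:
        if start not in visited:
            cycle = dfs(start, None, [])
            if cycle is not None:
                return cycle
    return None

print(find_cycle({1: [2], 2: [1, 3], 3: [2]}))          # None (a simple path)
print(find_cycle({1: [2, 3], 2: [1, 3], 3: [2, 1]}))    # [1, 2, 3]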
def p_ports(self, p):
'ports : ports COMMA portname'
wid = None
port = Port(name=p[3], width=wid, type=None, lineno=p.lineno(1))
p[0] = p[1] + (port,)
p.set_lineno(0, p.lineno(1)) | ports : ports COMMA portname |
def get_geostationary_angle_extent(geos_area):
"""Get the max earth (vs space) viewing angles in x and y."""
# TODO: take into account sweep_axis_angle parameter
# get some projection parameters
req = geos_area.proj_dict['a'] / 1000
rp = geos_area.proj_dict['b'] / 1000
h = geos_area.proj_dict['h'] / 1000 + req
# compute some constants
aeq = 1 - req**2 / (h ** 2)
ap_ = 1 - rp**2 / (h ** 2)
# generate points around the north hemisphere in satellite projection
# make it a bit smaller so that we stay inside the valid area
xmax = np.arccos(np.sqrt(aeq))
ymax = np.arccos(np.sqrt(ap_))
return xmax, ymax | Get the max earth (vs space) viewing angles in x and y. |
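A numeric sketch with geostationary-like projection parameters; the SimpleNamespace stands in for a pyresample AreaDefinition, and the a/b/h values are typical figures in metres rather than values taken from the source.
from types import SimpleNamespace
import numpy as np

geos_area = SimpleNamespace(proj_dict={'a': 6378169.0,    # equatorial radius [m]
                                       'b': 6356583.8,    # polar radius [m]
                                       'h': 35785831.0})  # satellite height above surface [m]

req = geos_area.proj_dict['a'] / 1000
rp = geos_area.proj_dict['b'] / 1000
h = geos_area.proj_dict['h'] / 1000 + req

xmax = np.arccos(np.sqrt(1 - req ** 2 / h ** 2))
ymax = np.arccos(np.sqrt(1 - rp ** 2 / h ** 2))
print(np.rad2deg(xmax), np.rad2deg(ymax))                 # roughly 8.7 degrees each way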
def main(argv):
"""This function sets up a command-line option parser and then calls match_and_print
to do all of the real work.
"""
import argparse
description = 'Uses Open Tree of Life web services to try to find a taxon ID for each name supplied. ' \
'Using a --context-name=NAME to provide a limited taxonomic context and using the ' \
                  ' --prohibit-fuzzy-matching option can make the matching faster. If only one ' \
                  'match is found, then it also calls the equivalent of the ot-taxon-info.py and ot-taxon-subtree.py scripts.'
parser = argparse.ArgumentParser(prog='ot-tnrs-match-names', description=description)
parser.add_argument('names', nargs='+', help='name(s) for which we will try to find OTT IDs')
parser.add_argument('--context-name', default=None, type=str, required=False)
parser.add_argument('--include-dubious',
action='store_true',
default=False,
required=False,
help='return matches to taxa that are not included the synthetic tree because their taxonomic status is doubtful')
parser.add_argument('--subtree',
action='store_true',
default=False,
required=False,
help='print the newick representation of the taxonomic subtree if there is only one matching OTT ID')
parser.add_argument('--include-deprecated', action='store_true', default=False, required=False)
parser.add_argument('--prohibit-fuzzy-matching', action='store_true', default=False, required=False)
args = parser.parse_args(argv)
# The service takes do_approximate_matching
# We use the opposite to make the command-line just include positive directives
# (as opposed to requiring --do-approximate-matching=False) so we use "not"
do_approximate_matching = not args.prohibit_fuzzy_matching
name_list = args.names
if len(name_list) == 0:
name_list = ["Homo sapiens", "Gorilla gorilla"]
sys.stderr.write('Running a demonstration query with {}\n'.format(name_list))
else:
for name in name_list:
if name.startswith('-'):
parser.print_help()
match_and_print(name_list,
context_name=args.context_name,
do_approximate_matching=do_approximate_matching,
include_dubious=args.include_dubious,
include_deprecated=args.include_deprecated,
include_subtree=args.subtree,
output=sys.stdout) | This function sets up a command-line option parser and then calls match_and_print
to do all of the real work. |
def pdf_row_limiter(rows, limits=None, **kwargs):
"""
    Limit rows by passing a value. In this case we don't implement a best-effort
    algorithm because the possibilities are infinite with a text data structure
    from a PDF.
"""
limits = limits or [None, None]
upper_limit = limits[0] if limits else None
lower_limit = limits[1] if len(limits) > 1 else None
    return rows[upper_limit: lower_limit] | Limit rows by passing a value. In this case we don't implement a best-effort
algorithm because the possibilities are infinite with a text data structure
from a PDF. |
def run(self):
""" Perform phantomas run """
self._logger.info("running for <{url}>".format(url=self._url))
args = format_args(self._options)
self._logger.debug("command: `{cmd}` / args: {args}".
format(cmd=self._cmd, args=args))
# run the process
try:
process = Popen(
args=[self._cmd] + args,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE
)
pid = process.pid
self._logger.debug("running as PID #{pid}".format(pid=pid))
except OSError as ex:
raise PhantomasRunError(
"Failed to run phantomas: {0}".format(ex), ex.errno)
# wait to complete
try:
stdout, stderr = process.communicate()
returncode = process.returncode
except Exception:
raise PhantomasRunError("Failed to complete the run")
# for Python 3.x - decode bytes to string
stdout = stdout.decode('utf8')
stderr = stderr.decode('utf8')
# check the response code
self._logger.debug("completed with return code #{returncode}".
format(returncode=returncode))
if stderr != '':
self._logger.debug("stderr: {stderr}".format(stderr=stderr))
raise PhantomasFailedError(stderr.strip(), returncode)
# try parsing the response
try:
results = json.loads(stdout)
except Exception:
raise PhantomasResponseParsingError("Unable to parse the response")
if self._options.get("runs", 0) > 1:
return Runs(self._url, results)
else:
return Results(self._url, results) | Perform phantomas run |
def verify_invoice_params(self, price, currency):
"""
Deprecated, will be made private in 2.4
"""
if re.match("^[A-Z]{3,3}$", currency) is None:
raise BitPayArgumentError("Currency is invalid.")
try:
float(price)
except:
raise BitPayArgumentError("Price must be formatted as a float") | Deprecated, will be made private in 2.4 |
def register_opts(conf):
"""Configure options within configuration library."""
conf.register_cli_opts(CLI_OPTS)
conf.register_opts(EPISODE_OPTS)
conf.register_opts(FORMAT_OPTS)
conf.register_opts(CACHE_OPTS, 'cache') | Configure options within configuration library. |
def run_star(job, fastqs, univ_options, star_options):
"""
This module uses STAR to align the RNA fastqs to the reference
ARGUMENTS
1. fastqs: REFER RETURN VALUE of run_cutadapt()
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. star_options: Dict of parameters specific to STAR
star_options
|- 'index_tar': <JSid for the STAR index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bams
output_files
|- 'rnaAligned.toTranscriptome.out.bam': <JSid>
+- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai
|- 'rna_fix_pg_sorted.bam': <JSid>
+- 'rna_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to node 9 on the tree
"""
assert star_options['type'] in ('star', 'starlong')
job.fileStore.logToMaster('Running STAR on %s' %univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'rna_cutadapt_1.fastq': fastqs['rna_cutadapt_1.fastq'],
'rna_cutadapt_2.fastq': fastqs['rna_cutadapt_2.fastq'],
'star_index.tar.gz': star_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
parameters = ['--runThreadN', str(star_options['n']),
'--genomeDir', input_files['star_index'],
'--outFileNamePrefix', 'rna',
'--readFilesIn',
input_files['rna_cutadapt_1.fastq'],
input_files['rna_cutadapt_2.fastq'],
'--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
'--outSAMtype', 'BAM', 'SortedByCoordinate',
'--quantMode', 'TranscriptomeSAM',
'--outSAMunmapped', 'Within']
if star_options['type'] == 'star':
docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
else:
docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for bam_file in ['rnaAligned.toTranscriptome.out.bam',
'rnaAligned.sortedByCoord.out.bam']:
output_files[bam_file] = job.fileStore.writeGlobalFile('/'.join([
work_dir, bam_file]))
job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_1.fastq'])
job.fileStore.deleteGlobalFile(fastqs['rna_cutadapt_2.fastq'])
index_star = job.wrapJobFn(index_bamfile,
output_files['rnaAligned.sortedByCoord.out.bam'],
'rna', univ_options, disk='120G')
job.addChild(index_star)
output_files['rnaAligned.sortedByCoord.out.bam'] = index_star.rv()
return output_files | This module uses STAR to align the RNA fastqs to the reference
ARGUMENTS
1. fastqs: REFER RETURN VALUE of run_cutadapt()
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. star_options: Dict of parameters specific to STAR
star_options
|- 'index_tar': <JSid for the STAR index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bams
output_files
|- 'rnaAligned.toTranscriptome.out.bam': <JSid>
+- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai
|- 'rna_fix_pg_sorted.bam': <JSid>
+- 'rna_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to node 9 on the tree |
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg) | Hook to log a message. |
def relabel(self, qubits: Qubits) -> 'Channel':
"""Return a copy of this channel with new qubits"""
chan = copy(self)
chan.vec = chan.vec.relabel(qubits)
return chan | Return a copy of this channel with new qubits |
def get(self, name):
"""
Get workspace infos from name.
Return None if workspace doesn't exists.
"""
ws_list = self.list()
return ws_list[name] if name in ws_list else None | Get workspace infos from name.
Return None if workspace doesn't exists. |
def update(self):
"""Replace baseline representations previously registered for update."""
for linenum in reversed(sorted(self.updates)):
self.replace_baseline_repr(linenum, self.updates[linenum])
if not self.TEST_MODE:
path = '{}.update{}'.format(*os.path.splitext(self.path))
with io.open(path, 'w', encoding='utf-8') as fh:
fh.write('\n'.join(self.lines))
print('UPDATE: {}'.format(self.showpath(path))) | Replace baseline representations previously registered for update. |
def set_group_anonymous(self, *, group_id, enable=True):
"""
        Set group anonymous chat
        ------------
        :param int group_id: group number
        :param bool enable: whether to allow anonymous chat
:return: None
:rtype: None
"""
return super().__getattr__('set_group_anonymous') \
            (group_id=group_id, enable=enable) | Set group anonymous chat
------------
:param int group_id: group number
:param bool enable: whether to allow anonymous chat
:return: None
:rtype: None |
def get_relationship_info(tree, media, image_sizes):
"""
    There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target.
"""
if tree is None:
return {}
result = {}
# Loop through each relationship.
for el in tree.iter():
el_id = el.get('Id')
if el_id is None:
continue
# Store the target in the result dict.
target = el.get('Target')
if any(
target.lower().endswith(ext) for
ext in IMAGE_EXTENSIONS_TO_SKIP):
continue
if target in media:
image_size = image_sizes.get(el_id)
target = convert_image(media[target], image_size)
        # cgi will replace things like & < > with &amp; &lt; &gt;
result[el_id] = cgi.escape(target)
    return result | There is a separate file that holds the targets for links as well as the targets
for images. Return a dictionary based on the relationship id and the
target. |
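A stripped-down, runnable sketch of the Id-to-Target extraction on an inline relationships XML string; the real .rels payload carries a namespace and the image-conversion branch, both omitted here, and html.escape stands in for the deprecated cgi.escape used above.
import xml.etree.ElementTree as ET
from html import escape

rels_xml = ('<Relationships>'
            '<Relationship Id="rId1" Target="http://example.com/?a=1&amp;b=2"/>'
            '<Relationship Id="rId2" Target="media/image1.png"/>'
            '</Relationships>')

tree = ET.fromstring(rels_xml)
result = {}
for el in tree.iter():
    el_id = el.get('Id')
    if el_id is None:            # the root element has no Id attribute
        continue
    result[el_id] = escape(el.get('Target'))

print(result)
# {'rId1': 'http://example.com/?a=1&amp;b=2', 'rId2': 'media/image1.png'}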
def _parse_name(self, name):
"""Internal method to parse a `string` name into constituent
        `ifo`, `name` and `version` components.
Parameters
----------
name : `str`, `None`
the full name of a `DataQualityFlag` to parse, e.g.
``'H1:DMT-SCIENCE:1'``, or `None` to set all components
to `None`
Returns
-------
(ifo, name, version)
A tuple of component string parts
Raises
------
`ValueError`
If the input ``name`` cannot be parsed into
{ifo}:{tag}:{version} format.
"""
if name is None:
self.ifo = None
self.tag = None
self.version = None
elif re_IFO_TAG_VERSION.match(name):
match = re_IFO_TAG_VERSION.match(name).groupdict()
self.ifo = match['ifo']
self.tag = match['tag']
self.version = int(match['version'])
elif re_IFO_TAG.match(name):
match = re_IFO_TAG.match(name).groupdict()
self.ifo = match['ifo']
self.tag = match['tag']
self.version = None
elif re_TAG_VERSION.match(name):
match = re_TAG_VERSION.match(name).groupdict()
self.ifo = None
self.tag = match['tag']
self.version = int(match['version'])
else:
raise ValueError("No flag name structure detected in '%s', flags "
"should be named as '{ifo}:{tag}:{version}'. "
"For arbitrary strings, use the "
"`DataQualityFlag.label` attribute" % name)
return self.ifo, self.tag, self.version | Internal method to parse a `string` name into constituent
`ifo`, `name` and `version` components.
Parameters
----------
name : `str`, `None`
the full name of a `DataQualityFlag` to parse, e.g.
``'H1:DMT-SCIENCE:1'``, or `None` to set all components
to `None`
Returns
-------
(ifo, name, version)
A tuple of component string parts
Raises
------
`ValueError`
If the input ``name`` cannot be parsed into
{ifo}:{tag}:{version} format. |
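A standalone sketch of the same three-way parse; the regular expressions here are assumptions written to accept names like 'H1:DMT-SCIENCE:1', not the module's actual patterns.
import re

# Hypothetical patterns; the real module defines its own re_IFO_TAG_VERSION etc.
re_IFO_TAG_VERSION = re.compile(r'^(?P<ifo>[A-Z][0-9]):(?P<tag>[^:]+):(?P<version>\d+)$')
re_IFO_TAG = re.compile(r'^(?P<ifo>[A-Z][0-9]):(?P<tag>[^:]+)$')
re_TAG_VERSION = re.compile(r'^(?P<tag>[^:]+):(?P<version>\d+)$')

def parse_name(name):
    # Try the most specific pattern first and fall back to the looser ones.
    for pattern in (re_IFO_TAG_VERSION, re_IFO_TAG, re_TAG_VERSION):
        match = pattern.match(name)
        if match:
            parts = match.groupdict()
            return (parts.get('ifo'),
                    parts['tag'],
                    int(parts['version']) if parts.get('version') else None)
    raise ValueError("flags should be named as '{ifo}:{tag}:{version}': %r" % name)

print(parse_name('H1:DMT-SCIENCE:1'))   # ('H1', 'DMT-SCIENCE', 1)
print(parse_name('DMT-SCIENCE:1'))      # (None, 'DMT-SCIENCE', 1)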