def Dropout(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images.
"""
if ia.is_single_number(p):
p2 = iap.Binomial(1 - p)
elif ia.is_iterable(p):
ia.do_assert(len(p) == 2)
ia.do_assert(p[0] < p[1])
ia.do_assert(0 <= p[0] <= 1.0)
ia.do_assert(0 <= p[1] <= 1.0)
p2 = iap.Binomial(iap.Uniform(1 - p[1], 1 - p[0]))
elif isinstance(p, iap.StochasticParameter):
p2 = p
else:
raise Exception("Expected p to be float or int or StochasticParameter, got %s." % (type(p),))
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return MultiplyElementwise(p2, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | Augmenter that sets a certain fraction of pixels in images to zero.
dtype support::
See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.
Parameters
----------
p : float or tuple of float or imgaug.parameters.StochasticParameter, optional
The probability of any pixel being dropped (i.e. set to zero).
* If a float, then that value will be used for all images. A value
of 1.0 would mean that all pixels will be dropped and 0.0 that
no pixels would be dropped. A value of 0.05 corresponds to 5
percent of all pixels dropped.
* If a tuple ``(a, b)``, then a value p will be sampled from the
range ``a <= p <= b`` per image and be used as the pixel's dropout
probability.
* If a StochasticParameter, then this parameter will be used to
determine per pixel whether it should be dropped (sampled value
of 0) or shouldn't (sampled value of 1).
If you instead want to provide the probability as a stochastic
parameter, you can usually do ``imgaug.parameters.Binomial(1-p)``
to convert parameter `p` to a 0/1 representation.
per_channel : bool or float, optional
Whether to use the same value (is dropped / is not dropped)
for all channels of a pixel (False) or to sample a new value for each
channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Dropout(0.02)
drops 2 percent of all pixels.
>>> aug = iaa.Dropout((0.0, 0.05))
drops in each image a random fraction of all pixels, where the fraction
is in the range ``0.0 <= x <= 0.05``.
>>> aug = iaa.Dropout(0.02, per_channel=True)
drops 2 percent of all pixels in a channel-wise fashion, i.e. it is unlikely
for any pixel to have all channels set to zero (black pixels).
>>> aug = iaa.Dropout(0.02, per_channel=0.5)
same as previous example, but the `per_channel` feature is only active
for 50 percent of all images. |
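# A minimal usage sketch of the Dropout augmenter defined above; the random
# uint8 batch is purely illustrative and assumes imgaug/numpy are installed.
import numpy as np
import imgaug.augmenters as iaa

images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
aug = iaa.Dropout(p=(0.0, 0.05), per_channel=0.5)
images_aug = aug.augment_images(images)  # roughly 0-5% of pixel values set to zero per image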
def display_files(self, pcs_files):
'''Reformat the file list and strip unneeded information.
This is mainly done to make the subsequent lookup easier.
Each file's path is extracted and put into a list.
'''
tree_iters = []
for pcs_file in pcs_files:
path = pcs_file['path']
pixbuf, type_ = self.app.mime.get(path, pcs_file['isdir'],
icon_size=self.ICON_SIZE)
name = os.path.split(path)[NAME_COL]
tooltip = gutil.escape(name)
size = pcs_file.get('size', 0)
if pcs_file['isdir']:
human_size = '--'
else:
human_size = util.get_human_size(pcs_file['size'])[0]
mtime = pcs_file.get('server_mtime', 0)
human_mtime = time.ctime(mtime)
tree_iter = self.liststore.append([
pixbuf, name, path, tooltip, size, human_size,
pcs_file['isdir'], mtime, human_mtime, type_,
json.dumps(pcs_file)
])
tree_iters.append(tree_iter)
cache_path = Config.get_cache_path(self.app.profile['username'])
gutil.async_call(gutil.update_liststore_image, self.liststore,
tree_iters, PIXBUF_COL, pcs_files, cache_path,
self.ICON_SIZE) | Reformat the file list and strip unneeded information.
This is mainly done to make the subsequent lookup easier.
Each file's path is extracted and put into a list. |
def from_tuples(cls, tups):
"""
Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data.
"""
ivs = [Interval(*t) for t in tups]
return IntervalTree(ivs) | Create a new IntervalTree from an iterable of 2- or 3-tuples,
where the tuple lists begin, end, and optionally data. |
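# Hedged usage sketch for from_tuples above, assuming the intervaltree
# package's Interval/IntervalTree classes referenced in the code.
from intervaltree import IntervalTree

tree = IntervalTree.from_tuples([(1, 5), (4, 7, "payload")])
sorted(tree[4])  # intervals containing point 4: Interval(1, 5) and Interval(4, 7, 'payload')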
def local_lru(obj):
""" Property that maps to a key in a local dict-like attribute.
self._cache must be an OrderedDict
self._cache_size must be defined as LRU size
..
class Foo(object):
def __init__(self, cache_size=5000):
self._cache = OrderedDict()
self._cache_size = cache_size
@local_lru
def expensive_meth(self, arg):
pass
..
"""
@wraps(obj)
def memoizer(*args, **kwargs):
instance = args[0]
lru_size = instance._cache_size
if lru_size:
cache = instance._cache
key = str((args, kwargs))
try:
r = cache.pop(key)
cache[key] = r
except KeyError:
if len(cache) >= lru_size:
cache.popitem(last=False)
r = cache[key] = obj(*args, **kwargs)
return r
return obj(*args, **kwargs)
return memoizer | Memoizing decorator that caches a method's results in an instance-local LRU cache.
self._cache must be an OrderedDict
self._cache_size must be defined as LRU size
..
class Foo(object):
def __init__(self, cache_size=5000):
self._cache = OrderedDict()
self._cache_size = cache_size
@local_lru
def expensive_meth(self, arg):
pass
.. |
def _deallocator(self):
"""Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable.
"""
lookup = {
"c_bool": "logical",
"c_double": "double",
"c_double_complex": "complex",
"c_char": "char",
"c_int": "int",
"c_float": "float",
"c_short": "short",
"c_long": "long"
}
ctype = type(self.pointer).__name__.replace("LP_", "").lower()
if ctype in lookup:
return "dealloc_{0}_{1:d}d".format(lookup[ctype], len(self.indices))
else:
return None | Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable. |
def get_querystring(self):
"""
Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page')
"""
to_remove = self.get_querystring_parameter_to_remove()
query_string = urlparse(self.request.get_full_path()).query
query_dict = parse_qs(query_string.encode('utf-8'))
for arg in to_remove:
if arg in query_dict:
del query_dict[arg]
clean_query_string = urlencode(query_dict, doseq=True)
return clean_query_string | Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page') |
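# Illustration of the query-string cleaning idea above using the same stdlib
# helpers; the example URL and the removed parameters ('page', 'sort') are hypothetical.
from urllib.parse import urlparse, parse_qs, urlencode

query_dict = parse_qs(urlparse('/items?page=3&sort=name&q=shoes').query)
for arg in ('page', 'sort'):
    query_dict.pop(arg, None)
urlencode(query_dict, doseq=True)  # -> 'q=shoes'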
def run(self, deploy_attempted=False):
'''
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode)
'''
stdout = stderr = retcode = None
if self.opts.get('raw_shell', False):
cmd_str = ' '.join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode | Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode) |
def get_fresh_primary_tumors(biospecimen):
"""Filter biospecimen data to only keep non-FFPE primary tumor samples.
Parameters
----------
biospecimen : `pandas.DataFrame`
The biospecimen data frame. This type of data frame is returned by
:meth:`get_biospecimen_data`.
Returns
-------
`pandas.DataFrame`
The filtered data frame.
"""
df = biospecimen # use shorter variable name
# get rid of FFPE samples
num_before = len(df.index)
df = df.loc[~df['is_ffpe']]
logger.info('Excluded %d files associated with FFPE samples '
'(out of %d files in total).',
num_before - len(df.index), num_before)
# only keep primary tumors
num_before = len(df.index)
df = df.loc[df['sample_type'] == 'Primary Tumor']
logger.info('Excluded %d files not corresponding to primary tumor '
'samples (out of %d files in total).',
num_before - len(df.index), num_before)
return df | Filter biospecimen data to only keep non-FFPE primary tumor samples.
Parameters
----------
biospecimen : `pandas.DataFrame`
The biospecimen data frame. This type of data frame is returned by
:meth:`get_biospecimen_data`.
Returns
-------
`pandas.DataFrame`
The filtered data frame. |
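# Toy sketch of the two filtering steps above on a hand-made frame; the column
# names ('is_ffpe', 'sample_type') match those used in the function.
import pandas as pd

df = pd.DataFrame({
    'is_ffpe': [False, True, False],
    'sample_type': ['Primary Tumor', 'Primary Tumor', 'Solid Tissue Normal'],
})
fresh_primary = df.loc[~df['is_ffpe'] & (df['sample_type'] == 'Primary Tumor')]
len(fresh_primary)  # -> 1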
def do_loop_turn(self):
# pylint: disable=too-many-branches, too-many-statements, too-many-locals
"""Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites
:return: None
"""
# If I am a spare, I only wait for the master arbiter to die...
if not self.is_master:
logger.debug("Waiting for my master death...")
self.wait_for_master_death()
return
if self.loop_count % self.alignak_monitor_period == 1:
self.get_alignak_status(details=True)
# Maybe an external process requested Alignak stop...
if self.kill_request:
logger.info("daemon stop mode ...")
if not self.dispatcher.stop_request_sent:
logger.info("entering daemon stop mode, time before exiting: %s",
self.conf.daemons_stop_timeout)
self.dispatcher.stop_request()
if time.time() > self.kill_timestamp + self.conf.daemons_stop_timeout:
logger.info("daemon stop mode delay reached, immediate stop")
self.dispatcher.stop_request(stop_now=True)
time.sleep(1)
self.interrupted = True
logger.info("exiting...")
if not self.kill_request:
# Main loop treatment
# Try to see if one of my module is dead, and restart previously dead modules
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
_t0 = time.time()
self.hook_point('tick')
statsmgr.timer('hook.tick', time.time() - _t0)
# Look for logging timeperiods activation change (active/inactive)
self.check_and_log_tp_activation_change()
# Check that my daemons are alive
if not self.daemons_check():
if self.conf.daemons_failure_kill:
self.request_stop(message="Some Alignak daemons cannot be checked.",
exit_code=4)
else:
logger.warning("Should have killed my children if "
"'daemons_failure_kill' were set!")
# Now the dispatcher job - check if all daemons are reachable and have a configuration
if not self.daemons_reachability_check():
logger.warning("A new configuration dispatch is required!")
# Prepare and dispatch the monitored configuration
self.configuration_dispatch(self.dispatcher.not_configured)
# Now get things from our module instances
_t0 = time.time()
self.get_objects_from_from_queues()
statsmgr.timer('get-objects-from-queues', time.time() - _t0)
# Maybe our satellites raised new broks. Reap them...
_t0 = time.time()
self.get_broks_from_satellites()
statsmgr.timer('broks.got.time', time.time() - _t0)
# One broker is responsible for our broks, we give him our broks
_t0 = time.time()
self.push_broks_to_broker()
statsmgr.timer('broks.pushed.time', time.time() - _t0)
# # We push our external commands to our schedulers...
# _t0 = time.time()
# self.push_external_commands_to_schedulers()
# statsmgr.timer('external-commands.pushed.time', time.time() - _t0)
if self.system_health and (self.loop_count % self.system_health_period == 1):
perfdatas = []
cpu_count = psutil.cpu_count()
perfdatas.append("'cpu_count'=%d" % cpu_count)
logger.debug(" . cpu count: %d", cpu_count)
cpu_percents = psutil.cpu_percent(percpu=True)
cpu = 1
for percent in cpu_percents:
perfdatas.append("'cpu_%d_percent'=%.2f%%" % (cpu, percent))
cpu += 1
cpu_times_percent = psutil.cpu_times_percent(percpu=True)
cpu = 1
for cpu_times_percent in cpu_times_percent:
logger.debug(" . cpu time percent: %s", cpu_times_percent)
for key in cpu_times_percent._fields:
perfdatas.append(
"'cpu_%d_%s_percent'=%.2f%%" % (cpu, key,
getattr(cpu_times_percent, key)))
cpu += 1
logger.info("%s cpu|%s", self.name, " ".join(perfdatas))
perfdatas = []
disk_partitions = psutil.disk_partitions(all=False)
for disk_partition in disk_partitions:
logger.debug(" . disk partition: %s", disk_partition)
disk = getattr(disk_partition, 'mountpoint')
disk_usage = psutil.disk_usage(disk)
logger.debug(" . disk usage: %s", disk_usage)
for key in disk_usage._fields:
if 'percent' in key:
perfdatas.append("'disk_%s_percent_used'=%.2f%%"
% (disk, getattr(disk_usage, key)))
else:
perfdatas.append("'disk_%s_%s'=%dB"
% (disk, key, getattr(disk_usage, key)))
logger.info("%s disks|%s", self.name, " ".join(perfdatas))
perfdatas = []
virtual_memory = psutil.virtual_memory()
logger.debug(" . memory: %s", virtual_memory)
for key in virtual_memory._fields:
if 'percent' in key:
perfdatas.append("'mem_percent_used_%s'=%.2f%%"
% (key, getattr(virtual_memory, key)))
else:
perfdatas.append("'mem_%s'=%dB"
% (key, getattr(virtual_memory, key)))
swap_memory = psutil.swap_memory()
logger.debug(" . memory: %s", swap_memory)
for key in swap_memory._fields:
if 'percent' in key:
perfdatas.append("'swap_used_%s'=%.2f%%"
% (key, getattr(swap_memory, key)))
else:
perfdatas.append("'swap_%s'=%dB"
% (key, getattr(swap_memory, key)))
logger.info("%s memory|%s", self.name, " ".join(perfdatas)) | Loop turn for Arbiter
If not a master daemon, wait for my master death...
Else, run:
* Check satellites are alive
* Check and dispatch (if needed) the configuration
* Get broks and external commands from the satellites
* Push broks and external commands to the satellites
:return: None |
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) | Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered. |
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_secondary_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
secondary_version = ET.SubElement(firmware_version_info, "secondary-version")
secondary_version.text = kwargs.pop('secondary_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def load_extracted(src_dir: str,
patterns="*.npy",
vars_in_cols: bool = True,
index: pd.Series = None):
"""Load data extracted and stored by :py:func:`extract`
Arguments:
src_dir {str} -- The directory where the data is stored.
Keyword Arguments:
patterns {str, or list of str} -- A pattern (str) or list of patterns (list)
to identify the variables to be loaded.
The default loads all variables, i.e. all .npy files. (default: {'*.npy'})
vars_in_cols {bool} -- Return the variables in columns (``True``) or rows ``False``
(default: {True})
index {pd.Series} -- A boolean pandas Series which indicates with ``True`` which samples to
load.
Returns:
pandas.DataFrame -- A dataframe with the data.
"""
def _load(path, index):
if index is None:
arr = np.load(str(path))
else:
arr = np.load(str(path), mmap_mode="r")[index]
return arr
src_dir = Path(src_dir)
paths = []
if isinstance(patterns, str):
patterns = [patterns]
for pat in patterns:
paths += src_dir.glob(pat)
if vars_in_cols:
df_data = {}
for path in paths:
df_data[path.stem] = _load(path, index)
df_data = pd.DataFrame(df_data)
if index is not None:
df_data.index = index.index[index]
else:
df_data = []
for path in paths:
arr = _load(path, index)
df_data.append(pd.DataFrame(np.expand_dims(arr, 0), index=[path.stem]))
df_data = pd.concat(df_data)
if index is not None:
df_data.columns = index.index[index]
return df_data | Load data extracted and stored by :py:func:`extract`
Arguments:
src_dir {str} -- The directory where the data is stored.
Keyword Arguments:
patterns {str, or list of str} -- A pattern (str) or list of patterns (list)
to identify the variables to be loaded.
The default loads all variables, i.e. all .npy files. (default: {'*.npy'})
vars_in_cols {bool} -- Return the variables in columns (``True``) or rows ``False``
(default: {True})
index {pd.Series} -- A boolean pandas Series which indicates with ``True`` which samples to
load.
Returns:
pandas.DataFrame -- A dataframe with the data. |
def dad_status_output_dad_status_entries_message(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
dad_status = ET.Element("dad_status")
config = dad_status
output = ET.SubElement(dad_status, "output")
dad_status_entries = ET.SubElement(output, "dad-status-entries")
message = ET.SubElement(dad_status_entries, "message")
message.text = kwargs.pop('message')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_space(self, space_key, expand='description.plain,homepage'):
"""
Get information about a space through space key
:param space_key: The unique space key name
:param expand: OPTIONAL: additional info from description, homepage
:return: Returns the space along with its ID
"""
url = 'rest/api/space/{space_key}?expand={expand}'.format(space_key=space_key,
expand=expand)
return self.get(url) | Get information about a space through space key
:param space_key: The unique space key name
:param expand: OPTIONAL: additional info from description, homepage
:return: Returns the space along with its ID |
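# Hypothetical call to get_space above, assuming the atlassian-python-api
# Confluence client; the URL, credentials and the 'DEMO' space key are made up.
from atlassian import Confluence

confluence = Confluence(url='https://confluence.example.com', username='user', password='secret')
space = confluence.get_space('DEMO', expand='description.plain,homepage')
space.get('id'), space.get('key')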
def client_id(self, client):
"""
Get a client's ID. Uses GET to /clients?name=<client> interface.
:Args:
* *client*: (str) Client's name
:Returns: (str) Client id
"""
params = {
"name": client
}
response = self._get(url.clients, params=params)
self._check_response(response, 200)
return self._create_response(response).get("client_id") | Get a client's ID. Uses GET to /clients?name=<client> interface.
:Args:
* *client*: (str) Client's name
:Returns: (str) Client id |
def df2chucks(din,chunksize,outd,fn,return_fmt='\t',force=False):
"""
:param return_fmt: '\t': writes each chunk to a tab-separated file (similarly for other separator characters); 'list': returns the chunks as a list of DataFrames
"""
from os.path import exists#,splitext,dirname,splitext,basename,realpath
from os import makedirs
din.index=range(0,len(din),1)
chunkrange=list(np.arange(0,len(din),chunksize))
chunkrange=list(zip([c+1 if ci!=0 else 0 for ci,c in enumerate(chunkrange)],chunkrange[1:]+[len(din)-1]))
chunk2range={}
for ri,r in enumerate(chunkrange):
chunk2range[ri+1]=r
if not exists(outd):
makedirs(outd)
chunks=[]
chunkps=[]
for chunk in chunk2range:
chunkp='{}/{}_chunk{:08d}.tsv'.format(outd,fn,chunk)
rnge=chunk2range[chunk]
din_=din.loc[rnge[0]:rnge[1],:]
if not exists(chunkp) or force:
if return_fmt=='list':
chunks.append(din_)
else:
din_.to_csv(chunkp,sep=return_fmt)
del din_
chunkps.append(chunkp)
if return_fmt=='list':
return chunks
else:
return chunkps | :param return_fmt: '\t': writes each chunk to a tab-separated file (similarly for other separator characters); 'list': returns the chunks as a list of DataFrames |
def load(cosmicFiles, tag=None, sat_id=None):
"""
cosmic data load routine, called by pysat
"""
import netCDF4
num = len(cosmicFiles)
# make sure there are files to read
if num != 0:
# call separate load_files routine, segmented for possible
# multiprocessor load, not included and only benefits about 20%
output = pysat.DataFrame(load_files(cosmicFiles, tag=tag, sat_id=sat_id))
output.index = pysat.utils.create_datetime_index(year=output.year,
month=output.month, day=output.day,
uts=output.hour*3600.+output.minute*60.+output.second)
# make sure UTS strictly increasing
output.sort_index(inplace=True)
# use the first available file to pick out meta information
meta = pysat.Meta()
ind = 0
repeat = True
while repeat:
try:
data = netCDF4.Dataset(cosmicFiles[ind])
ncattrsList = data.ncattrs()
for d in ncattrsList:
meta[d] = {'units':'', 'long_name':d}
keys = data.variables.keys()
for key in keys:
meta[key] = {'units':data.variables[key].units,
'long_name':data.variables[key].long_name}
repeat = False
except RuntimeError:
# file was empty, try the next one by incrementing ind
ind+=1
return output, meta
else:
# no data
return pysat.DataFrame(None), pysat.Meta() | cosmic data load routine, called by pysat |
def split_task_parameters(line):
""" Split a string of comma separated words."""
if line is None:
result = []
else:
result = [parameter.strip() for parameter in line.split(",")]
return result | Split a string of comma separated words. |
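# Quick sketch of split_task_parameters above on a literal string.
split_task_parameters("alpha, beta , gamma")  # -> ['alpha', 'beta', 'gamma']
split_task_parameters(None)                   # -> []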
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
"""Add a field in the index of the model.
Args:
fieldname (Text): This parameter registers a new field in the specified model.
fieldspec (Name, optional): The whoosh field type to use for the new field (defaults to TEXT).
Returns:
TYPE: The updated schema after the field has been added.
"""
self._whoosh.add_field(fieldname, fieldspec)
return self._whoosh.schema | Add a field in the index of the model.
Args:
fieldname (Text): This parameter registers a new field in the specified model.
fieldspec (Name, optional): The whoosh field type to use for the new field (defaults to TEXT).
Returns:
TYPE: The updated schema after the field has been added. |
def _desy_bookkeeping(self, key, value):
"""Populate the ``_desy_bookkeeping`` key."""
return {
'date': normalize_date(value.get('d')),
'expert': force_single_element(value.get('a')),
'status': value.get('s'),
} | Populate the ``_desy_bookkeeping`` key. |
def safe_join(directory, *pathnames):
"""Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory.
"""
parts = [directory]
for filename in pathnames:
if filename != "":
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
return None
parts.append(filename)
return posixpath.join(*parts) | Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory. |
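# Behaviour sketch for safe_join above (POSIX-style paths assumed).
safe_join('/var/uploads', 'reports/2020.csv')  # -> '/var/uploads/reports/2020.csv'
safe_join('/var/uploads', '../etc/passwd')     # -> None, traversal rejected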
def get_transform(offset, scale):
'''
Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
scale. **Note that the third row label is ``w`` and not ``z``.**
'''
return pd.DataFrame([[scale, 0, offset.x], [0, scale, offset.y],
[0, 0, 1]], index=['x', 'y', 'w']) | Parameters
----------
offset : pandas.Series
Cartesian ``(x, y)`` coordinate of offset origin.
scale : pandas.Series
Scaling factor for ``x`` and ``y`` dimensions.
Returns
-------
pandas.DataFrame
3x3 transformation matrix resulting in specified `x/y` offset and
scale. **Note that the third row label is ``w`` and not ``z``.** |
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = int(time.total_seconds())
return self.execute_command('SETEX', name, time, value) | Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object. |
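# Hypothetical usage of setex above (assumes a reachable local Redis and the
# redis-py client); the key and value are illustrative.
import datetime
import redis

r = redis.Redis()
r.setex('session:42', datetime.timedelta(minutes=10), 'payload')  # expires in 600 seconds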
def expand_item(self, item, open_all=True):
"""Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded.
"""
self.expand_row(self._view_path_for(item), open_all) | Display a node as expanded
:param item: The item to show expanded
:param open_all: Whether all child nodes should be recursively
expanded. |
def addChromosome(
self, chrom, tax_id, tax_label=None, build_id=None, build_label=None):
"""
If it's just the chromosome, add it as an instance of a SO:chromosome,
and add it to the genome. If a build is included,
pun the chromosome as a subclass of SO:chromosome, and make the
build-specific chromosome an instance of the supplied chr.
The chr then becomes part of the build or genome.
"""
family = Family(self.graph)
# first, make the chromosome class, at the taxon level
chr_id = makeChromID(str(chrom), tax_id)
if tax_label is not None:
chr_label = makeChromLabel(chrom, tax_label)
else:
chr_label = makeChromLabel(chrom)
genome_id = self.makeGenomeID(tax_id)
self.model.addClassToGraph(chr_id, chr_label, self.globaltt['chromosome'])
self.addTaxon(tax_id, genome_id) # add the taxon to the genome
if build_id is not None:
# the build-specific chromosome
chrinbuild_id = makeChromID(chrom, build_id)
if build_label is None:
build_label = build_id
chrinbuild_label = makeChromLabel(chrom, build_label)
# add the build-specific chromosome as an instance of the chr class
self.model.addIndividualToGraph(chrinbuild_id, chrinbuild_label, chr_id)
# add the build-specific chromosome
# as a member of the build (both ways)
family.addMember(build_id, chrinbuild_id)
family.addMemberOf(chrinbuild_id, build_id)
return | If it's just the chromosome, add it as an instance of a SO:chromosome,
and add it to the genome. If a build is included,
pun the chromosome as a subclass of SO:chromosome, and make the
build-specific chromosome an instance of the supplied chr.
The chr then becomes part of the build or genome. |
def gen_round_trip_stats(round_trips):
"""Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A dictionary where each value is a pandas DataFrame containing
various round-trip statistics.
See also
--------
round_trips.print_round_trip_stats
"""
stats = {}
stats['pnl'] = agg_all_long_short(round_trips, 'pnl', PNL_STATS)
stats['summary'] = agg_all_long_short(round_trips, 'pnl',
SUMMARY_STATS)
stats['duration'] = agg_all_long_short(round_trips, 'duration',
DURATION_STATS)
stats['returns'] = agg_all_long_short(round_trips, 'returns',
RETURN_STATS)
stats['symbols'] = \
round_trips.groupby('symbol')['returns'].agg(RETURN_STATS).T
return stats | Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A dictionary where each value is a pandas DataFrame containing
various round-trip statistics.
See also
--------
round_trips.print_round_trip_stats |
def build_rrule(count=None, interval=None, bysecond=None, byminute=None,
byhour=None, byweekno=None, bymonthday=None, byyearday=None,
bymonth=None, until=None, bysetpos=None, wkst=None, byday=None,
freq=None):
"""
Build rrule dictionary for vRecur class.
:param count: int
:param interval: int
:param bysecond: int
:param byminute: int
:param byhour: int
:param byweekno: int
:param bymonthday: int
:param byyearday: int
:param bymonth: int
:param until: datetime
:param bysetpos: int
:param wkst: str, two-letter weekday
:param byday: weekday
:param freq: str, frequency name ('WEEK', 'MONTH', etc)
:return: dict
"""
result = {}
if count is not None:
result['COUNT'] = count
if interval is not None:
result['INTERVAL'] = interval
if bysecond is not None:
result['BYSECOND'] = bysecond
if byminute is not None:
result['BYMINUTE'] = byminute
if byhour is not None:
result['BYHOUR'] = byhour
if byweekno is not None:
result['BYWEEKNO'] = byweekno
if bymonthday is not None:
result['BYMONTHDAY'] = bymonthday
if byyearday is not None:
result['BYYEARDAY'] = byyearday
if bymonth is not None:
result['BYMONTH'] = bymonth
if until is not None:
result['UNTIL'] = until
if bysetpos is not None:
result['BYSETPOS'] = bysetpos
if wkst is not None:
result['WKST'] = wkst
if byday is not None:
result['BYDAY'] = byday
if freq is not None:
if freq not in vRecur.frequencies:
raise ValueError('Frequency value should be one of: {0}'
.format(vRecur.frequencies))
result['FREQ'] = freq
return result | Build rrule dictionary for vRecur class.
:param count: int
:param interval: int
:param bysecond: int
:param byminute: int
:param byhour: int
:param byweekno: int
:param bymonthday: int
:param byyearday: int
:param bymonth: int
:param until: datetime
:param bysetpos: int
:param wkst: str, two-letter weekday
:param byday: weekday
:param freq: str, frequency name ('WEEK', 'MONTH', etc)
:return: dict |
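# Hedged sketch of build_rrule above; 'WEEKLY' is assumed to be one of
# icalendar's vRecur.frequencies (the RFC 5545 frequency names), and the
# function is assumed to run in the module context that imports vRecur.
rule = build_rrule(freq='WEEKLY', count=4, byday='MO')
# rule == {'COUNT': 4, 'BYDAY': 'MO', 'FREQ': 'WEEKLY'}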
def connect(self, recver):
"""
Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2
"""
r1 = recver
m1 = r1.middle
s2 = self
m2 = self.middle
r2 = self.other
r2.middle = m1
del m2.sender
del m2.recver
del m1.recver
m1.recver = weakref.ref(r2, m1.on_abandoned)
m1.recver_current = m2.recver_current
del r1.middle
del s2.middle
# if we are currently a chain, return the last recver of our chain
while True:
if getattr(r2, 'downstream', None) is None:
break
r2 = r2.downstream.other
return r2 | Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2 |
def raw_datastream_old(request, pid, dsid, type=None, repo=None,
headers=None, accept_range_request=False,
as_of_date=None, streaming=False):
'''
.. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False.
'''
if repo is None:
repo = Repository()
if headers is None:
headers = {}
get_obj_opts = {}
if type is not None:
get_obj_opts['type'] = type
obj = repo.get_object(pid, **get_obj_opts)
range_request = False
partial_request = False
try:
# NOTE: we could test that pid is actually the requested
# obj.has_requisite_content_models but that would mean
# an extra API call for every datastream but RELS-EXT
# Leaving out for now, for efficiency
ds = obj.getDatastreamObject(dsid, as_of_date=as_of_date)
if ds and ds.exists:
# because retrieving the content is expensive and checking
# headers can be useful, explicitly support HEAD requests
if request.method == 'HEAD':
content = ''
elif accept_range_request and request.META.get('HTTP_RANGE', None) is not None:
rng = request.META['HTTP_RANGE']
logger.debug('HTTP Range request: %s', rng)
range_request = True
kind, numbers = rng.split('=')
if kind != 'bytes':
return HttpResponseRangeNotSatisfiable()
try:
start, end = numbers.split('-')
# NOTE: could potentially be complicated stuff like
# this: 0-999,1002-9999,1-9999
# for now, only support the simple case of a single range
except ValueError:
return HttpResponseRangeNotSatisfiable()
start = int(start)
if not end:
end = ds.info.size - 1
else:
end = int(end)
# ignore requests where end is before start
if end < start:
return HttpResponseRangeNotSatisfiable()
if start == end: # safari sends this (weird?); don't 500
partial_length = 0
partial_request = True
content = ''
# special case for bytes=0-
elif start == 0 and end == (ds.info.size - 1):
# set chunksize and end so range headers can be set on response
# partial_length= ds.info.size
partial_length = end - start
content = ds.get_chunked_content()
# range with *NOT* full content requested
elif start != 0 or end != (ds.info.size - 1):
partial_request = True
partial_length = end - start
# chunksize = min(end - start, 4096)
# sample chunk 370726-3005759
content = get_range_content(ds, start, end)
else:
# get the datastream content in chunks, to handle larger datastreams
content = ds.get_chunked_content()
# not using serialize(pretty=True) for XML/RDF datastreams, since
# we actually want the raw datastream content.
http_response_class = HttpResponse
if streaming:
http_response_class = StreamingHttpResponse
response = http_response_class(content, content_type=ds.mimetype)
# NOTE: might want to use StreamingHttpResponse here, at least
# over some size threshold or for range requests
# if we have a checksum, use it as an ETag
# (but checksum not valid when sending partial content)
if ds.checksum_type != 'DISABLED' and not partial_request:
response['ETag'] = ds.checksum
# ds.created is the creation date of this *version* of the datastream,
# so it is effectively our last-modified date
response['Last-Modified'] = ds.created
# Where available, set content length & MD5 checksum in response headers.
# (but checksum not valid when sending partial content)
if ds.checksum_type == 'MD5' and not partial_request:
response['Content-MD5'] = ds.checksum
if ds.info.size and not range_request:
response['Content-Length'] = ds.info.size
if ds.info.size and accept_range_request:
response['Accept-Ranges'] = 'bytes'
# response['Content-Range'] = '0,%d/%d' % (ds.info.size, ds.info.size)
# if partial request, status should be 206 (even for whole file?)
if range_request:
response.status_code = 206
if partial_request:
response['Content-Length'] = partial_length
else:
response['Content-Length'] = ds.info.size
cont_range = 'bytes %d-%d/%d' % (start, end, ds.info.size)
response['Content-Range'] = cont_range
logger.debug('Content-Length=%s Content-Range=%s',
partial_length, cont_range)
# set any user-specified headers that were passed in
for header, val in six.iteritems(headers):
response[header] = val
# Fix for old Fedora data bug where the `Content-Length`
# was -1. If it is -1, we're just going to get rid of it.
# Setting the value to an arbitrary value led to issues.
if int(response['Content-Length']) < 0:
del response['Content-Length']
return response
else:
raise Http404
except RequestFailed as rf:
# if object is not the specified type or if either the object
# or the requested datastream doesn't exist, 404
if rf.code == 404 or \
(type is not None and not obj.has_requisite_content_models) or \
not getattr(obj, dsid).exists or not obj.exists:
raise Http404
# for anything else, re-raise & let Django's default 500 logic handle it
raise | .. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False. |
def get_gradient_y(shape, py):
"""Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros.
"""
import scipy.sparse
height, width = shape
rows = []
empty = scipy.sparse.dia_matrix((width, width))
identity = scipy.sparse.identity(width)
# Create the blocks by row, beginning with blocks leading up to the peak row from the top
for n in range(py):
row = [empty]*n
row += [-identity, identity]
row += [empty]*(height-n-2)
rows.append(row)
# Set all elements in the peak row to zero
rows.append([empty]*height)
# Create the blocks for the rows leading up to the peak row from the bottom
for n in range(height-py-1):
row = [empty]*(py+n)
row += [identity, -identity]
row += [empty]*(height-py-n-2)
rows.append(row)
return scipy.sparse.bmat(rows) | Calculate the gradient in the y direction to the line at py
The y gradient operator is a block matrix, where each block is the size of the image width.
The matrix itself is made up of (img_height x img_height) blocks, most of which are all zeros. |
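# Shape check for get_gradient_y above (assumes scipy is installed): the
# operator acts on the flattened image, so it is (height*width) x (height*width).
G = get_gradient_y((4, 3), py=2)
G.shape  # -> (12, 12); the block of rows belonging to the peak row py is all zeros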
def loadSenderKey(self, senderKeyName):
"""
:type senderKeyName: SenderKeyName
"""
q = "SELECT record FROM sender_keys WHERE group_id = ? and sender_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (senderKeyName.getGroupId(), senderKeyName.getSender().getName()))
result = cursor.fetchone()
if not result:
return SenderKeyRecord()
return SenderKeyRecord(serialized = result[0]) | :type senderKeyName: SenderKeyName |
def p_for_stmt(p):
"""
for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
| FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
| FOR matrix EQ expr SEMI stmt_list END_STMT
"""
if len(p) == 8:
if not isinstance(p[2], node.ident):
raise_exception(SyntaxError, "Not implemented: for loop", new_lexer)
p[2].props = "I" # I= for-loop iteration variable
p[0] = node.for_stmt(ident=p[2], expr=p[4], stmt_list=p[6]) | for_stmt : FOR ident EQ expr SEMI stmt_list END_STMT
| FOR LPAREN ident EQ expr RPAREN SEMI stmt_list END_STMT
| FOR matrix EQ expr SEMI stmt_list END_STMT |
def mp2q(p, q):
"""Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire
"""
p, q = flatten(p), flatten(q)
entropy_dist = 1 / len(p)
return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q))) | Compute the MP2Q measure.
Args:
p (np.ndarray): The unpartitioned repertoire
q (np.ndarray): The partitioned repertoire |
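# Toy check of mp2q above (assumes `flatten` passes 1-D arrays through
# unchanged): the measure is 0 when the two repertoires are identical.
import numpy as np
p = np.array([0.5, 0.5])
mp2q(p, p)  # -> 0.0, since log(p / p) == 0 everywhere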
def _generate_subscribe_headers(self):
"""
Generate the subscribe stub headers based on the supplied config.
:return: list of (header name, value) tuples for the subscribe stub
"""
headers =[]
headers.append(('predix-zone-id', self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
headers.append(('subscribername', self._config.subscriber_name))
headers.append(('authorization', token[(token.index(' ') + 1):]))
if not self._config.topics:  # no topics configured; fall back to the default zone topic
headers.append(('topic', self.eventhub_client.zone_id + '_topic'))
else:
for topic in self._config.topics:
headers.append(('topic', topic))
headers.append(('offset-newest', str(self._config.recency == self._config.Recency.NEWEST).lower()))
headers.append(('acks', str(self._config.acks_enabled).lower()))
if self._config.acks_enabled:
headers.append(('max-retries', str(self._config.ack_max_retries)))
headers.append(('retry-interval', str(self._config.ack_retry_interval_seconds) + 's'))
headers.append(('duration-before-retry', str(self._config.ack_duration_before_retry_seconds) + 's'))
if self._config.batching_enabled:
headers.append(('batch-size', str(self._config.batch_size)))
headers.append(('batch-interval', str(self._config.batch_interval_millis) + 'ms'))
return headers | Generate the subscribe stub headers based on the supplied config.
:return: list of (header name, value) tuples for the subscribe stub |
def authorize_handler(self, f):
"""Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method == 'GET':
# render a page for user to confirm the authorization
return render_template('oauthorize.html')
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
"""
@wraps(f)
def decorated(*args, **kwargs):
# raise if server not implemented
server = self.server
uri, http_method, body, headers = extract_params()
if request.method in ('GET', 'HEAD'):
redirect_uri = request.args.get('redirect_uri', self.error_uri)
log.debug('Found redirect_uri %s.', redirect_uri)
try:
ret = server.validate_authorization_request(
uri, http_method, body, headers
)
scopes, credentials = ret
kwargs['scopes'] = scopes
kwargs.update(credentials)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and not e.state:
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri
return self._on_exception(e, e.in_uri(redirect_uri))
except Exception as e:
log.exception(e)
return self._on_exception(e, add_params_to_uri(
self.error_uri, {'error': str(e)}
))
else:
redirect_uri = request.values.get(
'redirect_uri', self.error_uri
)
try:
rv = f(*args, **kwargs)
except oauth2.FatalClientError as e:
log.debug('Fatal client error %r', e, exc_info=True)
return self._on_exception(e, e.in_uri(self.error_uri))
except oauth2.OAuth2Error as e:
log.debug('OAuth2Error: %r', e, exc_info=True)
# on auth error, we should preserve state if it's present according to RFC 6749
state = request.values.get('state')
if state and not e.state:
e.state = state # set e.state so e.in_uri() can add the state query parameter to redirect uri
return self._on_exception(e, e.in_uri(redirect_uri))
if not isinstance(rv, bool):
# if is a response or redirect
return rv
if not rv:
# denied by user
e = oauth2.AccessDeniedError(state=request.values.get('state'))
return self._on_exception(e, e.in_uri(redirect_uri))
return self.confirm_authorization_request()
return decorated | Authorization handler decorator.
This decorator will sort the parameters and headers out, and
pre validate everything::
@app.route('/oauth/authorize', methods=['GET', 'POST'])
@oauth.authorize_handler
def authorize(*args, **kwargs):
if request.method == 'GET':
# render a page for user to confirm the authorization
return render_template('oauthorize.html')
confirm = request.form.get('confirm', 'no')
return confirm == 'yes' |
def do_run_one(self, args):
'''run a single job'''
work_spec_names = args.from_work_spec or None
worker = SingleWorker(self.config, task_master=self.task_master, work_spec_names=work_spec_names, max_jobs=args.max_jobs)
worker.register()
rc = False
starttime = time.time()
count = 0
try:
while True:
rc = worker.run()
if not rc:
break
count += 1
if (args.limit_seconds is None) and (args.limit_count is None):
# only do one
break
if (args.limit_seconds is not None) and ((time.time() - starttime) >= args.limit_seconds):
break
if (args.limit_count is not None) and (count >= args.limit_count):
break
finally:
worker.unregister()
if not rc:
self.exitcode = 2 | run a single job |
def format_rst(self):
"""
return table in RST format
"""
res = ''
num_cols = len(self.header)
col_width = 25
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for c in self.header:
res += c.ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
for row in self.arr:
for c in row:
res += self.force_to_string(c).ljust(col_width)
res += '\n'
for _ in range(num_cols):
res += ''.join(['=' for _ in range(col_width - 1)]) + ' '
res += '\n'
return res | return table in RST format |
def preserve_set_th1_add_directory(state=True):
"""
Context manager to temporarily set TH1.AddDirectory() state
"""
with LOCK:
status = ROOT.TH1.AddDirectoryStatus()
try:
ROOT.TH1.AddDirectory(state)
yield
finally:
ROOT.TH1.AddDirectory(status) | Context manager to temporarily set TH1.AddDirectory() state |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remaining_life(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remaining_life = ET.SubElement(lldp_neighbor_detail, "remaining-life")
remaining_life.text = kwargs.pop('remaining_life')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
slug_separator='-'):
"""
Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager.
"""
slug_field = instance._meta.get_field(slug_field_name)
slug = getattr(instance, slug_field.attname)
slug_len = slug_field.max_length
# Sort out the initial slug, limiting its length if necessary.
slug = slugify(value)
if slug_len:
slug = slug[:slug_len]
slug = _slug_strip(slug, slug_separator)
original_slug = slug
# Create the queryset if one wasn't explicitly provided and exclude the
# current instance from the queryset.
if queryset is None:
queryset = instance.__class__._default_manager.all()
if instance.pk:
queryset = queryset.exclude(pk=instance.pk)
# Find a unique slug. If one matches, add '-2' to the end and try again
# (then '-3', etc).
next = 2
while not slug or queryset.filter(**{slug_field_name: slug}):
slug = original_slug
end = '%s%s' % (slug_separator, next)
if slug_len and len(slug) + len(end) > slug_len:
slug = slug[:slug_len-len(end)]
slug = _slug_strip(slug, slug_separator)
slug = '%s%s' % (slug, end)
next += 1
setattr(instance, slug_field.attname, slug) | Calculates and stores a unique slug of ``value`` for an instance.
``slug_field_name`` should be a string matching the name of the field to
store the slug in (and the field to check against for uniqueness).
``queryset`` usually doesn't need to be explicitly provided - it'll default
to using the ``.all()`` queryset from the model's default manager. |
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type)
return cpptypes.array_t(result_type, nake_type.size)
return nake_type.base | removes volatile from the type definition
If type is not volatile type, it will be returned as is |
def data_received(self, data):
"""Handle data received."""
self.tokenizer.feed(data)
while self.tokenizer.has_tokens():
raw = self.tokenizer.get_next_token()
frame = frame_from_raw(raw)
if frame is not None:
self.frame_received_cb(frame) | Handle data received. |
def target(self):
"""
Find the target name for this build.
:returns: deferred that when fired returns the build task's target
name. If we could not determine the build task, or the task's
target, return None.
"""
task = yield self.task()
if not task:
yield defer.succeed(None)
defer.returnValue(None)
defer.returnValue(task.target) | Find the target name for this build.
:returns: deferred that when fired returns the build task's target
name. If we could not determine the build task, or the task's
target, return None. |
def hpss_demo(input_file, output_harmonic, output_percussive):
'''HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav)
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Separate components with the effects module
print('Separating harmonics and percussives... ')
y_harmonic, y_percussive = librosa.effects.hpss(y)
# 5. Save the results
print('Saving harmonic audio to: ', output_harmonic)
librosa.output.write_wav(output_harmonic, y_harmonic, sr)
print('Saving percussive audio to: ', output_percussive)
librosa.output.write_wav(output_percussive, y_percussive, sr) | HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav) |
def flexifunction_directory_send(self, target_system, target_component, directory_type, start_index, count, directory_data, force_mavlink1=False):
'''
Acknowledge success or failure of a flexifunction command
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
directory_type : 0=inputs, 1=outputs (uint8_t)
start_index : index of first directory entry to write (uint8_t)
count : count of directory entries to write (uint8_t)
directory_data : Settings data (int8_t)
'''
return self.send(self.flexifunction_directory_encode(target_system, target_component, directory_type, start_index, count, directory_data), force_mavlink1=force_mavlink1) | Acknowledge success or failure of a flexifunction command
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
directory_type : 0=inputs, 1=outputs (uint8_t)
start_index : index of first directory entry to write (uint8_t)
count : count of directory entries to write (uint8_t)
directory_data : Settings data (int8_t) |
def colorize(self, colormap):
"""Colorize the current image using
*colormap*. Works only on "L" or "LA" images.
"""
if self.mode not in ("L", "LA"):
raise ValueError("Image should be grayscale to colorize")
if self.mode == "LA":
alpha = self.channels[1]
else:
alpha = None
self.channels = colormap.colorize(self.channels[0])
if alpha is not None:
self.channels.append(alpha)
self.mode = "RGBA"
else:
self.mode = "RGB" | Colorize the current image using
*colormap*. Works only on "L" or "LA" images. |
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
"""
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel) | Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e')) |
def combine_metadata(*metadata_objects, **kwargs):
"""Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of datetime objects will be averaged. This is to handle cases where
data were observed at almost the same time but not exactly.
Args:
*metadata_objects: MetadataObject or dict objects to combine
average_times (bool): Average any keys with 'time' in the name
Returns:
dict: the combined metadata
"""
average_times = kwargs.get('average_times', True) # python 2 compatibility (no kwarg after *args)
shared_keys = None
info_dicts = []
# grab all of the dictionary objects provided and make a set of the shared keys
for metadata_object in metadata_objects:
if isinstance(metadata_object, dict):
metadata_dict = metadata_object
elif hasattr(metadata_object, "attrs"):
metadata_dict = metadata_object.attrs
else:
continue
info_dicts.append(metadata_dict)
if shared_keys is None:
shared_keys = set(metadata_dict.keys())
else:
shared_keys &= set(metadata_dict.keys())
# combine all of the dictionaries
shared_info = {}
for k in shared_keys:
values = [nfo[k] for nfo in info_dicts]
any_arrays = any([isinstance(val, np.ndarray) for val in values])
if any_arrays:
if all(np.all(val == values[0]) for val in values[1:]):
shared_info[k] = values[0]
elif 'time' in k and isinstance(values[0], datetime) and average_times:
shared_info[k] = average_datetimes(values)
elif all(val == values[0] for val in values[1:]):
shared_info[k] = values[0]
return shared_info | Combine the metadata of two or more Datasets.
If any keys are not equal or do not exist in all provided dictionaries
then they are not included in the returned dictionary.
By default any keys with the word 'time' in them and consisting
of datetime objects will be averaged. This is to handle cases where
data were observed at almost the same time but not exactly.
Args:
*metadata_objects: MetadataObject or dict objects to combine
average_times (bool): Average any keys with 'time' in the name
Returns:
dict: the combined metadata |
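A minimal usage sketch of combine_metadata, assuming the function above is in scope; the dictionary keys and values here are invented for illustration.
from datetime import datetime
meta_a = {'platform': 'a-sat', 'start_time': datetime(2020, 1, 1, 12, 0, 0), 'resolution': 1000}
meta_b = {'platform': 'a-sat', 'start_time': datetime(2020, 1, 1, 12, 0, 30), 'calibration': 'counts'}
combined = combine_metadata(meta_a, meta_b)
# 'platform' is kept because it is equal in both dicts,
# 'start_time' is averaged because its name contains 'time' and holds datetimes,
# 'resolution' and 'calibration' are dropped because they are not shared.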
def enable(self):
"""Return True|False if the AMP is enabled in the configuration file (enable=true|false)."""
ret = self.get('enable')
if ret is None:
return False
else:
return ret.lower().startswith('true') | Return True|False if the AMP is enabled in the configuration file (enable=true|false). |
def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
"""Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response
"""
if not retstart:
retstart = 0
if not retmax:
retmax = 500
if not count:
count = retmax
retmax = 500 # Entrez can return a max of 500
while retstart < count:
diff = count - retstart
if diff < 500:
retmax = diff
_url = url + f'&retstart={retstart}&retmax={retmax}'
resp = entrez_try_get_multiple_times(_url)
if resp is None:
return
retstart += retmax
yield resp | Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
retmax : int
Return at most this number of values.
count : int
The number of results returned by EQuery.
Yields
------
requests.Response |
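A hedged usage sketch of the generator above: the esearch URL and the total hit count are illustrative placeholders, and the entrez_try_get_multiple_times helper is assumed to be available in the same module.
esearch_url = ('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
               'esearch.fcgi?db=pubmed&term=tardigrade')
total_hits = 1234  # normally taken from the EQuery 'Count' field
for resp in entrez_sets_of_results(esearch_url, count=total_hits):
    print(resp.status_code, len(resp.text))  # one response per batch of at most 500 records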
def _checkstatus(status, line):
"""Returns state/status after reading the next line.
The status codes are::
0 - BEGIN parsing; 1 - ENTER METADATA GROUP, 2 - READ METADATA LINE,
3 - END METADATA GROUP, 4 - END PARSING
Permitted Transitions::
0 --> 1, 0 --> 4
1 --> 1, 1 --> 2, 1 --> 3
2 --> 2, 2 --> 3
3 --> 1, 3 --> 3, 3 --> 4
"""
newstatus = 0
if status == 0:
# begin --> enter metadata group OR end
if _islinetype(line, GRPSTART):
newstatus = 1
elif _isfinal(line):
newstatus = 4
elif status == 1:
# enter metadata group --> enter metadata group
# OR add metadata item OR leave metadata group
if _islinetype(line, GRPSTART):
newstatus = 1
elif _islinetype(line, GRPEND):
newstatus = 3
elif _isassignment(line):
# test AFTER start and end, as both are also assignments
newstatus = 2
elif status == 2:
if _islinetype(line, GRPEND):
newstatus = 3
elif _isassignment(line):
# test AFTER start and end, as both are also assignments
newstatus = 2
elif status == 3:
if _islinetype(line, GRPSTART):
newstatus = 1
elif _islinetype(line, GRPEND):
newstatus = 3
elif _isfinal(line):
newstatus = 4
if newstatus != 0:
return newstatus
elif status != 4:
raise MTLParseError(
"Cannot parse the following line after status "
+ "'%s':\n%s" % (STATUSCODE[status], line)) | Returns state/status after reading the next line.
The status codes are::
0 - BEGIN parsing; 1 - ENTER METADATA GROUP, 2 - READ METADATA LINE,
3 - END METADATA GROUP, 4 - END PARSING
Permitted Transitions::
0 --> 1, 0 --> 4
1 --> 1, 1 --> 2, 1 --> 3
2 --> 2, 2 --> 3
3 --> 1, 3 --> 3, 3 --> 4 |
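A hypothetical driver loop showing how the state machine above could be exercised; _checkstatus and the real value parsing live elsewhere in the module, so this only sketches the control flow.
def _walk_states(lines):
    status = 0
    for line in lines:
        status = _checkstatus(status, line)  # advances 0 -> 1 -> 2 -> 3 -> 4, raising on bad input
        if status == 4:  # END PARSING
            break
    return status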
def get_bucket(self, hash_name, bucket_key):
"""
Returns bucket content as list of tuples (vector, data).
"""
results = []
for row in self._get_bucket_rows(hash_name, bucket_key):
val_dict = pickle.loads(row)
# Depending on type (sparse or not) reconstruct vector
if 'sparse' in val_dict:
# Fill these for COO creation
row = []
col = []
data = []
# For each non-zero element, append values
for e in val_dict['nonzeros']:
row.append(e[0]) # Row index
data.append(e[1]) # Value
col.append(0) # Column index (always 0)
# Create numpy arrays for COO creation
coo_row = numpy.array(row, dtype=numpy.int32)
coo_col = numpy.array(col, dtype=numpy.int32)
coo_data = numpy.array(data)
# Create COO sparse vector
vector = scipy.sparse.coo_matrix((coo_data, (coo_row, coo_col)), shape=(val_dict['dim'], 1))
else:
vector = numpy.fromstring(val_dict['vector'],
dtype=val_dict['dtype'])
# Add data to result tuple, if present
results.append((vector, val_dict.get('data')))
return results | Returns bucket content as list of tuples (vector, data). |
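A self-contained sketch of the sparse branch above: rebuilding a column vector from (row index, value) pairs with scipy's COO format. The example data is invented.
import numpy
import scipy.sparse
nonzeros = [(2, 0.5), (7, 1.25)]  # (row index, value); the column index is always 0
rows = numpy.array([e[0] for e in nonzeros], dtype=numpy.int32)
cols = numpy.zeros(len(nonzeros), dtype=numpy.int32)
data = numpy.array([e[1] for e in nonzeros])
vector = scipy.sparse.coo_matrix((data, (rows, cols)), shape=(10, 1))
# vector.toarray() is a 10x1 column holding 0.5 at row 2 and 1.25 at row 7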
def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds | Add model grid attributes to a dataset |
def straddle(self, strike, expiry):
"""
Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle.
"""
_rows = {}
_prices = {}
for _opttype in _constants.OPTTYPES:
_rows[_opttype] = _relevant_rows(self.data, (strike, expiry, _opttype,),
"No key for {} strike {} {}".format(expiry, strike, _opttype))
_prices[_opttype] = _getprice(_rows[_opttype])
_eq = _rows[_constants.OPTTYPES[0]].loc[:, 'Underlying_Price'].values[0]
_qt = _rows[_constants.OPTTYPES[0]].loc[:, 'Quote_Time'].values[0]
_index = ['Call', 'Put', 'Credit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_prices['call'], _prices['put'], _prices['call'] + _prices['put'], _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) | Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle. |
def stream(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, limit=None, page_size=None):
"""
Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
from_=from_,
to=to,
date_created_on_or_before=date_created_on_or_before,
date_created_after=date_created_after,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams FaxInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.fax.v1.fax.FaxInstance] |
def create_app(debug=False):
"""
Create the flask app
:param debug: Use debug mode
:type debug: bool
:return: Created app
:rtype: flask.Flask
"""
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
app.json_encoder = DateTimeEncoder
app.register_blueprint(page)
Bower(app)
api = Api(app)
api.add_resource(
PluginListAPI,
api_path + "plugins/",
endpoint="APIPlugins"
)
api.add_resource(
PluginAPI,
api_path + "plugins/<plugin_key>",
endpoint="APIPlugin"
)
api.add_resource(
PluginResourceAPI,
api_path + "plugins/<plugin_key>/resources/",
endpoint="APIPluginResource"
)
if debug:
# Set up the app for real debugging mode
app.debug = True
# Force update of static files (even in dev mode, browsers still cache)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.after_request
def add_header(response):
response.headers['Cache-Control'] = "public, max-age=0"
return response
return app | Create the flask app
:param debug: Use debug mode
:type debug: bool
:return: Created app
:rtype: flask.Flask |
def make_url(path, protocol=None, hosts=None):
"""Make an URL given a path, and optionally, a protocol and set of
hosts to select from randomly.
:param path: The Archive.org path.
:type path: str
:param protocol: (optional) The HTTP protocol to use. "https://" is
used by default.
:type protocol: str
:param hosts: (optional) A set of hosts. A host will be chosen at
random. The default host is "archive.org".
:type hosts: iterable
:rtype: str
:returns: An Absolute URI.
"""
protocol = 'https://' if not protocol else protocol
host = hosts[random.randrange(len(hosts))] if hosts else 'archive.org'
return protocol + host + path.strip() | Make an URL given a path, and optionally, a protocol and set of
hosts to select from randomly.
:param path: The Archive.org path.
:type path: str
:param protocol: (optional) The HTTP protocol to use. "https://" is
used by default.
:type protocol: str
:param hosts: (optional) A set of hosts. A host will be chosen at
random. The default host is "archive.org".
:type hosts: iterable
:rtype: str
:returns: An Absolute URI. |
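Assuming make_url is in scope, two illustrative calls (the alternate host name is a placeholder):
make_url('/details/example-item')
# -> 'https://archive.org/details/example-item'
make_url('/details/example-item', protocol='http://', hosts=['web.archive.org'])
# -> 'http://web.archive.org/details/example-item'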
def get_users_for_assigned_to():
""" Return a list of users who can be assigned to workflow states """
User = get_user_model()
return User.objects.filter(is_active=True, is_staff=True) | Return a list of users who can be assigned to workflow states |
def get_by_name(self, name):
"""
Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager.
"""
san_managers = self._client.get_all()
result = [x for x in san_managers if x['name'] == name]
return result[0] if result else None | Gets a SAN Manager by name.
Args:
name: Name of the SAN Manager
Returns:
dict: SAN Manager. |
def get_currency(self, code):
"""
Helper function
Returns a dict containing:
shortname (the code)
longname
users - a comma separated list of countries/regions/cities that use it
alternatives - alternative names, e.g. ewro, Quid, Buck
symbol - e.g. £, $
highlight - ?
"""
for currency in self.currencies:
if currency['shortname'] == code:
return currency
raise RuntimeError("%s: %s not found" % (self.name, code)) | Helper function
Returns a dict containing:
shortname (the code)
longname
users - a comma separated list of countries/regions/cities that use it
alternatives - alternative names, e.g. ewro, Quid, Buck
symbol - e.g. £, $
highlight - ? |
def _replace_with_specific_page(page, menu_item):
"""
If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object.
"""
if type(page) is Page:
page = page.specific
if isinstance(menu_item, MenuItem):
menu_item.link_page = page
else:
menu_item = page
return page, menu_item | If ``page`` is a vanilla ``Page`` object, replace it with a 'specific'
version of itself. Also update ``menu_item``, depending on whether it's
a ``MenuItem`` object or a ``Page`` object. |
def get_attributes(self, dataset):
"""Get available attritbutes from dataset you've selected"""
attributes = self.attributes(dataset)
attr_ = [ (k, v[0]) for k, v in attributes.items()]
return pd.DataFrame(attr_, columns=["Attribute","Description"]) | Get available attributes from the dataset you've selected |
def _insert_html(self, cursor, html):
""" Inserts HTML using the specified cursor in such a way that future
formatting is unaffected.
"""
cursor.beginEditBlock()
cursor.insertHtml(html)
# After inserting HTML, the text document "remembers" it's in "html
# mode", which means that subsequent calls adding plain text will result
# in unwanted formatting, lost tab characters, etc. The following code
# hacks around this behavior, which I consider to be a bug in Qt, by
# (crudely) resetting the document's style state.
cursor.movePosition(QtGui.QTextCursor.Left,
QtGui.QTextCursor.KeepAnchor)
if cursor.selection().toPlainText() == ' ':
cursor.removeSelectedText()
else:
cursor.movePosition(QtGui.QTextCursor.Right)
cursor.insertText(' ', QtGui.QTextCharFormat())
cursor.endEditBlock() | Inserts HTML using the specified cursor in such a way that future
formatting is unaffected. |
def on_startup(self, callback: callable, polling=True, webhook=True):
"""
Register a callback for the startup process
:param callback:
:param polling: use with polling
:param webhook: use with webhook
"""
self._check_frozen()
if not webhook and not polling:
warn('This action has no effect!', UserWarning)
return
if isinstance(callback, (list, tuple, set)):
for cb in callback:
self.on_startup(cb, polling, webhook)
return
if polling:
self._on_startup_polling.append(callback)
if webhook:
self._on_startup_webhook.append(callback) | Register a callback for the startup process
:param callback:
:param polling: use with polling
:param webhook: use with webhook |
def price_unit(self):
"""Return the price unit."""
currency = self.currency
consumption_unit = self.consumption_unit
if not currency or not consumption_unit:
_LOGGER.error("Could not find price_unit.")
return " "
return currency + "/" + consumption_unit | Return the price unit. |
def sample(self, bqm, num_reads=10):
"""Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, default=10):
Number of reads.
Returns:
:obj:`.SampleSet`
"""
values = tuple(bqm.vartype.value)
def _itersample():
for __ in range(num_reads):
sample = {v: choice(values) for v in bqm.linear}
energy = bqm.energy(sample)
yield sample, energy
samples, energies = zip(*_itersample())
return SampleSet.from_samples(samples, bqm.vartype, energies) | Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, default=10):
Number of reads.
Returns:
:obj:`.SampleSet` |
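A hedged usage sketch, assuming this class is exposed as dimod.RandomSampler in the dimod package; the Ising problem below is arbitrary.
import dimod
bqm = dimod.BinaryQuadraticModel.from_ising({'a': -1.0, 'b': 1.0}, {('a', 'b'): -0.5})
sampleset = dimod.RandomSampler().sample(bqm, num_reads=5)
print(sampleset.first)  # lowest-energy sample among the random reads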
def plot_welch_perdiogram(x, fs, nperseg):
'''Plot Welch periodogram
Args
----
x: ndarray
Signal array
fs: float
Sampling frequency
nperseg: float
Length of each data segment in PSD
'''
import scipy.signal
import numpy
# The input is expected to resemble scipy's welch example: a 2 Vrms sine wave
# at 1234 Hz, corrupted by 0.001 V**2/Hz of white noise sampled at 10 kHz.
N = len(x)
time = numpy.arange(N) / fs
# Compute and plot the power spectral density.
f, Pxx_den = scipy.signal.welch(x, fs, nperseg=nperseg)
plt.semilogy(f, Pxx_den)
plt.ylim([0.5e-3, 1])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.show()
# If we average the last half of the spectral density, to exclude the peak,
# we can recover the noise power on the signal.
numpy.mean(Pxx_den[256:]) # 0.0009924865443739191
# compute power spectrum
f, Pxx_spec = scipy.signal.welch(x, fs, 'flattop', 1024,
scaling='spectrum')
plt.figure()
plt.semilogy(f, numpy.sqrt(Pxx_spec))
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.show()
return None | Plot Welch periodogram
Args
----
x: ndarray
Signal array
fs: float
Sampling frequency
nperseg: float
Length of each data segment in PSD |
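A hypothetical call matching the comments in the body above (signal parameters follow the scipy.signal.welch documentation); matplotlib must already be importable as plt in the defining module.
import numpy
fs = 10e3
t = numpy.arange(1e5) / fs
amp = 2 * numpy.sqrt(2)  # 2 Vrms sine at 1234 Hz
noise = numpy.random.normal(scale=numpy.sqrt(0.001 * fs / 2), size=t.size)
x = amp * numpy.sin(2 * numpy.pi * 1234 * t) + noise
plot_welch_perdiogram(x, fs=fs, nperseg=1024)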
def _account_table(accounts):
"""
creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rtype: dict (str -> `alot.account.Account`)
"""
accountmap = {}
for acc in accounts:
accountmap[acc.address] = acc
for alias in acc.aliases:
accountmap[alias] = acc
return accountmap | creates a lookup table (emailaddress -> account) for a given list of
accounts
:param accounts: list of accounts
:type accounts: list of `alot.account.Account`
:returns: hashtable
:rtype: dict (str -> `alot.account.Account`) |
def fetch_url(url):
"""
Fetch the given url, strip formfeeds and decode
it into the defined encoding
"""
with closing(urllib.urlopen(url)) as f:
if f.code == 200:
response = f.read()
return strip_formfeeds(response).decode(ENCODING) | Fetch the given url, strip formfeeds and decode
it into the defined encoding |
def _readClusterSettings(self):
"""
Reads the cluster settings from the instance metadata, which assumes the instance
is the leader.
"""
instanceMetaData = get_instance_metadata()
region = zoneToRegion(self._zone)
conn = boto.ec2.connect_to_region(region)
instance = conn.get_all_instances(instance_ids=[instanceMetaData["instance-id"]])[0].instances[0]
self.clusterName = str(instance.tags["Name"])
self._buildContext()
self._subnetID = instance.subnet_id
self._leaderPrivateIP = instanceMetaData['local-ipv4'] # this is PRIVATE IP
self._keyName = list(instanceMetaData['public-keys'].keys())[0]
self._tags = self.getLeader().tags
self._masterPublicKey = self._setSSH() | Reads the cluster settings from the instance metadata, which assumes the instance
is the leader. |
def exec_(self, columns=(), by=(), where=(), **kwds):
"""exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3
"""
return self._seu('exec', columns, by, where, kwds) | exec from self
>>> t = q('([]a:1 2 3; b:10 20 30)')
>>> t.exec_('a', where='b > 10').show()
2 3 |
def kube_pod_status_phase(self, metric, scraper_config):
""" Phase a pod is in. """
metric_name = scraper_config['namespace'] + '.pod.status_phase'
status_phase_counter = Counter()
for sample in metric.samples:
# Counts aggregated cluster-wide to avoid no-data issues on pod churn,
# pod granularity available in the service checks
tags = [
self._label_to_tag('namespace', sample[self.SAMPLE_LABELS], scraper_config),
self._label_to_tag('phase', sample[self.SAMPLE_LABELS], scraper_config),
] + scraper_config['custom_tags']
status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(status_phase_counter):
self.gauge(metric_name, count, tags=list(tags)) | Phase a pod is in. |
def _extract_html_hex(string):
"""Get the first 3 or 6 hex digits in the string"""
try:
hex_string = string and _hex_regexp().search(string).group(0) or ''
except AttributeError:
return None
if len(hex_string) == 3:
hex_string = hex_string[0] * 2 + hex_string[1] * 2 + hex_string[2] * 2
return hex_string | Get the first 3 or 6 hex digits in the string |
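A self-contained illustration of the shorthand expansion above; the module's _hex_regexp() helper is approximated here with a simple pattern, so this is only a sketch.
import re
def expand_hex(string):
    match = re.search(r'[0-9a-fA-F]{6}|[0-9a-fA-F]{3}', string or '')
    if match is None:
        return None
    hex_string = match.group(0)
    if len(hex_string) == 3:
        # double each digit, e.g. 'f2c' -> 'ff22cc'
        hex_string = hex_string[0] * 2 + hex_string[1] * 2 + hex_string[2] * 2
    return hex_string
expand_hex('color: #f2c;')  # -> 'ff22cc'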
def is_prime(n, mr_rounds=25):
"""Test whether n is probably prime
See <https://en.wikipedia.org/wiki/Primality_test#Probabilistic_tests>
Arguments:
n (int): the number to be tested
mr_rounds (int, optional): number of Miller-Rabin iterations to run;
defaults to 25 iterations, which is what the GMP library uses
Returns:
bool: when this function returns False, `n` is composite (not prime);
when it returns True, `n` is prime with overwhelming probability
"""
# as an optimization we quickly detect small primes using the list above
if n <= first_primes[-1]:
return n in first_primes
# for small divisors (relatively frequent), euclidean division is best
for p in first_primes:
if n % p == 0:
return False
# the actual generic test; give a false prime with probability 2⁻⁵⁰
return miller_rabin(n, mr_rounds) | Test whether n is probably prime
See <https://en.wikipedia.org/wiki/Primality_test#Probabilistic_tests>
Arguments:
n (int): the number to be tested
mr_rounds (int, optional): number of Miller-Rabin iterations to run;
defaults to 25 iterations, which is what the GMP library uses
Returns:
bool: when this function returns False, `n` is composite (not prime);
when it returns True, `n` is prime with overwhelming probability |
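The miller_rabin helper used above is defined elsewhere in the module; the following is only a minimal sketch of such a probabilistic test, not the module's implementation.
import random
def miller_rabin_sketch(n, rounds):
    # write n - 1 as d * 2**r with d odd
    d, r = n - 1, 0
    while d % 2 == 0:
        d //= 2
        r += 1
    for _ in range(rounds):
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False  # 'a' witnesses that n is composite
    return True  # n is probably prime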
def cancel_signature_request(self, signature_request_id):
''' Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signature_request_id (str): The id of the signature request to cancel
Returns:
None
'''
request = self._get_request()
request.post(url=self.SIGNATURE_REQUEST_CANCEL_URL + signature_request_id, get_json=False) | Cancels a SignatureRequest
Cancels a SignatureRequest. After canceling, no one will be able to sign
or access the SignatureRequest or its documents. Only the requester can
cancel and only before everyone has signed.
Args:
signature_request_id (str): The id of the signature request to cancel
Returns:
None |
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: Dataset
A new dataset where all the specified dims are sorted by dim
labels.
"""
from .dataarray import DataArray
if not isinstance(variables, list):
variables = [variables]
else:
variables = variables
variables = [v if isinstance(v, DataArray) else self[v]
for v in variables]
aligned_vars = align(self, *variables, join='left')
aligned_self = aligned_vars[0]
aligned_other_vars = aligned_vars[1:]
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
if (data_array.dtype == object and
LooseVersion(np.__version__) < LooseVersion('1.11.0')):
raise NotImplementedError(
'sortby uses np.lexsort under the hood, which requires '
'numpy 1.11.0 or later to support object data-type.')
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(**indices) | Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables: str, DataArray, or list of either
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending: boolean, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted: Dataset
A new dataset where all the specified dims are sorted by dim
labels. |
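A short usage sketch, assuming xarray is installed (this method appears to be xarray's Dataset.sortby); the data is invented.
import xarray as xr
ds = xr.Dataset({'temp': (('x',), [30.0, 10.0, 20.0])}, coords={'x': [3, 1, 2]})
ds.sortby('x')                   # 'x' becomes [1, 2, 3] and 'temp' follows as [10., 20., 30.]
ds.sortby('x', ascending=False)  # descending order instead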
def _gdcm_to_numpy(self, image):
""" Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage()
"""
gdcm_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
pixel_format = image.GetPixelFormat().GetScalarType()
if pixel_format in gdcm_typemap:
self.data_type = gdcm_typemap[pixel_format]
else:
raise KeyError("{} is not a supported pixel format".format(pixel_format))
#dimension = image.GetDimension(0), image.GetDimension(1)
self.dimensions = image.GetDimension(1), image.GetDimension(0)
gdcm_array = image.GetBuffer()
# GDCM returns char* as type str. This converts it to type bytes
if sys.version_info >= (3, 0):
gdcm_array = gdcm_array.encode(sys.getfilesystemencoding(), "surrogateescape")
# use float for accurate scaling
dimensions = image.GetDimensions()
result = numpy.frombuffer(gdcm_array, dtype=self.data_type).astype(float)
if len(dimensions) == 3:
# for cine (animations) there are 3 dims: x, y, number of frames
result.shape = dimensions[2], dimensions[0], dimensions[1]
else:
result.shape = dimensions
return result | Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage() |
def reset(self):
"Initialises all needed variables to default values"
self.metadata = {}
self.items = []
self.spine = []
self.guide = []
self.pages = []
self.toc = []
self.bindings = []
self.IDENTIFIER_ID = 'id'
self.FOLDER_NAME = 'EPUB'
self._id_html = 0
self._id_image = 0
self._id_static = 0
self.title = ''
self.language = 'en'
self.direction = None
self.templates = {
'ncx': NCX_XML,
'nav': NAV_XML,
'chapter': CHAPTER_XML,
'cover': COVER_XML
}
self.add_metadata('OPF', 'generator', '', {
'name': 'generator', 'content': 'Ebook-lib %s' % '.'.join([str(s) for s in VERSION])
})
# default to using a randomly-unique identifier if one is not specified manually
self.set_identifier(str(uuid.uuid4()))
# custom prefixes and namespaces to be set to the content.opf doc
self.prefixes = []
self.namespaces = {} | Initialises all needed variables to default values |
def ParseFromString(self, text, message):
"""Parses a text representation of a protocol message into a message."""
if not isinstance(text, str):
text = text.decode('utf-8')
return self.ParseLines(text.split('\n'), message) | Parses a text representation of a protocol message into a message. |
def update(self, x, w=1):
"""
Update the t-digest with value x and weight w.
"""
self.n += w
if len(self) == 0:
self._add_centroid(Centroid(x, w))
return
S = self._find_closest_centroids(x)
while len(S) != 0 and w > 0:
j = choice(list(range(len(S))))
c_j = S[j]
q = self._compute_centroid_quantile(c_j)
# This filters the out centroids that do not satisfy the second part
# of the definition of S. See original paper by Dunning.
if c_j.count + w > self._threshold(q):
S.pop(j)
continue
delta_w = min(self._threshold(q) - c_j.count, w)
self._update_centroid(c_j, x, delta_w)
w -= delta_w
S.pop(j)
if w > 0:
self._add_centroid(Centroid(x, w))
if len(self) > self.K / self.delta:
self.compress()
return | Update the t-digest with value x and weight w. |
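A hedged usage sketch, assuming this method belongs to the tdigest package's TDigest class, where percentile() takes a value between 0 and 100.
from random import random
from tdigest import TDigest
digest = TDigest()
for _ in range(10000):
    digest.update(random())   # weight defaults to 1
print(digest.percentile(50))  # roughly 0.5 for uniform data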
def parseValue(self, value):
"""Parse the given value and return result."""
if self.isVector():
return list(map(self._pythonType, value.split(',')))
if self.typ == 'boolean':
return _parseBool(value)
return self._pythonType(value) | Parse the given value and return result. |
def get_routers(self, context, router_ids=None, hd_ids=None):
"""Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
hosting devices will be returned.
"""
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'cfg_sync_routers', host=self.host,
router_ids=router_ids, hosting_device_ids=hd_ids) | Make a remote process call to retrieve the sync data for routers.
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
hosting devices will be returned. |
def key(self, key, strictkey=None):
"""
Return a chunk referencing a key in a mapping with the name 'key'.
"""
return self._select(self._pointer.key(key, strictkey)) | Return a chunk referencing a key in a mapping with the name 'key'. |
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ
parent_type=None, catch_all_field=None):
"""
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
"""
if parent_type:
assert isinstance(parent_type, Union)
super(Union, self).set_attributes(doc, fields, parent_type)
self.catch_all_field = catch_all_field
self.parent_type = parent_type | :param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions. |
def insert_many(self, rows, chunk_size=1000, ensure=None, types=None):
"""Add many rows at a time.
This is significantly faster than adding them one by one. By default
the rows are processed in chunks of 1000 per commit, unless you specify
a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows)
"""
chunk = []
for row in rows:
row = self._sync_columns(row, ensure, types=types)
chunk.append(row)
if len(chunk) == chunk_size:
chunk = pad_chunk_columns(chunk)
self.table.insert().execute(chunk)
chunk = []
if len(chunk):
chunk = pad_chunk_columns(chunk)
self.table.insert().execute(chunk) | Add many rows at a time.
This is significantly faster than adding them one by one. By default
the rows are processed in chunks of 1000 per commit, unless you specify
a different ``chunk_size``.
See :py:meth:`insert() <dataset.Table.insert>` for details on
the other parameters.
::
rows = [dict(name='Dolly')] * 10000
table.insert_many(rows) |
def route(self, fn, **kwargs):
""" Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions
:param fn: Function to run the route with
:type fn: function
:param kwargs: Parsed url arguments
:type kwargs: dict
:return: HTTP Response with rendered template
:rtype: flask.Response
"""
new_kwargs = fn(**kwargs)
# If there are no templates, we assume that the response is finalized:
if not isinstance(new_kwargs, dict):
return new_kwargs
new_kwargs["url"] = kwargs
return self.render(**new_kwargs) | Route helper : apply fn function but keep the calling object, *ie* kwargs, for other functions
:param fn: Function to run the route with
:type fn: function
:param kwargs: Parsed url arguments
:type kwargs: dict
:return: HTTP Response with rendered template
:rtype: flask.Response |
def _formatFilepaths(self):
"""
Join dirnames and filenames from config.
"""
likedir=self['output']['likedir']
self.likefile = join(likedir,self['output']['likefile'])
self.mergefile = join(likedir,self['output']['mergefile'])
self.roifile = join(likedir,self['output']['roifile'])
searchdir=self['output']['searchdir']
self.labelfile = join(searchdir,self['output']['labelfile'])
self.objectfile = join(searchdir,self['output']['objectfile'])
self.assocfile = join(searchdir,self['output']['assocfile'])
self.candfile = join(searchdir,self['output']['candfile'])
mcmcdir=self['output']['mcmcdir']
self.mcmcfile = join(mcmcdir,self['output']['mcmcfile']) | Join dirnames and filenames from config. |
def platform_detect():
"""Detect if running on the Raspberry Pi or Beaglebone Black and return the
platform type. Will return RASPBERRY_PI, BEAGLEBONE_BLACK, MINNOWBOARD, JETSON_NANO, or UNKNOWN."""
# Handle Raspberry Pi
pi = pi_version()
if pi is not None:
return RASPBERRY_PI
# Handle Beaglebone Black
# TODO: Check the Beaglebone Black /proc/cpuinfo value instead of reading
# the platform.
plat = platform.platform()
if plat.lower().find('armv7l-with-debian') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-ubuntu') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('armv7l-with-glibc2.4') > -1:
return BEAGLEBONE_BLACK
elif plat.lower().find('tegra-aarch64-with-ubuntu') > -1:
return JETSON_NANO
# Handle Minnowboard
# Assumption is that mraa is installed
try:
import mraa
if mraa.getPlatformName()=='MinnowBoard MAX':
return MINNOWBOARD
except ImportError:
pass
# Couldn't figure out the platform, just return unknown.
return UNKNOWN | Detect if running on the Raspberry Pi or Beaglebone Black and return the
platform type. Will return RASPBERRY_PI, BEAGLEBONE_BLACK, MINNOWBOARD, JETSON_NANO, or UNKNOWN. |
def leave_scope(self):
""" Ends a function body and pops current scope out of the symbol table.
"""
def entry_size(entry):
""" For local variables and params, returns the real variable or
local array size in bytes
"""
if entry.scope == SCOPE.global_ or \
entry.is_aliased: # aliases or global variables = 0
return 0
if entry.class_ != CLASS.array:
return entry.size
return entry.memsize
for v in self.table[self.current_scope].values(filter_by_opt=False):
if not v.accessed:
if v.scope == SCOPE.parameter:
kind = 'Parameter'
v.accessed = True # HINT: Parameters must always be present even if not used!
warning_not_used(v.lineno, v.name, kind=kind)
entries = sorted(self.table[self.current_scope].values(filter_by_opt=True), key=entry_size)
offset = 0
for entry in entries: # Symbols of the current level
if entry.class_ is CLASS.unknown:
self.move_to_global_scope(entry.name)
if entry.class_ in (CLASS.function, CLASS.label, CLASS.type_):
continue
# Local variables offset
if entry.class_ == CLASS.var and entry.scope == SCOPE.local:
if entry.alias is not None: # alias of another variable?
if entry.offset is None:
entry.offset = entry.alias.offset
else:
entry.offset = entry.alias.offset - entry.offset
else:
offset += entry_size(entry)
entry.offset = offset
if entry.class_ == CLASS.array and entry.scope == SCOPE.local:
entry.offset = entry_size(entry) + offset
offset = entry.offset
self.mangle = self[self.current_scope].parent_mangle
self.table.pop()
global_.LOOPS = global_.META_LOOPS.pop()
return offset | Ends a function body and pops current scope out of the symbol table. |
def buscar_healthchecks(self, id_ambiente_vip):
"""Search healthcheck by environmentvip_id
:return: Dictionary with the following structure:
::
{'healthcheck_opt': [{'name': <name>, 'id': <id>},...]}
:raise InvalidParameterError: Environment VIP identifier is null and invalid.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise InvalidParameterError: id_ambiente_vip is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
url = 'environment-vip/get/healthcheck/' + str(id_ambiente_vip)
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml, ['healthcheck_opt']) | Search healthcheck by environmentvip_id
:return: Dictionary with the following structure:
::
{'healthcheck_opt': [{'name': <name>, 'id': <id>},...]}
:raise InvalidParameterError: Environment VIP identifier is null and invalid.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise InvalidParameterError: id_ambiente_vip is null and invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
def update_mode(arg_namespace):
"""Check command line arguments and run update function."""
try:
updater.update(custom_sources=arg_namespace.custom)
except (PermissionError, FileNotFoundError) as exception:
if isinstance(exception, PermissionError):
print('No write permission for current working directory.')
if isinstance(exception, FileNotFoundError):
print('Necessary resources for updating not found in current '
'working directory.') | Check command line arguments and run update function. |
def to_netjson(self, remove_block=True):
"""
Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``)
"""
result = OrderedDict()
# copy list
intermediate_data = list(self.intermediate_data[self.intermediate_key])
# iterate over copied intermediate data structure
for index, block in enumerate(intermediate_data):
if self.should_skip_block(block):
continue
# remove processed block from intermediate data
# this makes processing remaining blocks easier
# for some backends
if remove_block:
self.intermediate_data[self.intermediate_key].remove(block)
# specific converter operations are delegated
# to the ``to_netjson_loop`` method
result = self.to_netjson_loop(block, result, index + 1)
# return result, expects dict
return result | Converts the intermediate data structure (``self.intermediate_data``)
to a NetJSON configuration dictionary (``self.config``) |
def update(self, num_iid, session, **kwargs):
'''taobao.item.update - Update product information
Updates the product identified by the given num_iid. The product corresponding to num_iid must belong to the user of the current session. The product's properties must contain the SKU properties, and the product price must lie within the SKU price range (for example, if the SKU prices are 5 yuan and 10 yuan, the product price must be at least 5 yuan and at most 10 yuan, otherwise the update fails). The product category is also related to the product price and the SKU prices (the exact relationship can be obtained from the category-property query API). When a key property value is updated to "Other", input_pids and input_str must be provided for the update to succeed.'''
request = TOPRequest('taobao.item.update')
request['num_iid'] = num_iid
for k, v in kwargs.iteritems():
if k not in ('cid', 'props', 'num', 'price', 'title', 'desc', 'location_state', 'location_city', 'post_fee', 'express_fee', 'ems_fee', 'list_time', 'increment', 'image', 'stuff_status', 'auction_point', 'property_alias', 'input_pids', 'sku_quantities', 'sku_prices', 'sku_properties', 'seller_cids', 'postage_id', 'outer_id', 'product_id', 'pic_path', 'auto_fill', 'sku_outer_ids', 'is_taobao', 'is_ex', 'is_3D', 'is_replace_sku', 'input_str', 'lang', 'has_discount', 'has_showcase', 'approve_status', 'freight_payer', 'valid_thru', 'has_invoice', 'has_warranty', 'after_sale_id', 'sell_promise', 'cod_postage_id', 'is_lightning_consignment', 'weight', 'is_xinpin', 'sub_stock') and v==None: continue
if k == 'location_state': k = 'location.state'
if k == 'location_city': k = 'location.city'
request[k] = v
self.create(self.execute(request, session)['item'])
return self | taobao.item.update - Update product information
Updates the product identified by the given num_iid. The product corresponding to num_iid must belong to the user of the current session. The product's properties must contain the SKU properties, and the product price must lie within the SKU price range (for example, if the SKU prices are 5 yuan and 10 yuan, the product price must be at least 5 yuan and at most 10 yuan, otherwise the update fails). The product category is also related to the product price and the SKU prices (the exact relationship can be obtained from the category-property query API). When a key property value is updated to "Other", input_pids and input_str must be provided for the update to succeed. |
def add_auth(self, req, **kwargs):
"""
Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
if 'X-Amzn-Authorization' in req.headers:
del req.headers['X-Amzn-Authorization']
req.headers['X-Amz-Date'] = formatdate(usegmt=True)
req.headers['X-Amz-Security-Token'] = self._provider.security_token
string_to_sign, headers_to_sign = self.string_to_sign(req)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
hash_value = sha256(string_to_sign).digest()
b64_hmac = self.sign_string(hash_value)
s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s," % self.algorithm()
s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
s += "Signature=%s" % b64_hmac
req.headers['X-Amzn-Authorization'] = s | Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object. |
def GetBalance(self, asset_id, watch_only=0):
"""
Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only (bool): True, to limit to watch only wallets.
Returns:
Fixed8: total balance.
"""
total = Fixed8(0)
if type(asset_id) is NEP5Token.NEP5Token:
return self.GetTokenBalance(asset_id, watch_only)
for coin in self.GetCoins():
if coin.Output.AssetId == asset_id:
if coin.State & CoinState.Confirmed > 0 and \
coin.State & CoinState.Spent == 0 and \
coin.State & CoinState.Locked == 0 and \
coin.State & CoinState.Frozen == 0 and \
coin.State & CoinState.WatchOnly == watch_only:
total = total + coin.Output.Value
return total | Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only (bool): True, to limit to watch only wallets.
Returns:
Fixed8: total balance. |
def _build(self, src, path, dest, mtime):
"""Calls `build` after testing that at least one output file (as
returned by `_outputs()` does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime.
"""
input_path = os.path.join(src, path)
output_paths = [os.path.join(dest, output) for output in
self._outputs(src, path)]
if path in self.failures and mtime <= self.failures[path]:
# the input file was not modified since the last recorded failure
# as such, assume that the task will fail again and skip it
return
for output in output_paths:
try:
if \
os.path.exists(output) and \
mtime <= os.path.getmtime(output):
# output file exists and is up to date; no need to trigger
# build on this file's expense
continue
except EnvironmentError:
# usually happens when the output file has been deleted in
# between the call to exists and the call to getmtime
pass
start = time.time()
try:
self.build(input_path, output_paths)
except Exception as e:
if isinstance(e, EnvironmentError):
# non-zero return code in sub-process; only show message
logging.error("{0} failed after {1:.2f}s: {2}".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start, e.args[0]
))
else:
# probably a bug in the handler; show full trace
logging.exception("{0} failed after {1:.2f}s".format(
termcolor.colored(path, "red", attrs=["bold"]),
time.time() - start
))
self.failures[path] = start
else:
logging.info("{0} completed in {1:.2f}s".format(
termcolor.colored(path, "green", attrs=["bold"]),
time.time() - start
))
self.failures.pop(path, None)
break | Calls `build` after testing that at least one output file (as
returned by `_outputs()` does not exist or is older than `mtime`. If
the build fails, the build time is recorded and no other builds will be
attempted on `input` until this method is called with a larger mtime. |
def pkg_blacklist(self):
"""Manage blacklist packages
"""
blacklist = BlackList()
options = [
"-b",
"--blacklist"
]
flag = [
"--add",
"--remove"
]
command = ["list"]
if (len(self.args) == 2 and self.args[0] in options and
self.args[1] == command[0]):
blacklist.listed()
elif (len(self.args) > 2 and self.args[0] in options and
flag[0] in self.args):
self.args.remove(flag[0])
blacklist.add(self.args[1:])
elif (len(self.args) == 3 and self.args[0] in options and
"ALL" in self.args and flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(blacklist.get_black())
elif (len(self.args) > 2 and self.args[0] in options and
flag[1] in self.args):
self.args.remove(flag[1])
blacklist.remove(self.args[1:])
else:
usage("") | Manage blacklist packages |
def get_ntlm_response(self, flags, challenge, target_info=None, channel_binding=None):
"""
Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key.
If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned
:param challenge: The 8-byte challenge message generated by the server
:return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo
"""
# TODO: IMPLEMENT THE FOLLOWING FEATURES
# If NTLM v2 authentication is used and the CHALLENGE_MESSAGE does not contain both MsvAvNbComputerName and
# MsvAvNbDomainName AVPairs and either Integrity is TRUE or Confidentiality is TRUE, then return STATUS_LOGON_FAILURE.
# If NTLM v2 authentication is used and the CHALLENGE_MESSAGE contains a TargetInfo field, the client SHOULD NOT send
# the LmChallengeResponse and SHOULD set the LmChallengeResponseLen and LmChallengeResponseMaxLen fields in the
# AUTHENTICATE_MESSAGE to zero.
# If lm compatibility level is 3 or lower, but the server negotiated NTLM2, generate an
# NTLM2 response in preference to the weaker NTLMv1.
if flags & NegotiateFlag.NTLMSSP_NTLM2_KEY and self._lm_compatibility < 3:
response, key = PasswordAuthentication.get_ntlm2_response(self._password, challenge, self._client_challenge)
elif 0 <= self._lm_compatibility < 3:
response, key = PasswordAuthentication.get_ntlmv1_response(self._password, challenge)
else:
# We should use the timestamp included in TargetInfo, if no timestamp is set we generate one and add it to
# the outgoing TargetInfo. If the timestamp is set, we should also set the MIC flag
if target_info is None:
target_info = TargetInfo()
if target_info[TargetInfo.NTLMSSP_AV_TIME] is None:
timestamp = PasswordAuthentication._get_ntlm_timestamp()
else:
# TODO: If the CHALLENGE_MESSAGE TargetInfo field (section 2.2.1.2) has an MsvAvTimestamp present,
# TODO: the client SHOULD provide a MIC.
timestamp = target_info[TargetInfo.NTLMSSP_AV_TIME][1]
#target_info[TargetInfo.NTLMSSP_AV_FLAGS] = struct.pack('<I', 2)
# Calculating channel bindings is poorly documented. It is implemented in winrmlib, and needs to be
# moved here
# if self._av_channel_bindings is True and channel_binding is not None:
# target_info[TargetInfo.NTLMSSP_AV_CHANNEL_BINDINGS] = channel_binding
response, key, target_info = PasswordAuthentication.get_ntlmv2_response(
self._domain, self._username, self._password.encode('utf-16le'), challenge,
self._client_challenge, timestamp, target_info)
return response, key, target_info | Computes the 24 byte NTLM challenge response given the 8 byte server challenge, along with the session key.
If NTLMv2 is used, the TargetInfo structure must be supplied, the updated TargetInfo structure will be returned
:param challenge: The 8-byte challenge message generated by the server
:return: A tuple containing the 24 byte NTLM Hash, Session Key and TargetInfo |
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces) | Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast. |
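The _EncodeVarint helper comes from the protobuf runtime; below is a self-contained sketch of unsigned base-128 varint encoding to illustrate the output format (the real helper may differ in detail).
def encode_varint_sketch(value):
    out = bytearray()
    while True:
        bits = value & 0x7F
        value >>= 7
        if value:
            out.append(bits | 0x80)  # set the continuation bit
        else:
            out.append(bits)
            return bytes(out)
encode_varint_sketch(300)  # -> b'\xac\x02'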
def find_one(self, cls, id):
"""Required functionality."""
try:
db_result = self.get_class_table(cls).lookup(id)
except ItemNotFound:
# according to docs, this shouldn't be required, but it IS
db_result = None
if not db_result:
return None
obj = cls.from_data(db_result['value'])
return obj | Required functionality. |