code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars) |
---|---|---|
def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
"""
root_node = ET.Element("forecast")
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = self._interval
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
root_node.append(self._location._to_DOM())
weathers_node = ET.SubElement(root_node, "weathers")
for weather in self:
weathers_node.append(weather._to_DOM())
return root_node | Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object | Below is the instruction that describes the task:
### Input:
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
### Response:
def _to_DOM(self):
"""
Dumps object data to a fully traversable DOM representation of the
object.
:returns: a ``xml.etree.Element`` object
"""
root_node = ET.Element("forecast")
interval_node = ET.SubElement(root_node, "interval")
interval_node.text = self._interval
reception_time_node = ET.SubElement(root_node, "reception_time")
reception_time_node.text = str(self._reception_time)
root_node.append(self._location._to_DOM())
weathers_node = ET.SubElement(root_node, "weathers")
for weather in self:
weathers_node.append(weather._to_DOM())
return root_node |
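For reference, the `Element` returned above can be serialized with the standard library; a minimal sketch (the `forecast` instance here is hypothetical):

```python
import xml.etree.ElementTree as ET

# Hypothetical: `forecast` is an instance of the class that defines _to_DOM() above.
root = forecast._to_DOM()
print(ET.tostring(root, encoding="unicode"))  # e.g. <forecast><interval>...</interval>...</forecast>
```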
def value(self):
"""
Return the current evaluation of a condition statement
"""
return ''.join(map(str, self.evaluate(self.trigger.user))) | Return the current evaluation of a condition statement | Below is the instruction that describes the task:
### Input:
Return the current evaluation of a condition statement
### Response:
def value(self):
"""
Return the current evaluation of a condition statement
"""
return ''.join(map(str, self.evaluate(self.trigger.user))) |
def get_monophyletic(self, values, target_attr):
"""
Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within a node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees).
"""
if type(values) != set:
values = set(values)
n2values = self.get_cached_content(store_attr=target_attr)
is_monophyletic = lambda node: n2values[node] == values
for match in self.iter_leaves(is_leaf_fn=is_monophyletic):
if is_monophyletic(match):
yield match | Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within a node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees). | Below is the instruction that describes the task:
### Input:
Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within a node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees).
### Response:
def get_monophyletic(self, values, target_attr):
"""
Returns a list of nodes matching the provided monophyly
criteria. For a node to be considered a match, all
`target_attr` values within a node, and exclusively them,
should be grouped.
:param values: a set of values for which monophyly is
expected.
:param target_attr: node attribute being used to check
monophyly (i.e. species for species trees, names for gene
family trees).
"""
if type(values) != set:
values = set(values)
n2values = self.get_cached_content(store_attr=target_attr)
is_monophyletic = lambda node: n2values[node] == values
for match in self.iter_leaves(is_leaf_fn=is_monophyletic):
if is_monophyletic(match):
yield match |
def is_fraction(value,
minimum = None,
maximum = None,
**kwargs):
"""Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.fraction(value,
minimum = minimum,
maximum = maximum,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | Below is the instruction that describes the task:
### Input:
Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
### Response:
def is_fraction(value,
minimum = None,
maximum = None,
**kwargs):
"""Indicate whether ``value`` is a :class:`Fraction <python:fractions.Fraction>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is greater than or
equal to this value.
:type minimum: numeric
:param maximum: If supplied, will make sure that ``value`` is less than or
equal to this value.
:type maximum: numeric
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.fraction(value,
minimum = minimum,
maximum = maximum,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True |
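For illustration, here is a self-contained sketch of the same checker/validator pattern, with the external `validators.fraction` call replaced by a hypothetical stand-in built on `fractions.Fraction` (function names invented):

```python
from fractions import Fraction

def _fraction_validator(value, minimum=None, maximum=None):
    value = Fraction(value)  # raises ValueError/TypeError on non-fractional input
    if minimum is not None and value < minimum:
        raise ValueError('value is below the allowed minimum')
    if maximum is not None and value > maximum:
        raise ValueError('value is above the allowed maximum')
    return value

def is_fraction_sketch(value, minimum=None, maximum=None):
    try:
        _fraction_validator(value, minimum=minimum, maximum=maximum)
    except Exception:
        return False
    return True

print(is_fraction_sketch('3/4', minimum=0, maximum=1))  # True
print(is_fraction_sketch('not a number'))               # False
```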
def call_function(self, c, i):
"""
Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
"""
callable_ = self.__stack[-1-i.arg]
args = tuple(self.__stack[len(self.__stack) - i.arg:])
self._print('call function')
self._print('\tfunction ', callable_)
self._print('\ti.arg ', i.arg)
self._print('\targs ', args)
self.call_callbacks('CALL_FUNCTION', callable_, *args)
if isinstance(callable_, FunctionType):
ret = callable_(*args)
elif callable_ is builtins.__build_class__:
ret = self.build_class(callable_, args)
elif callable_ is builtins.globals:
ret = self.builtins_globals()
else:
ret = callable_(*args)
self.pop(1 + i.arg)
self.__stack.append(ret) | Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION | Below is the instruction that describes the task:
### Input:
Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
### Response:
def call_function(self, c, i):
"""
Implement the CALL_FUNCTION_ operation.
.. _CALL_FUNCTION: https://docs.python.org/3/library/dis.html#opcode-CALL_FUNCTION
"""
callable_ = self.__stack[-1-i.arg]
args = tuple(self.__stack[len(self.__stack) - i.arg:])
self._print('call function')
self._print('\tfunction ', callable_)
self._print('\ti.arg ', i.arg)
self._print('\targs ', args)
self.call_callbacks('CALL_FUNCTION', callable_, *args)
if isinstance(callable_, FunctionType):
ret = callable_(*args)
elif callable_ is builtins.__build_class__:
ret = self.build_class(callable_, args)
elif callable_ is builtins.globals:
ret = self.builtins_globals()
else:
ret = callable_(*args)
self.pop(1 + i.arg)
self.__stack.append(ret) |
def filter_rank_genes_groups(adata, key=None, groupby=None, use_raw=True, log=True,
key_added='rank_genes_groups_filtered',
min_in_group_fraction=0.25, min_fold_change=2,
max_out_group_fraction=0.5):
"""Filters out genes based on fold change and fraction of cells expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
log : if true, it means that the values to work with are in log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to
`nan`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
"""
if key is None:
key = 'rank_genes_groups'
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby'])
# convert structured numpy array into DataFrame
gene_names = pd.DataFrame(adata.uns[key]['names'])
fraction_in_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
fold_change_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns, index=gene_names.index)
fraction_out_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
logg.info("Filtering genes using: min_in_group_fraction: {} "
"min_fold_change: {}, max_out_group_fraction: {}".format(min_in_group_fraction, min_fold_change,
max_out_group_fraction))
from ..plotting._anndata import _prepare_dataframe
for cluster in gene_names.columns:
# iterate per column
var_names = gene_names[cluster].values
# add column to adata as __is_in_cluster__. This facilitates to measure fold change
# of each gene with respect to all other clusters
adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)
# obs_tidy has rows=groupby, columns=var_names
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby='__is_in_cluster__', use_raw=use_raw)
# for if category defined by groupby (if any) compute for each var_name
# 1. the mean value over the category
# 2. the fraction of cells in the category having a value > 0
# 1. compute mean value
mean_obs = obs_tidy.groupby(level=0).mean()
# 2. compute fraction of cells having value >0
# transform obs_tidy into boolean matrix
obs_bool = obs_tidy.astype(bool)
# compute the sum per group which in the boolean matrix this is the number
# of values >0, and divide the result by the total number of values in the group
# (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# Because the dataframe groupby is based on the '__is_in_cluster__' column,
# in this context, [True] means __is_in_cluster__.
# Also, in this context, fraction_obs.loc[True].values is the row of values
# that is assigned *as column* to fraction_in_cluster_matrix to follow the
# structure of the gene_names dataFrame
fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values
fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values
# compute fold change.
if log:
fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) / np.exp(mean_obs.loc[False])).values
else:
fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] / mean_obs.loc[False]).values
# remove temporary columns
adata.obs.drop(columns='__is_in_cluster__', inplace=True)
# filter original_matrix
gene_names = gene_names[(fraction_in_cluster_matrix > min_in_group_fraction) &
(fraction_out_cluster_matrix < max_out_group_fraction) &
(fold_change_matrix > min_fold_change)]
# create new structured array using 'key_added'.
adata.uns[key_added] = adata.uns[key].copy()
adata.uns[key_added]['names'] = gene_names.to_records(index=False) | Filters out genes based on fold change and fraction of cells expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
log : if true, it means that the values to work with are in log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to
`nan`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered') | Below is the instruction that describes the task:
### Input:
Filters out genes based on fold change and fraction of cells expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
log : if true, it means that the values to work with are in log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to
`nan`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
### Response:
def filter_rank_genes_groups(adata, key=None, groupby=None, use_raw=True, log=True,
key_added='rank_genes_groups_filtered',
min_in_group_fraction=0.25, min_fold_change=2,
max_out_group_fraction=0.5):
"""Filters out genes based on fold change and fraction of cells expressing the gene within and outside the `groupby` categories.
See :func:`~scanpy.tl.rank_genes_groups`.
Results are stored in `adata.uns[key_added]` (default: 'rank_genes_groups_filtered').
To preserve the original structure of adata.uns['rank_genes_groups'], filtered genes
are set to `NaN`.
Parameters
----------
adata: :class:`~anndata.AnnData`
key
groupby
use_raw
log : if true, it means that the values to work with are in log scale
key_added
min_in_group_fraction
min_fold_change
max_out_group_fraction
Returns
-------
Same output as :ref:`scanpy.tl.rank_genes_groups` but with filtered genes names set to
`nan`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.rank_genes_groups(adata, 'bulk_labels', method='wilcoxon')
>>> sc.tl.filter_rank_genes_groups(adata, min_fold_change=3)
>>> # visualize results
>>> sc.pl.rank_genes_groups(adata, key='rank_genes_groups_filtered')
>>> # visualize results using dotplot
>>> sc.pl.rank_genes_groups_dotplot(adata, key='rank_genes_groups_filtered')
"""
if key is None:
key = 'rank_genes_groups'
if groupby is None:
groupby = str(adata.uns[key]['params']['groupby'])
# convert structured numpy array into DataFrame
gene_names = pd.DataFrame(adata.uns[key]['names'])
fraction_in_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
fold_change_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns, index=gene_names.index)
fraction_out_cluster_matrix = pd.DataFrame(np.zeros(gene_names.shape), columns=gene_names.columns,
index=gene_names.index)
logg.info("Filtering genes using: min_in_group_fraction: {} "
"min_fold_change: {}, max_out_group_fraction: {}".format(min_in_group_fraction, min_fold_change,
max_out_group_fraction))
from ..plotting._anndata import _prepare_dataframe
for cluster in gene_names.columns:
# iterate per column
var_names = gene_names[cluster].values
# add column to adata as __is_in_cluster__. This facilitates to measure fold change
# of each gene with respect to all other clusters
adata.obs['__is_in_cluster__'] = pd.Categorical(adata.obs[groupby] == cluster)
# obs_tidy has rows=groupby, columns=var_names
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby='__is_in_cluster__', use_raw=use_raw)
# for if category defined by groupby (if any) compute for each var_name
# 1. the mean value over the category
# 2. the fraction of cells in the category having a value > 0
# 1. compute mean value
mean_obs = obs_tidy.groupby(level=0).mean()
# 2. compute fraction of cells having value >0
# transform obs_tidy into boolean matrix
obs_bool = obs_tidy.astype(bool)
# compute the sum per group which in the boolean matrix this is the number
# of values >0, and divide the result by the total number of values in the group
# (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# Because the dataframe groupby is based on the '__is_in_cluster__' column,
# in this context, [True] means __is_in_cluster__.
# Also, in this context, fraction_obs.loc[True].values is the row of values
# that is assigned *as column* to fraction_in_cluster_matrix to follow the
# structure of the gene_names dataFrame
fraction_in_cluster_matrix.loc[:, cluster] = fraction_obs.loc[True].values
fraction_out_cluster_matrix.loc[:, cluster] = fraction_obs.loc[False].values
# compute fold change.
if log:
fold_change_matrix.loc[:, cluster] = (np.exp(mean_obs.loc[True]) / np.exp(mean_obs.loc[False])).values
else:
fold_change_matrix.loc[:, cluster] = (mean_obs.loc[True] / mean_obs.loc[False]).values
# remove temporary columns
adata.obs.drop(columns='__is_in_cluster__', inplace=True)
# filter original_matrix
gene_names = gene_names[(fraction_in_cluster_matrix > min_in_group_fraction) &
(fraction_out_cluster_matrix < max_out_group_fraction) &
(fold_change_matrix > min_fold_change)]
# create new structured array using 'key_added'.
adata.uns[key_added] = adata.uns[key].copy()
adata.uns[key_added]['names'] = gene_names.to_records(index=False) |
def add(self, value):
"""Add element *value* to the set."""
# Raise TypeError if value is not hashable
hash(value)
self.redis.sadd(self.key, self._pickle(value)) | Add element *value* to the set. | Below is the instruction that describes the task:
### Input:
Add element *value* to the set.
### Response:
def add(self, value):
"""Add element *value* to the set."""
# Raise TypeError if value is not hashable
hash(value)
self.redis.sadd(self.key, self._pickle(value)) |
def setWorker(self, *args, **kwargs):
"""Add a worker assignation
Arguments and order to pass are defined in LAUNCHING_ARGUMENTS
Using named args is advised.
"""
try:
la = self.LAUNCHING_ARGUMENTS(*args, **kwargs)
except TypeError as e:
scoop.logger.error(("addWorker failed to convert args %s and kwargs %s "
"to namedtuple (requires %s arguments (names %s)") %
(args, kwargs, len(self.LAUNCHING_ARGUMENTS._fields),
self.LAUNCHING_ARGUMENTS._fields))
self.workersArguments = la | Add a worker assignation
Arguments and order to pass are defined in LAUNCHING_ARGUMENTS
Using named args is advised. | Below is the instruction that describes the task:
### Input:
Add a worker assignation
Arguments and order to pass are defined in LAUNCHING_ARGUMENTS
Using named args is advised.
### Response:
def setWorker(self, *args, **kwargs):
"""Add a worker assignation
Arguments and order to pass are defined in LAUNCHING_ARGUMENTS
Using named args is advised.
"""
try:
la = self.LAUNCHING_ARGUMENTS(*args, **kwargs)
except TypeError as e:
scoop.logger.error(("addWorker failed to convert args %s and kwargs %s "
"to namedtuple (requires %s arguments (names %s)") %
(args, kwargs, len(self.LAUNCHING_ARGUMENTS._fields),
self.LAUNCHING_ARGUMENTS._fields))
self.workersArguments = la |
def emg_linear_envelope(emg, sampling_rate=1000, freqs=[10, 400], lfreq=4):
r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
"""
emg = emg_tkeo(emg)
if np.size(freqs) == 2:
# band-pass filter
b, a = scipy.signal.butter(2, np.array(freqs)/(sampling_rate/2.), btype = 'bandpass')
emg = scipy.signal.filtfilt(b, a, emg)
if np.size(lfreq) == 1:
# full-wave rectification
envelope = abs(emg)
# low-pass Butterworth filter
b, a = scipy.signal.butter(2, np.array(lfreq)/(sampling_rate/2.), btype = 'low')
envelope = scipy.signal.filtfilt(b, a, envelope)
return (envelope) | r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb | Below is the instruction that describes the task:
### Input:
r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
### Response:
def emg_linear_envelope(emg, sampling_rate=1000, freqs=[10, 400], lfreq=4):
r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
"""
emg = emg_tkeo(emg)
if np.size(freqs) == 2:
# band-pass filter
b, a = scipy.signal.butter(2, np.array(freqs)/(sampling_rate/2.), btype = 'bandpass')
emg = scipy.signal.filtfilt(b, a, emg)
if np.size(lfreq) == 1:
# full-wave rectification
envelope = abs(emg)
# low-pass Butterworth filter
b, a = scipy.signal.butter(2, np.array(lfreq)/(sampling_rate/2.), btype = 'low')
envelope = scipy.signal.filtfilt(b, a, envelope)
return (envelope) |
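A small synthetic-signal sketch of calling the function above (assumes numpy/scipy plus the module's `emg_tkeo` helper are importable; the random signal is purely illustrative):

```python
import numpy as np

np.random.seed(0)
raw_emg = np.random.randn(5000)  # ~5 s of fake EMG sampled at 1 kHz
envelope = emg_linear_envelope(raw_emg, sampling_rate=1000, freqs=[10, 400], lfreq=4)
print(envelope.shape)  # same length as the input (assuming emg_tkeo preserves length)
```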
def extract_value_from_output(canary, split_offset, kal_out):
"""Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal.
"""
retval = ""
while retval == "":
for line in kal_out.splitlines():
if canary in line:
retval = str(line.split()[split_offset])
if retval == "":
retval = None
return retval | Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal. | Below is the instruction that describes the task:
### Input:
Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal.
### Response:
def extract_value_from_output(canary, split_offset, kal_out):
"""Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out(str): Output from kal.
"""
retval = ""
while retval == "":
for line in kal_out.splitlines():
if canary in line:
retval = str(line.split()[split_offset])
if retval == "":
retval = None
return retval |
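A rough usage sketch, assuming the function above is in scope (the kal-style output text below is invented):

```python
sample_kal_out = """kal: Scanning for GSM-850 base stations.
chan: 128 (869.2MHz + 1.2kHz) power: 299518.22"""

# "chan:" is the canary line; token index 1 after .split() is the channel number.
print(extract_value_from_output("chan:", 1, sample_kal_out))  # -> '128'
```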
def set_chksum(self, doc, chk_sum):
"""
Sets the external document reference's check sum, if not already set.
chk_sum - The checksum value in the form of a string.
"""
if chk_sum:
doc.ext_document_references[-1].check_sum = checksum.Algorithm(
'SHA1', chk_sum)
else:
raise SPDXValueError('ExternalDocumentRef::Checksum') | Sets the external document reference's check sum, if not already set.
chk_sum - The checksum value in the form of a string. | Below is the instruction that describes the task:
### Input:
Sets the external document reference's check sum, if not already set.
chk_sum - The checksum value in the form of a string.
### Response:
def set_chksum(self, doc, chk_sum):
"""
Sets the external document reference's check sum, if not already set.
chk_sum - The checksum value in the form of a string.
"""
if chk_sum:
doc.ext_document_references[-1].check_sum = checksum.Algorithm(
'SHA1', chk_sum)
else:
raise SPDXValueError('ExternalDocumentRef::Checksum') |
def fix_varscan_output(line, normal_name="", tumor_name=""):
"""Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
"""Ensure FREQ genotype value is float as defined in header.
"""
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
# Non-conforming no-call sample, don't try to fix FREQ
if len(dat) != len(ft_parts):
return line
freq_i = ft_parts.index("FREQ")
try:
dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
except ValueError: # illegal binary characters -- set frequency to zero
dat[freq_i] = "0.0"
line[sample_i] = ":".join(dat)
return line
if len(line) > 9:
line = _normalize_freq(line, 9)
if len(line) > 10:
line = _normalize_freq(line, 10)
# HACK: The position of the SS= changes, so we just search for it
ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
if len(ss_vals) > 0:
somatic_status = int(ss_vals[0].split("=")[1]) # Get the number
else:
somatic_status = None
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
# fix FREQ for any additional samples -- multi-sample VarScan calling
if len(line) > 11:
for i in range(11, len(line)):
line = _normalize_freq(line, i)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line) | Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>. | Below is the instruction that describes the task:
### Input:
Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>.
### Response:
def fix_varscan_output(line, normal_name="", tumor_name=""):
"""Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
"""Ensure FREQ genotype value is float as defined in header.
"""
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
# Non-conforming no-call sample, don't try to fix FREQ
if len(dat) != len(ft_parts):
return line
freq_i = ft_parts.index("FREQ")
try:
dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
except ValueError: # illegal binary characters -- set frequency to zero
dat[freq_i] = "0.0"
line[sample_i] = ":".join(dat)
return line
if len(line) > 9:
line = _normalize_freq(line, 9)
if len(line) > 10:
line = _normalize_freq(line, 10)
# HACK: The position of the SS= changes, so we just search for it
ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
if len(ss_vals) > 0:
somatic_status = int(ss_vals[0].split("=")[1]) # Get the number
else:
somatic_status = None
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
# fix FREQ for any additional samples -- multi-sample VarScan calling
if len(line) > 11:
for i in range(11, len(line)):
line = _normalize_freq(line, i)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line) |
def _convert_to_json(self, response):
"""Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise.
"""
try:
return response.json()
except ValueError:
logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(
response.request.url, response.text,
))
return None | Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise. | Below is the instruction that describes the task:
### Input:
Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise.
### Response:
def _convert_to_json(self, response):
"""Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise.
"""
try:
return response.json()
except ValueError:
logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(
response.request.url, response.text,
))
return None |
def _GetLink(self):
"""Retrieves the link.
Returns:
str: full path of the linked file entry.
"""
if self._link is None:
self._link = ''
if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
return self._link
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
link_data = cpio_archive_file.ReadDataAtOffset(
self._cpio_archive_file_entry.data_offset,
self._cpio_archive_file_entry.data_size)
# TODO: should this be ASCII?
self._link = link_data.decode('ascii')
return self._link | Retrieves the link.
Returns:
str: full path of the linked file entry. | Below is the instruction that describes the task:
### Input:
Retrieves the link.
Returns:
str: full path of the linked file entry.
### Response:
def _GetLink(self):
"""Retrieves the link.
Returns:
str: full path of the linked file entry.
"""
if self._link is None:
self._link = ''
if self.entry_type != definitions.FILE_ENTRY_TYPE_LINK:
return self._link
cpio_archive_file = self._file_system.GetCPIOArchiveFile()
link_data = cpio_archive_file.ReadDataAtOffset(
self._cpio_archive_file_entry.data_offset,
self._cpio_archive_file_entry.data_size)
# TODO: should this be ASCII?
self._link = link_data.decode('ascii')
return self._link |
def download_file(save_path, file_url):
""" Download file from http url link """
r = requests.get(file_url) # create HTTP response object
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path | Download file from http url link | Below is the the instruction that describes the task:
### Input:
Download file from http url link
### Response:
def download_file(save_path, file_url):
""" Download file from http url link """
r = requests.get(file_url) # create HTTP response object
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path |
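For large downloads one would usually stream the response rather than buffering it all in memory; a hedged sketch of that variant using the same `requests` API (function name invented):

```python
import requests

def download_file_streamed(save_path, file_url, chunk_size=8192):
    # stream=True keeps the body out of memory; iter_content writes it in chunks
    with requests.get(file_url, stream=True) as r:
        r.raise_for_status()
        with open(save_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
    return save_path
```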
def refresh_token(self):
"""
Refreshing the current expired access token
"""
self.token = self.oauth.refresh_token(self.access_token_url, refresh_token=self.get_refresh_token())
self.access_token = self.token.get("access_token") | Refreshing the current expired access token | Below is the the instruction that describes the task:
### Input:
Refreshing the current expired access token
### Response:
def refresh_token(self):
"""
Refreshing the current expired access token
"""
self.token = self.oauth.refresh_token(self.access_token_url, refresh_token=self.get_refresh_token())
self.access_token = self.token.get("access_token") |
def render_local_template(service_name, environment, repo_root, template_file):
"""
Render a given service's template for a given environment and return it
"""
cmd = 'cd {} && ef-cf {} {} --devel --verbose'.format(repo_root, template_file, environment)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = indentify('\n{}'.format(stderr))
stdout = indentify('\n{}'.format(stdout))
raise Exception('Service: `{}`, Env: `{}`, Msg: `{}{}`'
.format(service_name, environment, stderr, stdout))
logger.debug('Rendered template for `%s` in `%s`', template_file, environment)
r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
return jsonify(json.loads(r.group(1))) | Render a given service's template for a given environment and return it | Below is the the instruction that describes the task:
### Input:
Render a given service's template for a given environment and return it
### Response:
def render_local_template(service_name, environment, repo_root, template_file):
"""
Render a given service's template for a given environment and return it
"""
cmd = 'cd {} && ef-cf {} {} --devel --verbose'.format(repo_root, template_file, environment)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = indentify('\n{}'.format(stderr))
stdout = indentify('\n{}'.format(stdout))
raise Exception('Service: `{}`, Env: `{}`, Msg: `{}{}`'
.format(service_name, environment, stderr, stdout))
logger.debug('Rendered template for `%s` in `%s`', template_file, environment)
r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
return jsonify(json.loads(r.group(1))) |
def get_cookies(self):
'''
Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes.
'''
ret = self.Network_getAllCookies()
assert 'result' in ret, "No return value in function response!"
assert 'cookies' in ret['result'], "No 'cookies' key in function response"
cookies = []
for raw_cookie in ret['result']['cookies']:
# Chromium seems to support the following key values for the cookie dict:
# "name"
# "value"
# "domain"
# "path"
# "expires"
# "httpOnly"
# "session"
# "secure"
#
# This seems supported by the fact that the underlying chromium cookie implementation has
# the following members:
# std::string name_;
# std::string value_;
# std::string domain_;
# std::string path_;
# base::Time creation_date_;
# base::Time expiry_date_;
# base::Time last_access_date_;
# bool secure_;
# bool httponly_;
# CookieSameSite same_site_;
# CookiePriority priority_;
#
# See chromium/net/cookies/canonical_cookie.h for more.
#
# I suspect the python cookie implementation is derived exactly from the standard, while the
# chromium implementation is more of a practically derived structure.
# Network.setCookie
baked_cookie = http.cookiejar.Cookie(
# We assume V0 cookies, principally because I don't think I've /ever/ actually encountered a V1 cookie.
# Chromium doesn't seem to specify it.
version = 0,
name = raw_cookie['name'],
value = raw_cookie['value'],
port = None,
port_specified = False,
domain = raw_cookie['domain'],
domain_specified = True,
domain_initial_dot = False,
path = raw_cookie['path'],
path_specified = False,
secure = raw_cookie['secure'],
expires = raw_cookie['expires'],
discard = raw_cookie['session'],
comment = None,
comment_url = None,
rest = {"httponly":"%s" % raw_cookie['httpOnly']},
rfc2109 = False
)
cookies.append(baked_cookie)
return cookies | Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes. | Below is the instruction that describes the task:
### Input:
Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes.
### Response:
def get_cookies(self):
'''
Retrieve the cookies from the remote browser.
Return value is a list of http.cookiejar.Cookie() instances.
These can be directly used with the various http.cookiejar.XXXCookieJar
cookie management classes.
'''
ret = self.Network_getAllCookies()
assert 'result' in ret, "No return value in function response!"
assert 'cookies' in ret['result'], "No 'cookies' key in function response"
cookies = []
for raw_cookie in ret['result']['cookies']:
# Chromium seems to support the following key values for the cookie dict:
# "name"
# "value"
# "domain"
# "path"
# "expires"
# "httpOnly"
# "session"
# "secure"
#
# This seems supported by the fact that the underlying chromium cookie implementation has
# the following members:
# std::string name_;
# std::string value_;
# std::string domain_;
# std::string path_;
# base::Time creation_date_;
# base::Time expiry_date_;
# base::Time last_access_date_;
# bool secure_;
# bool httponly_;
# CookieSameSite same_site_;
# CookiePriority priority_;
#
# See chromium/net/cookies/canonical_cookie.h for more.
#
# I suspect the python cookie implementation is derived exactly from the standard, while the
# chromium implementation is more of a practically derived structure.
# Network.setCookie
baked_cookie = http.cookiejar.Cookie(
# We assume V0 cookies, principally because I don't think I've /ever/ actually encountered a V1 cookie.
# Chromium doesn't seem to specify it.
version = 0,
name = raw_cookie['name'],
value = raw_cookie['value'],
port = None,
port_specified = False,
domain = raw_cookie['domain'],
domain_specified = True,
domain_initial_dot = False,
path = raw_cookie['path'],
path_specified = False,
secure = raw_cookie['secure'],
expires = raw_cookie['expires'],
discard = raw_cookie['session'],
comment = None,
comment_url = None,
rest = {"httponly":"%s" % raw_cookie['httpOnly']},
rfc2109 = False
)
cookies.append(baked_cookie)
return cookies |
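The returned `http.cookiejar.Cookie` objects can be dropped straight into a cookie jar; a short hypothetical usage sketch (`remote` stands in for the object exposing `get_cookies()`):

```python
import http.cookiejar

jar = http.cookiejar.CookieJar()
for cookie in remote.get_cookies():  # hypothetical instance of the class above
    jar.set_cookie(cookie)
print([c.name for c in jar])
```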
def _webfinger(provider, request, **kwargs):
"""Handle webfinger requests."""
params = urlparse.parse_qs(request)
if params["rel"][0] == OIC_ISSUER:
wf = WebFinger()
return Response(wf.response(params["resource"][0], provider.baseurl),
headers=[("Content-Type", "application/jrd+json")])
else:
return BadRequest("Incorrect webfinger.") | Handle webfinger requests. | Below is the instruction that describes the task:
### Input:
Handle webfinger requests.
### Response:
def _webfinger(provider, request, **kwargs):
"""Handle webfinger requests."""
params = urlparse.parse_qs(request)
if params["rel"][0] == OIC_ISSUER:
wf = WebFinger()
return Response(wf.response(params["resource"][0], provider.baseurl),
headers=[("Content-Type", "application/jrd+json")])
else:
return BadRequest("Incorrect webfinger.") |
def tofits(self, filename, overwrite=True, velocity=True,
optical=True, bitpix=-32, minpix=1, maxpix=-1):
"""Write the image to a file in FITS format.
`filename`
FITS file name
`overwrite`
If False, an exception is raised if the new image file already exists.
Default is True.
`velocity`
By default a velocity primary spectral axis is written if possible.
`optical`
If writing a velocity, use the optical definition
(otherwise use radio).
`bitpix`
can be set to -32 (float) or 16 (short) only. When `bitpix` is
16 it will write BSCALE and BZERO into the FITS file.
`minpix` and `maxpix`
are used to determine BSCALE and BZERO if `bitpix=16`.
If `minpix` is greater than `maxpix` (which is the default),
the minimum and maximum pixel values will be determined from the data.
Otherwise the supplied values will be used and pixels outside that
range will be clipped to the minimum and maximum pixel values.
Note that this truncation does not occur for `bitpix=-32`.
"""
return self._tofits(filename, overwrite, velocity, optical,
bitpix, minpix, maxpix) | Write the image to a file in FITS format.
`filename`
FITS file name
`overwrite`
If False, an exception is raised if the new image file already exists.
Default is True.
`velocity`
By default a velocity primary spectral axis is written if possible.
`optical`
If writing a velocity, use the optical definition
(otherwise use radio).
`bitpix`
can be set to -32 (float) or 16 (short) only. When `bitpix` is
16 it will write BSCALE and BZERO into the FITS file.
`minpix` and `maxpix`
are used to determine BSCALE and BZERO if `bitpix=16`.
If `minpix` is greater than `maxpix` (which is the default),
the minimum and maximum pixel values will be determined from the data.
Otherwise the supplied values will be used and pixels outside that
range will be clipped to the minimum and maximum pixel values.
Note that this truncation does not occur for `bitpix=-32`. | Below is the instruction that describes the task:
### Input:
Write the image to a file in FITS format.
`filename`
FITS file name
`overwrite`
If False, an exception is raised if the new image file already exists.
Default is True.
`velocity`
By default a velocity primary spectral axis is written if possible.
`optical`
If writing a velocity, use the optical definition
(otherwise use radio).
`bitpix`
can be set to -32 (float) or 16 (short) only. When `bitpix` is
16 it will write BSCALE and BZERO into the FITS file.
`minpix` and `maxpix`
are used to determine BSCALE and BZERO if `bitpix=16`.
If `minpix` is greater than `maxpix` (which is the default),
the minimum and maximum pixel values will be determined from the data.
Otherwise the supplied values will be used and pixels outside that
range will be clipped to the minimum and maximum pixel values.
Note that this truncation does not occur for `bitpix=-32`.
### Response:
def tofits(self, filename, overwrite=True, velocity=True,
optical=True, bitpix=-32, minpix=1, maxpix=-1):
"""Write the image to a file in FITS format.
`filename`
FITS file name
`overwrite`
If False, an exception is raised if the new image file already exists.
Default is True.
`velocity`
By default a velocity primary spectral axis is written if possible.
`optical`
If writing a velocity, use the optical definition
(otherwise use radio).
`bitpix`
can be set to -32 (float) or 16 (short) only. When `bitpix` is
16 it will write BSCALE and BZERO into the FITS file.
`minpix` and `maxpix`
are used to determine BSCALE and BZERO if `bitpix=16`.
If `minpix` is greater than `maxpix` (which is the default),
the minimum and maximum pixel values will be determined from the data.
Otherwise the supplied values will be used and pixels outside that
range will be clipped to the minimum and maximum pixel values.
Note that this truncation does not occur for `bitpix=-32`.
"""
return self._tofits(filename, overwrite, velocity, optical,
bitpix, minpix, maxpix) |
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout) | Register a callback for a given event type. | Below is the instruction that describes the task:
### Input:
Register a callback for a given event type.
### Response:
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout) |
async def _start_payloads(self, nursery):
"""Start all queued payloads"""
with self._lock:
for coroutine in self._payloads:
nursery.start_soon(coroutine)
self._payloads.clear()
await trio.sleep(0) | Start all queued payloads | Below is the instruction that describes the task:
### Input:
Start all queued payloads
### Response:
async def _start_payloads(self, nursery):
"""Start all queued payloads"""
with self._lock:
for coroutine in self._payloads:
nursery.start_soon(coroutine)
self._payloads.clear()
await trio.sleep(0) |
def dump(u, *args, **kwargs):
"""Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict
"""
return dict(
id=u.id,
email=u.email,
password=u.password,
password_salt=u.password_salt,
note=u.note,
full_name=u.full_name if hasattr(u, 'full_name') else '{0} {1}'.format(
u.given_names, u.family_name),
settings=u.settings,
nickname=u.nickname,
last_login=dt2iso_or_empty(u.last_login)) | Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict
### Response:
def dump(u, *args, **kwargs):
"""Dump the users as a list of dictionaries.
:param u: User to be dumped.
:type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.
:returns: User serialized to dictionary.
:rtype: dict
"""
return dict(
id=u.id,
email=u.email,
password=u.password,
password_salt=u.password_salt,
note=u.note,
full_name=u.full_name if hasattr(u, 'full_name') else '{0} {1}'.format(
u.given_names, u.family_name),
settings=u.settings,
nickname=u.nickname,
last_login=dt2iso_or_empty(u.last_login)) |
def parse_html(html):
"""Attempt to convert html to plain text while keeping line breaks.
Returns a list of paragraphs, each being a list of lines.
"""
paragraphs = re.split("</?p[^>]*>", html)
# Convert <br>s to line breaks and remove empty paragraphs
paragraphs = [re.split("<br */?>", p) for p in paragraphs if p]
# Convert each line in each paragraph to plain text:
return [[get_text(l) for l in p] for p in paragraphs] | Attempt to convert html to plain text while keeping line breaks.
Returns a list of paragraphs, each being a list of lines. | Below is the instruction that describes the task:
### Input:
Attempt to convert html to plain text while keeping line breaks.
Returns a list of paragraphs, each being a list of lines.
### Response:
def parse_html(html):
"""Attempt to convert html to plain text while keeping line breaks.
Returns a list of paragraphs, each being a list of lines.
"""
paragraphs = re.split("</?p[^>]*>", html)
# Convert <br>s to line breaks and remove empty paragraphs
paragraphs = [re.split("<br */?>", p) for p in paragraphs if p]
# Convert each line in each paragraph to plain text:
return [[get_text(l) for l in p] for p in paragraphs] |
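A quick illustration of the expected shape of the result, assuming the undefined `get_text()` helper simply strips any remaining tags:

```python
html = "<p>First line<br/>Second line</p><p>Third paragraph</p>"
print(parse_html(html))
# -> [['First line', 'Second line'], ['Third paragraph']]
```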
def cwt_coefficients(x, param):
"""
Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by
.. math::
\\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})
where :math:`a` is the width parameter of the wavelet function.
This feature calculator takes three different parameter: widths, coeff and w. The feature calculater takes all the
different widths arrays and then calculates the cwt one time for each different width array. Then the values for the
different coefficient for coeff and width w are returned. (For each dic in param one feature is returned)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series
"""
calculated_cwt = {}
res = []
indices = []
for parameter_combination in param:
widths = parameter_combination["widths"]
w = parameter_combination["w"]
coeff = parameter_combination["coeff"]
if widths not in calculated_cwt:
calculated_cwt[widths] = cwt(x, ricker, widths)
calculated_cwt_for_widths = calculated_cwt[widths]
indices += ["widths_{}__coeff_{}__w_{}".format(widths, coeff, w)]
i = widths.index(w)
if calculated_cwt_for_widths.shape[1] <= coeff:
res += [np.NaN]
else:
res += [calculated_cwt_for_widths[i, coeff]]
return zip(indices, res) | Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by
.. math::
\\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})
where :math:`a` is the width parameter of the wavelet function.
This feature calculator takes three different parameter: widths, coeff and w. The feature calculater takes all the
different widths arrays and then calculates the cwt one time for each different width array. Then the values for the
different coefficient for coeff and width w are returned. (For each dic in param one feature is returned)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series | Below is the instruction that describes the task:
### Input:
Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by
.. math::
\\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})
where :math:`a` is the width parameter of the wavelet function.
This feature calculator takes three different parameter: widths, coeff and w. The feature calculater takes all the
different widths arrays and then calculates the cwt one time for each different width array. Then the values for the
different coefficient for coeff and width w are returned. (For each dic in param one feature is returned)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series
### Response:
def cwt_coefficients(x, param):
"""
Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
defined by
.. math::
\\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})
where :math:`a` is the width parameter of the wavelet function.
This feature calculator takes three different parameter: widths, coeff and w. The feature calculater takes all the
different widths arrays and then calculates the cwt one time for each different width array. Then the values for the
different coefficient for coeff and width w are returned. (For each dic in param one feature is returned)
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
:type param: list
:return: the different feature values
:return type: pandas.Series
"""
calculated_cwt = {}
res = []
indices = []
for parameter_combination in param:
widths = parameter_combination["widths"]
w = parameter_combination["w"]
coeff = parameter_combination["coeff"]
if widths not in calculated_cwt:
calculated_cwt[widths] = cwt(x, ricker, widths)
calculated_cwt_for_widths = calculated_cwt[widths]
indices += ["widths_{}__coeff_{}__w_{}".format(widths, coeff, w)]
i = widths.index(w)
if calculated_cwt_for_widths.shape[1] <= coeff:
res += [np.NaN]
else:
res += [calculated_cwt_for_widths[i, coeff]]
return zip(indices, res) |
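A sketch of how the `param` list is shaped when calling the feature calculator above (values invented; `widths` must be an indexable, hashable sequence such as a tuple):

```python
import numpy as np

x = np.sin(np.linspace(0, 10 * np.pi, 500))
param = [{"widths": (2, 5, 10, 20), "coeff": 14, "w": 5}]
for name, value in cwt_coefficients(x, param):
    print(name, value)  # e.g. widths_(2, 5, 10, 20)__coeff_14__w_5 <float>
```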
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
"""Override default logger to allow overriding of internal attributes."""
# See below commented section for a simple example of what the docstring refers to
if six.PY2:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
else:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
if extra is None:
extra = dict()
extra.update({"pid": os.getpid(), "uid": os.getuid(), "gid": os.getgid(), "ppid": os.getppid()})
for key in extra:
# if (key in ["message", "asctime"]) or (key in rv.__dict__):
# raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv | Override default logger to allow overriding of internal attributes. | Below is the the instruction that describes the task:
### Input:
Override default logger to allow overriding of internal attributes.
### Response:
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
"""Override default logger to allow overriding of internal attributes."""
# See below commented section for a simple example of what the docstring refers to
if six.PY2:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
else:
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
if extra is None:
extra = dict()
extra.update({"pid": os.getpid(), "uid": os.getuid(), "gid": os.getgid(), "ppid": os.getppid()})
for key in extra:
# if (key in ["message", "asctime"]) or (key in rv.__dict__):
# raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv |
def _disambiguate_pos(self, terms, pos):
"""
Disambiguates a list of tokens of a given PoS.
"""
# Map the terms to candidate concepts
# Consider only the top 3 most common senses
candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms}
# Filter to unique concepts
concepts = set(c for cons in candidate_map.values() for c in cons)
# Back to list for consistent ordering
concepts = list(concepts)
sim_mat = self._similarity_matrix(concepts)
# Final map of terms to their disambiguated concepts
map = {}
# This is terrible
# For each term, select the candidate concept
# which has the maximum aggregate similarity score against
# all other candidate concepts of all other terms sharing the same PoS
for term, cons in candidate_map.items():
# Some words may not be in WordNet
# and thus have no candidate concepts, so skip
if not cons:
continue
scores = []
for con in cons:
i = concepts.index(con)
scores_ = []
for term_, cons_ in candidate_map.items():
# Some words may not be in WordNet
# and thus have no candidate concepts, so skip
if term == term_ or not cons_:
continue
cons_idx = [concepts.index(c) for c in cons_]
top_sim = max(sim_mat[i,cons_idx])
scores_.append(top_sim)
scores.append(sum(scores_))
best_idx = np.argmax(scores)
map[term] = cons[best_idx]
return map | Disambiguates a list of tokens of a given PoS. | Below is the the instruction that describes the task:
### Input:
Disambiguates a list of tokens of a given PoS.
### Response:
def _disambiguate_pos(self, terms, pos):
"""
Disambiguates a list of tokens of a given PoS.
"""
# Map the terms to candidate concepts
# Consider only the top 3 most common senses
candidate_map = {term: wn.synsets(term, pos=pos)[:3] for term in terms}
# Filter to unique concepts
concepts = set(c for cons in candidate_map.values() for c in cons)
# Back to list for consistent ordering
concepts = list(concepts)
sim_mat = self._similarity_matrix(concepts)
# Final map of terms to their disambiguated concepts
map = {}
# This is terrible
# For each term, select the candidate concept
# which has the maximum aggregate similarity score against
# all other candidate concepts of all other terms sharing the same PoS
for term, cons in candidate_map.items():
# Some words may not be in WordNet
# and thus have no candidate concepts, so skip
if not cons:
continue
scores = []
for con in cons:
i = concepts.index(con)
scores_ = []
for term_, cons_ in candidate_map.items():
# Some words may not be in WordNet
# and thus have no candidate concepts, so skip
if term == term_ or not cons_:
continue
cons_idx = [concepts.index(c) for c in cons_]
top_sim = max(sim_mat[i,cons_idx])
scores_.append(top_sim)
scores.append(sum(scores_))
best_idx = np.argmax(scores)
map[term] = cons[best_idx]
return map |
def create(cls, pid_value, **kwargs):
"""Create a new record identifier.
For more information about parameters,
see :meth:`invenio_pidstore.providers.BaseProvider.create`.
:param pid_value: Persistent identifier value.
:params **kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
parameters.
:returns: A :class:`invenio_pidstore.providers.DataCiteProvider`
instance.
"""
return super(DataCiteProvider, cls).create(
pid_value=pid_value, **kwargs) | Create a new record identifier.
For more information about parameters,
see :meth:`invenio_pidstore.providers.BaseProvider.create`.
:param pid_value: Persistent identifier value.
:params **kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
parameters.
:returns: A :class:`invenio_pidstore.providers.DataCiteProvider`
instance. | Below is the the instruction that describes the task:
### Input:
Create a new record identifier.
For more information about parameters,
see :meth:`invenio_pidstore.providers.BaseProvider.create`.
:param pid_value: Persistent identifier value.
:params **kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
parameters.
:returns: A :class:`invenio_pidstore.providers.DataCiteProvider`
instance.
### Response:
def create(cls, pid_value, **kwargs):
"""Create a new record identifier.
For more information about parameters,
see :meth:`invenio_pidstore.providers.BaseProvider.create`.
:param pid_value: Persistent identifier value.
:params **kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
parameters.
:returns: A :class:`invenio_pidstore.providers.DataCiteProvider`
instance.
"""
return super(DataCiteProvider, cls).create(
pid_value=pid_value, **kwargs) |
def disable_svc_freshness_check(self, service):
"""Disable freshness check for a service
Format of the line that triggers function call::
DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.check_freshness:
service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
service.check_freshness = False
self.send_an_element(service.get_update_status_brok()) | Disable freshness check for a service
Format of the line that triggers function call::
DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None | Below is the the instruction that describes the task:
### Input:
Disable freshness check for a service
Format of the line that triggers function call::
DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
### Response:
def disable_svc_freshness_check(self, service):
"""Disable freshness check for a service
Format of the line that triggers function call::
DISABLE_SERVICE_FRESHNESS_CHECK;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.check_freshness:
service.modified_attributes |= DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
service.check_freshness = False
self.send_an_element(service.get_update_status_brok()) |
def _simple_cmd(cmd, app, url='http://localhost:8080/manager', timeout=180):
'''
Simple command wrapper to commands that need only a path option
'''
try:
opts = {
'path': app,
'version': ls(url)[app]['version']
}
return '\n'.join(_wget(cmd, opts, url, timeout=timeout)['msg'])
except Exception:
return 'FAIL - No context exists for path {0}'.format(app) | Simple command wrapper to commands that need only a path option | Below is the the instruction that describes the task:
### Input:
Simple command wrapper to commands that need only a path option
### Response:
def _simple_cmd(cmd, app, url='http://localhost:8080/manager', timeout=180):
'''
Simple command wrapper to commands that need only a path option
'''
try:
opts = {
'path': app,
'version': ls(url)[app]['version']
}
return '\n'.join(_wget(cmd, opts, url, timeout=timeout)['msg'])
except Exception:
return 'FAIL - No context exists for path {0}'.format(app) |
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def check_last_ip(app):
return LastIpCheck(app, conf)
return check_last_ip | Returns a WSGI filter app for use with paste.deploy. | Below is the the instruction that describes the task:
### Input:
Returns a WSGI filter app for use with paste.deploy.
### Response:
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def check_last_ip(app):
return LastIpCheck(app, conf)
return check_last_ip |
def set(self, value, *keys):
"""
Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file; if
it does not exist, an error will be printed stating that the value does not
exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
"""
element = self
if keys is None:
return self
if '.' in keys[0]:
keys = keys[0].split(".")
nested_str = ''.join(["['{0}']".format(x) for x in keys])
# Safely evaluate an expression to see if it is one of the Python
# literal structures: strings, numbers, tuples, lists, dicts, booleans,
# and None. Quoted string will be used if it is none of these types.
try:
ast.literal_eval(str(value))
converted = str(value)
except ValueError:
converted = "'" + str(value) + "'"
exec("self" + nested_str + "=" + converted)
return element | Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file; if
it does not exist, an error will be printed stating that the value does not
exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy") | Below is the the instruction that describes the task:
### Input:
Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file; if
it does not exist, an error will be printed stating that the value does not
exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
### Response:
def set(self, value, *keys):
"""
Sets the dict of the information as read from the yaml file. To access
the file safely, you can use the keys in the order of the access.
Example: set("{'project':{'fg82':[i0-i10]}}", "provisioner","policy")
will set the value of config["provisioner"]["policy"] in the yaml file; if
it does not exist, an error will be printed stating that the value does not
exist. Alternatively you can use the . notation e.g.
set("{'project':{'fg82':[i0-i10]}}", "provisioner.policy")
"""
element = self
if keys is None:
return self
if '.' in keys[0]:
keys = keys[0].split(".")
nested_str = ''.join(["['{0}']".format(x) for x in keys])
# Safely evaluate an expression to see if it is one of the Python
# literal structures: strings, numbers, tuples, lists, dicts, booleans,
# and None. Quoted string will be used if it is none of these types.
try:
ast.literal_eval(str(value))
converted = str(value)
except ValueError:
converted = "'" + str(value) + "'"
exec("self" + nested_str + "=" + converted)
return element |
def update_status(self, progress):
"""
waits for a signal emitted from a thread and updates the gui
Args:
progress:
Returns:
"""
# interval at which the gui will be updated, if requests come in faster than they will be ignored
update_interval = 0.2
now = datetime.datetime.now()
if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval):
return
self._last_progress_update = now
self.progressBar.setValue(progress)
script = self.current_script
# Estimate remaining time if progress has been made
if progress:
remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds))
self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time))
if script is not str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ['scripts', 'instruments']:
self.plot_script(script) | waits for a signal emitted from a thread and updates the gui
Args:
progress:
Returns: | Below is the the instruction that describes the task:
### Input:
waits for a signal emitted from a thread and updates the gui
Args:
progress:
Returns:
### Response:
def update_status(self, progress):
"""
waits for a signal emitted from a thread and updates the gui
Args:
progress:
Returns:
"""
# interval at which the gui will be updated; if requests come in faster than this they will be ignored
update_interval = 0.2
now = datetime.datetime.now()
if not self._last_progress_update is None and now-self._last_progress_update < datetime.timedelta(seconds=update_interval):
return
self._last_progress_update = now
self.progressBar.setValue(progress)
script = self.current_script
# Estimate remaining time if progress has been made
if progress:
remaining_time = str(datetime.timedelta(seconds=script.remaining_time.seconds))
self.lbl_time_estimate.setText('time remaining: {:s}'.format(remaining_time))
if script is not str(self.tabWidget.tabText(self.tabWidget.currentIndex())).lower() in ['scripts', 'instruments']:
self.plot_script(script) |
def render(self, is_unicode=False, pretty_print=False):
"""Last thing to do before rendering"""
for f in self.graph.xml_filters:
self.root = f(self.root)
args = {'encoding': 'utf-8'}
svg = b''
if etree.lxml:
args['pretty_print'] = pretty_print
if not self.graph.disable_xml_declaration:
svg = b"<?xml version='1.0' encoding='utf-8'?>\n"
if not self.graph.disable_xml_declaration:
svg += b'\n'.join([
etree.tostring(pi, **args)
for pi in self.processing_instructions
])
svg += etree.tostring(self.root, **args)
if self.graph.disable_xml_declaration or is_unicode:
svg = svg.decode('utf-8')
return svg | Last thing to do before rendering | Below is the the instruction that describes the task:
### Input:
Last thing to do before rendering
### Response:
def render(self, is_unicode=False, pretty_print=False):
"""Last thing to do before rendering"""
for f in self.graph.xml_filters:
self.root = f(self.root)
args = {'encoding': 'utf-8'}
svg = b''
if etree.lxml:
args['pretty_print'] = pretty_print
if not self.graph.disable_xml_declaration:
svg = b"<?xml version='1.0' encoding='utf-8'?>\n"
if not self.graph.disable_xml_declaration:
svg += b'\n'.join([
etree.tostring(pi, **args)
for pi in self.processing_instructions
])
svg += etree.tostring(self.root, **args)
if self.graph.disable_xml_declaration or is_unicode:
svg = svg.decode('utf-8')
return svg |
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags):
"""Dump data from Invenio legacy."""
init_app_context()
file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing)
kwargs = dict((f.strip('-').replace('-', '_'), True) for f in thing_flags)
try:
thing_func = collect_things_entry_points()[thing]
except KeyError:
click.Abort(
'{0} is not in the list of available things to migrate: '
'{1}'.format(thing, collect_things_entry_points()))
click.echo("Querying {0}...".format(thing))
count, items = thing_func.get(query, from_date, limit=limit, **kwargs)
progress_i = 0 # Progress bar counter
click.echo("Dumping {0}...".format(thing))
with click.progressbar(length=count) as bar:
for i, chunk_ids in enumerate(grouper(items, chunk_size)):
with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp:
fp.write("[\n")
for _id in chunk_ids:
try:
json.dump(
thing_func.dump(_id, from_date, **kwargs),
fp,
default=set_serializer
)
fp.write(",")
except Exception as e:
click.secho("Failed dump {0} {1} ({2})".format(
thing, _id, e.message), fg='red')
progress_i += 1
bar.update(progress_i)
# Strip trailing comma.
fp.seek(fp.tell()-1)
fp.write("\n]") | Dump data from Invenio legacy. | Below is the the instruction that describes the task:
### Input:
Dump data from Invenio legacy.
### Response:
def dump(thing, query, from_date, file_prefix, chunk_size, limit, thing_flags):
"""Dump data from Invenio legacy."""
init_app_context()
file_prefix = file_prefix if file_prefix else '{0}_dump'.format(thing)
kwargs = dict((f.strip('-').replace('-', '_'), True) for f in thing_flags)
try:
thing_func = collect_things_entry_points()[thing]
except KeyError:
click.Abort(
'{0} is not in the list of available things to migrate: '
'{1}'.format(thing, collect_things_entry_points()))
click.echo("Querying {0}...".format(thing))
count, items = thing_func.get(query, from_date, limit=limit, **kwargs)
progress_i = 0 # Progress bar counter
click.echo("Dumping {0}...".format(thing))
with click.progressbar(length=count) as bar:
for i, chunk_ids in enumerate(grouper(items, chunk_size)):
with open('{0}_{1}.json'.format(file_prefix, i), 'w') as fp:
fp.write("[\n")
for _id in chunk_ids:
try:
json.dump(
thing_func.dump(_id, from_date, **kwargs),
fp,
default=set_serializer
)
fp.write(",")
except Exception as e:
click.secho("Failed dump {0} {1} ({2})".format(
thing, _id, e.message), fg='red')
progress_i += 1
bar.update(progress_i)
# Strip trailing comma.
fp.seek(fp.tell()-1)
fp.write("\n]") |
def create_cloudwatch_log_event(app_name, env, region, rules):
"""Create cloudwatch log event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings
"""
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
log_group = rules.get('log_group')
filter_name = rules.get('filter_name')
filter_pattern = rules.get('filter_pattern')
if not log_group:
LOG.critical('Log group is required and no "log_group" is defined!')
raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')
if not filter_name:
LOG.critical('Filter name is required and no filter_name is defined!')
raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')
if filter_pattern is None:
LOG.critical('Filter pattern is required and no filter_pattern is defined!')
raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_"))
principal = 'logs.{}.amazonaws.com'.format(region)
account_id = get_env_credential(env=env)['accountId']
source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group)
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
source_arn=source_arn,
env=env,
region=region)
cloudwatch_client.put_subscription_filter(
logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn)
LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern) | Create cloudwatch log event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings | Below is the the instruction that describes the task:
### Input:
Create cloudwatch log event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings
### Response:
def create_cloudwatch_log_event(app_name, env, region, rules):
"""Create cloudwatch log event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (str): Trigger rules from the settings
"""
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
log_group = rules.get('log_group')
filter_name = rules.get('filter_name')
filter_pattern = rules.get('filter_pattern')
if not log_group:
LOG.critical('Log group is required and no "log_group" is defined!')
raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')
if not filter_name:
LOG.critical('Filter name is required and no filter_name is defined!')
raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')
if filter_pattern is None:
LOG.critical('Filter pattern is required and no filter_pattern is defined!')
raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_"))
principal = 'logs.{}.amazonaws.com'.format(region)
account_id = get_env_credential(env=env)['accountId']
source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group)
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
source_arn=source_arn,
env=env,
region=region)
cloudwatch_client.put_subscription_filter(
logGroupName=log_group, filterName=filter_name, filterPattern=filter_pattern, destinationArn=lambda_alias_arn)
LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern) |
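A purely hypothetical rules payload for the trigger above; the log group, filter name and pattern are invented, and the call is left commented out because it needs real AWS credentials and an existing Lambda alias:
rules = {
    "log_group": "/aws/lambda/example-app",   # hypothetical log group
    "filter_name": "error lines",
    "filter_pattern": "ERROR",
}
# create_cloudwatch_log_event("exampleapp", "dev", "us-east-1", rules)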
def update_factor(self, name, body):
"""Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
"""
url = self._url('factors/{}'.format(name))
return self.client.put(url, data=body) | Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name | Below is the the instruction that describes the task:
### Input:
Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
### Response:
def update_factor(self, name, body):
"""Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
"""
url = self._url('factors/{}'.format(name))
return self.client.put(url, data=body) |
def make_hash(o):
r"""
Makes a hash from a dictionary, list, tuple or set to any level, that
contains only other hashable types (including any lists, tuples, sets, and
dictionaries). In the case where other kinds of objects (like classes) need
to be hashed, pass in a collection of object attributes that are pertinent.
For example, a class can be hashed in this fashion:
make_hash([cls.__dict__, cls.__name__])
A function can be hashed like so:
make_hash([fn.__dict__, fn.__code__])
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
"""
if type(o) == DictProxyType:
o2 = {}
for k, v in o.items():
if not k.startswith("__"):
o2[k] = v
o = o2
if isinstance(o, (set, tuple, list)):
return tuple([make_hash(e) for e in o])
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hash(v)
return hash(tuple(frozenset(sorted(new_o.items())))) | r"""
Makes a hash from a dictionary, list, tuple or set to any level, that
contains only other hashable types (including any lists, tuples, sets, and
dictionaries). In the case where other kinds of objects (like classes) need
to be hashed, pass in a collection of object attributes that are pertinent.
For example, a class can be hashed in this fashion:
make_hash([cls.__dict__, cls.__name__])
A function can be hashed like so:
make_hash([fn.__dict__, fn.__code__])
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary | Below is the the instruction that describes the task:
### Input:
r"""
Makes a hash from a dictionary, list, tuple or set to any level, that
contains only other hashable types (including any lists, tuples, sets, and
dictionaries). In the case where other kinds of objects (like classes) need
to be hashed, pass in a collection of object attributes that are pertinent.
For example, a class can be hashed in this fashion:
make_hash([cls.__dict__, cls.__name__])
A function can be hashed like so:
make_hash([fn.__dict__, fn.__code__])
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
### Response:
def make_hash(o):
r"""
Makes a hash from a dictionary, list, tuple or set to any level, that
contains only other hashable types (including any lists, tuples, sets, and
dictionaries). In the case where other kinds of objects (like classes) need
to be hashed, pass in a collection of object attributes that are pertinent.
For example, a class can be hashed in this fashion:
make_hash([cls.__dict__, cls.__name__])
A function can be hashed like so:
make_hash([fn.__dict__, fn.__code__])
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
"""
if type(o) == DictProxyType:
o2 = {}
for k, v in o.items():
if not k.startswith("__"):
o2[k] = v
o = o2
if isinstance(o, (set, tuple, list)):
return tuple([make_hash(e) for e in o])
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hash(v)
return hash(tuple(frozenset(sorted(new_o.items())))) |
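A small usage sketch, assuming the copy import and DictProxyType definition from the original module; it shows that key order does not affect the resulting hash:
a = {"x": [1, 2, 3], "y": {"z": (4, 5)}}
b = {"y": {"z": (4, 5)}, "x": [1, 2, 3]}
assert make_hash(a) == make_hash(b)   # same contents, different key order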
def create_loadbalancer(self, datacenter_id, loadbalancer):
"""
Creates a load balancer within the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer: The load balancer object to be created.
:type loadbalancer: ``dict``
"""
data = json.dumps(self._create_loadbalancer_dict(loadbalancer))
response = self._perform_request(
url='/datacenters/%s/loadbalancers' % datacenter_id,
method='POST',
data=data)
return response | Creates a load balancer within the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer: The load balancer object to be created.
:type loadbalancer: ``dict`` | Below is the the instruction that describes the task:
### Input:
Creates a load balancer within the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer: The load balancer object to be created.
:type loadbalancer: ``dict``
### Response:
def create_loadbalancer(self, datacenter_id, loadbalancer):
"""
Creates a load balancer within the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param loadbalancer: The load balancer object to be created.
:type loadbalancer: ``dict``
"""
data = json.dumps(self._create_loadbalancer_dict(loadbalancer))
response = self._perform_request(
url='/datacenters/%s/loadbalancers' % datacenter_id,
method='POST',
data=data)
return response |
def siphashx24(message, key=b'', encoder=nacl.encoding.HexEncoder):
"""
Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2
"""
digest = _sip_hashx(message, key)
return encoder.encode(digest) | Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2 | Below is the the instruction that describes the task:
### Input:
Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2
### Response:
def siphashx24(message, key=b'', encoder=nacl.encoding.HexEncoder):
"""
Computes a keyed MAC of ``message`` using the 128 bit variant of the
siphash-2-4 construction.
:param message: The message to hash.
:type message: bytes
:param key: the message authentication key for the siphash MAC construct
:type key: bytes(:const:`SIPHASHX_KEYBYTES`)
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes(:const:`SIPHASHX_BYTES`)
.. versionadded:: 1.2
"""
digest = _sip_hashx(message, key)
return encoder.encode(digest) |
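A usage sketch, assuming this is the helper exposed by PyNaCl's nacl.hashlib; the underlying primitive expects a 16-byte key, so the illustrative key below is padded to that length:
key = b'0' * 16                                         # 16-byte MAC key (illustrative)
tag = siphashx24(b'a message to authenticate', key=key)
print(tag)                                              # 32 hex characters, i.e. a 128-bit tag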
def additive_noise(stream, key='X', scale=1e-1):
'''Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream.
'''
for data in stream:
noise_shape = data[key].shape
noise = scale * np.random.randn(*noise_shape)
data[key] = data[key] + noise
yield data | Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream. | Below is the the instruction that describes the task:
### Input:
Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream.
### Response:
def additive_noise(stream, key='X', scale=1e-1):
'''Add noise to a data stream.
Parameters
----------
stream : iterable
A stream that yields data objects.
key : string, default='X'
Name of the field to add noise.
scale : float, default=0.1
Scale factor for gaussian noise.
Yields
------
data : dict
Updated data objects in the stream.
'''
for data in stream:
noise_shape = data[key].shape
noise = scale * np.random.randn(*noise_shape)
data[key] = data[key] + noise
yield data |
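A short, self-contained sketch that wraps a stream of dicts holding numpy arrays; the toy generator is invented for illustration:
import numpy as np

def toy_stream(n=3):
    for _ in range(n):
        yield {"X": np.zeros((4, 2)), "Y": np.ones(4)}

for sample in additive_noise(toy_stream(), key="X", scale=0.05):
    print(sample["X"].std())   # roughly 0.05 once the noise is added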
def get_path_matching(name):
"""Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
"""
# first try looking in the user folder
p = os.path.join(os.path.expanduser("~"), name)
# then try expanding upwards from cwd
if not os.path.isdir(p):
p = None
drive, folders = os.path.splitdrive(os.getcwd())
folders = folders.split(os.sep)
folders.insert(0, os.sep)
if name in folders:
p = os.path.join(drive, *folders[: folders.index(name) + 1])
# TODO: something more robust to catch the rest of the cases?
return p | Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath. | Below is the the instruction that describes the task:
### Input:
Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
### Response:
def get_path_matching(name):
"""Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
"""
# first try looking in the user folder
p = os.path.join(os.path.expanduser("~"), name)
# then try expanding upwards from cwd
if not os.path.isdir(p):
p = None
drive, folders = os.path.splitdrive(os.getcwd())
folders = folders.split(os.sep)
folders.insert(0, os.sep)
if name in folders:
p = os.path.join(drive, *folders[: folders.index(name) + 1])
# TODO: something more robust to catch the rest of the cases?
return p |
def tree_type_checker(*ref):
'''Tree type checker functor
Returns:
Functor that takes a tree, and returns true if that tree matches any of
NeuriteTypes in ref
Ex:
>>> from neurom.core.types import NeuriteType, tree_type_checker
>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
'''
ref = tuple(ref)
if NeuriteType.all in ref:
def check_tree_type(_):
'''Always returns true'''
return True
else:
def check_tree_type(tree):
'''Check whether tree has the same type as ref
Returns:
True if tree.type is in ref or if ref contains NeuriteType.all
'''
return tree.type in ref
return check_tree_type | Tree type checker functor
Returns:
Functor that takes a tree, and returns true if that tree matches any of
NeuriteTypes in ref
Ex:
>>> from neurom.core.types import NeuriteType, tree_type_checker
>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter) | Below is the the instruction that describes the task:
### Input:
Tree type checker functor
Returns:
Functor that takes a tree, and returns true if that tree matches any of
NeuriteTypes in ref
Ex:
>>> from neurom.core.types import NeuriteType, tree_type_checker
>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
### Response:
def tree_type_checker(*ref):
'''Tree type checker functor
Returns:
Functor that takes a tree, and returns true if that tree matches any of
NeuriteTypes in ref
Ex:
>>> from neurom.core.types import NeuriteType, tree_type_checker
>>> tree_filter = tree_type_checker(NeuriteType.axon, NeuriteType.basal_dendrite)
>>> nrn.i_neurites(tree.isegment, tree_filter=tree_filter)
'''
ref = tuple(ref)
if NeuriteType.all in ref:
def check_tree_type(_):
'''Always returns true'''
return True
else:
def check_tree_type(tree):
'''Check whether tree has the same type as ref
Returns:
True if tree.type is in ref or if ref contains NeuriteType.all
'''
return tree.type in ref
return check_tree_type |
def _compile_bus_injection(self):
"""Impose injections on buses"""
string = '"""\n'
for device, series in zip(self.devices, self.series):
if series:
string += 'system.' + device + '.gcall(system.dae)\n'
string += '\n'
string += 'system.dae.reset_small_g()\n'
string += self.gisland
string += '"""'
self.bus_injection = compile(eval(string), '', 'exec') | Impose injections on buses | Below is the the instruction that describes the task:
### Input:
Impose injections on buses
### Response:
def _compile_bus_injection(self):
"""Impose injections on buses"""
string = '"""\n'
for device, series in zip(self.devices, self.series):
if series:
string += 'system.' + device + '.gcall(system.dae)\n'
string += '\n'
string += 'system.dae.reset_small_g()\n'
string += self.gisland
string += '"""'
self.bus_injection = compile(eval(string), '', 'exec') |
def DataIsInteger(self):
"""Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
"""
return self.data_type in (
definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
definitions.REG_QWORD) | Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise. | Below is the the instruction that describes the task:
### Input:
Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
### Response:
def DataIsInteger(self):
"""Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
"""
return self.data_type in (
definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
definitions.REG_QWORD) |
async def send_request(self, connection: Connection, payment_handle: int):
"""
Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet.
:param connection: connection to submit request from
:param payment_handle: currently unused
:return:
Example:
connection = await Connection.create(source_id)
await connection.connect(phone_number)
credential = await Credential.create(source_id, offer)
await credential.send_request(connection, 0)
"""
if not hasattr(Credential.send_request, "cb"):
self.logger.debug("vcx_credential_send_request: Creating callback")
Credential.send_request.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
c_payment = c_uint32(payment_handle)
await do_call('vcx_credential_send_request',
c_credential_handle,
c_connection_handle,
c_payment,
Credential.send_request.cb) | Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet.
:param connection: connection to submit request from
:param payment_handle: currently unused
:return:
Example:
connection = await Connection.create(source_id)
await connection.connect(phone_number)
credential = await Credential.create(source_id, offer)
await credential.send_request(connection, 0) | Below is the the instruction that describes the task:
### Input:
Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet.
:param connection: connection to submit request from
:param payment_handle: currently unused
:return:
Example:
connection = await Connection.create(source_id)
await connection.connect(phone_number)
credential = await Credential.create(source_id, offer)
await credential.send_request(connection, 0)
### Response:
async def send_request(self, connection: Connection, payment_handle: int):
"""
Approves the credential offer and submits a credential request. The result will be a credential stored in the prover's wallet.
:param connection: connection to submit request from
:param payment_handle: currently unused
:return:
Example:
connection = await Connection.create(source_id)
await connection.connect(phone_number)
credential = await Credential.create(source_id, offer)
await credential.send_request(connection, 0)
"""
if not hasattr(Credential.send_request, "cb"):
self.logger.debug("vcx_credential_send_request: Creating callback")
Credential.send_request.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
c_credential_handle = c_uint32(self.handle)
c_connection_handle = c_uint32(connection.handle)
c_payment = c_uint32(payment_handle)
await do_call('vcx_credential_send_request',
c_credential_handle,
c_connection_handle,
c_payment,
Credential.send_request.cb) |
def is_username(string, minlen=1, maxlen=15):
""" Determines whether the @string pattern is username-like
@string: #str being tested
@minlen: minimum required username length
@maxlen: maximum username length
-> #bool
"""
if string:
string = string.strip()
return username_re.match(string) and (minlen <= len(string) <= maxlen)
return False | Determines whether the @string pattern is username-like
@string: #str being tested
@minlen: minimum required username length
@maxlen: maximum username length
-> #bool | Below is the the instruction that describes the task:
### Input:
Determines whether the @string pattern is username-like
@string: #str being tested
@minlen: minimum required username length
@maxlen: maximum username length
-> #bool
### Response:
def is_username(string, minlen=1, maxlen=15):
""" Determines whether the @string pattern is username-like
@string: #str being tested
@minlen: minimum required username length
@maxlen: maximum username length
-> #bool
"""
if string:
string = string.strip()
return username_re.match(string) and (minlen <= len(string) <= maxlen)
return False |
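Illustrative calls; username_re is a module-level compiled pattern in the original source, so a plausible stand-in is defined here purely for demonstration:
import re
username_re = re.compile(r'^@?[A-Za-z0-9_]+$')   # assumption, not the real pattern

print(bool(is_username("@alice")))                       # True with the stand-in pattern
print(bool(is_username("definitely not a user name")))   # False: spaces and too long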
def properties(lines):
"""Parse properties block
Returns:
dict: {property_type: (atom_index, value)}
"""
results = {}
for i, line in enumerate(lines):
type_ = line[3:6]
if type_ not in ["CHG", "RAD", "ISO"]:
continue # Other properties are not supported yet
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[10 + j * 8: 13 + j * 8])
val = int(line[14 + j * 8: 17 + j * 8])
results[type_].append((idx, val))
return results | Parse properties block
Returns:
dict: {property_type: (atom_index, value)} | Below is the the instruction that describes the task:
### Input:
Parse properties block
Returns:
dict: {property_type: (atom_index, value)}
### Response:
def properties(lines):
"""Parse properties block
Returns:
dict: {property_type: (atom_index, value)}
"""
results = {}
for i, line in enumerate(lines):
type_ = line[3:6]
if type_ not in ["CHG", "RAD", "ISO"]:
continue # Other properties are not supported yet
count = int(line[6:9])
results[type_] = []
for j in range(count):
idx = int(line[10 + j * 8: 13 + j * 8])
val = int(line[14 + j * 8: 17 + j * 8])
results[type_].append((idx, val))
return results |
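A minimal sketch that feeds two property lines in the fixed-width layout the slicing above expects; the atom indices and values are made up:
lines = [
    "M  CHG  2   1   1   4  -1",
    "M  ISO  1   3  13",
]
print(properties(lines))
# {'CHG': [(1, 1), (4, -1)], 'ISO': [(3, 13)]}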
def get_album(self, object_id, relation=None, **kwargs):
"""
Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object
"""
return self.get_object("album", object_id, relation=relation, **kwargs) | Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object | Below is the the instruction that describes the task:
### Input:
Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object
### Response:
def get_album(self, object_id, relation=None, **kwargs):
"""
Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object
"""
return self.get_object("album", object_id, relation=relation, **kwargs) |
def _tobinarray_really(self, start, end, pad, size):
"""Return binary array."""
if pad is None:
pad = self.padding
bin = array('B')
if self._buf == {} and None in (start, end):
return bin
if size is not None and size <= 0:
raise ValueError("tobinarray: wrong value for size")
start, end = self._get_start_end(start, end, size)
for i in range_g(start, end+1):
bin.append(self._buf.get(i, pad))
return bin | Return binary array. | Below is the the instruction that describes the task:
### Input:
Return binary array.
### Response:
def _tobinarray_really(self, start, end, pad, size):
"""Return binary array."""
if pad is None:
pad = self.padding
bin = array('B')
if self._buf == {} and None in (start, end):
return bin
if size is not None and size <= 0:
raise ValueError("tobinarray: wrong value for size")
start, end = self._get_start_end(start, end, size)
for i in range_g(start, end+1):
bin.append(self._buf.get(i, pad))
return bin |
def get_single_header(headers, key):
"""
Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key
"""
raw_headers = headers.getRawHeaders(key)
if raw_headers is None:
return None
# Take the final header as the authoritative one
header, _ = cgi.parse_header(raw_headers[-1])
return header | Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key | Below is the the instruction that describes the task:
### Input:
Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key
### Response:
def get_single_header(headers, key):
"""
Get a single value for the given key out of the given set of headers.
:param twisted.web.http_headers.Headers headers:
The set of headers in which to look for the header value
:param str key:
The header key
"""
raw_headers = headers.getRawHeaders(key)
if raw_headers is None:
return None
# Take the final header as the authoritative one
header, _ = cgi.parse_header(raw_headers[-1])
return header |
def get_l2cap_options (sock):
"""get_l2cap_options (sock, mtu)
Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size.
"""
# TODO this should be in the C module, because it depends
# directly on struct l2cap_options layout.
s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
options = list( struct.unpack ("HHHBBBH", s))
return options | get_l2cap_options (sock)
Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size. | Below is the the instruction that describes the task:
### Input:
get_l2cap_options (sock)
Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size.
### Response:
def get_l2cap_options (sock):
"""get_l2cap_options (sock, mtu)
Gets L2CAP options for the specified L2CAP socket.
Options are: omtu, imtu, flush_to, mode, fcs, max_tx, txwin_size.
"""
# TODO this should be in the C module, because it depends
# directly on struct l2cap_options layout.
s = sock.getsockopt (SOL_L2CAP, L2CAP_OPTIONS, 12)
options = list( struct.unpack ("HHHBBBH", s))
return options |
def untracable(object):
"""
Marks decorated object as non tracable.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object
"""
@functools.wraps(object)
def untracable_wrapper(*args, **kwargs):
"""
Marks decorated object as non tracable.
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: Object.
:rtype: object
"""
return object(*args, **kwargs)
set_untracable(untracable_wrapper)
return untracable_wrapper | Marks decorated object as non tracable.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object | Below is the the instruction that describes the task:
### Input:
Marks decorated object as non tracable.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object
### Response:
def untracable(object):
"""
Marks decorated object as non tracable.
:param object: Object to decorate.
:type object: object
:return: Object.
:rtype: object
"""
@functools.wraps(object)
def untracable_wrapper(*args, **kwargs):
"""
Marks decorated object as non tracable.
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: Object.
:rtype: object
"""
return object(*args, **kwargs)
set_untracable(untracable_wrapper)
return untracable_wrapper |
def _check_frames(self, frames, fill_value):
"""Reduce frames to no more than are available in the file."""
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames | Reduce frames to no more than are available in the file. | Below is the the instruction that describes the task:
### Input:
Reduce frames to no more than are available in the file.
### Response:
def _check_frames(self, frames, fill_value):
"""Reduce frames to no more than are available in the file."""
if self.seekable():
remaining_frames = self.frames - self.tell()
if frames < 0 or (frames > remaining_frames and
fill_value is None):
frames = remaining_frames
elif frames < 0:
raise ValueError("frames must be specified for non-seekable files")
return frames |
def get_safe_type(self):
"""Determines the type of ESA product.
In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.
:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError
"""
product_type = self.product_id.split('_')[1]
if product_type.startswith('MSI'):
return EsaSafeType.COMPACT_TYPE
if product_type in ['OPER', 'USER']:
return EsaSafeType.OLD_TYPE
raise ValueError('Unrecognized product type of product id {}'.format(self.product_id)) | Determines the type of ESA product.
In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.
:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError | Below is the the instruction that describes the task:
### Input:
Determines the type of ESA product.
In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.
:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError
### Response:
def get_safe_type(self):
"""Determines the type of ESA product.
In 2016 ESA changed structure and naming of data. Therefore the class must
distinguish between old product type and compact (new) product type.
:return: type of ESA product
:rtype: constants.EsaSafeType
:raises: ValueError
"""
product_type = self.product_id.split('_')[1]
if product_type.startswith('MSI'):
return EsaSafeType.COMPACT_TYPE
if product_type in ['OPER', 'USER']:
return EsaSafeType.OLD_TYPE
raise ValueError('Unrecognized product type of product id {}'.format(self.product_id)) |
def fastp_read_n_plot(self):
""" Make the read N content plot for Fastp """
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_n_content_data, 'Base Content Percent')
pconfig = {
'id': 'fastp-seq-content-n-plot',
'title': 'Fastp: Read N Content',
'xlab': 'Read Position',
'ylab': 'R1 Before filtering: Base Content Percent',
'yCeiling': 100,
'yMinRange': 5,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) | Make the read N content plot for Fastp | Below is the the instruction that describes the task:
### Input:
Make the read N content plot for Fastp
### Response:
def fastp_read_n_plot(self):
""" Make the read N content plot for Fastp """
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_n_content_data, 'Base Content Percent')
pconfig = {
'id': 'fastp-seq-content-n-plot',
'title': 'Fastp: Read N Content',
'xlab': 'Read Position',
'ylab': 'R1 Before filtering: Base Content Percent',
'yCeiling': 100,
'yMinRange': 5,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) |
def add_layer_to_canvas(layer, name):
"""Helper method to add layer to QGIS.
:param layer: The layer.
:type layer: QgsMapLayer
:param name: Layer name.
:type name: str
"""
if qgis_version() >= 21800:
layer.setName(name)
else:
layer.setLayerName(name)
QgsProject.instance().addMapLayer(layer, False) | Helper method to add layer to QGIS.
:param layer: The layer.
:type layer: QgsMapLayer
:param name: Layer name.
:type name: str | Below is the the instruction that describes the task:
### Input:
Helper method to add layer to QGIS.
:param layer: The layer.
:type layer: QgsMapLayer
:param name: Layer name.
:type name: str
### Response:
def add_layer_to_canvas(layer, name):
"""Helper method to add layer to QGIS.
:param layer: The layer.
:type layer: QgsMapLayer
:param name: Layer name.
:type name: str
"""
if qgis_version() >= 21800:
layer.setName(name)
else:
layer.setLayerName(name)
QgsProject.instance().addMapLayer(layer, False) |
def bufsize_validator(kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid | a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. | Below is the the instruction that describes the task:
### Input:
a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this.
### Response:
def bufsize_validator(kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid |
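An illustrative check; ob_is_tty and ob_is_pipe are helpers from the surrounding module, so this only sketches the intended behaviour using a real OS pipe:
import os

r, w = os.pipe()
kwargs = {"out": os.fdopen(w, "wb"), "out_bufsize": 1024}
print(bufsize_validator(kwargs))
# [(('out', 'out_bufsize'), "Can't specify an out bufsize if the out target is a pipe or TTY")]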
def send(self, message):
""" Send a message by forwarding it to the server
:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error
"""
res = jsonex_request(self.server_url + '/im'.lstrip('/'), {'message': message})
msg = res['message'] # OutgoingMessage object
# Replace properties in the original object (so it's the same object, like with other providers)
for k, v in msg.__dict__.items():
setattr(message, k, v)
return message | Send a message by forwarding it to the server
:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error | Below is the the instruction that describes the task:
### Input:
Send a message by forwarding it to the server
:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error
### Response:
def send(self, message):
""" Send a message by forwarding it to the server
:param message: Message
:type message: smsframework.data.OutgoingMessage
:rtype: smsframework.data.OutgoingMessage
:raise Exception: any exception reported by the other side
:raise urllib2.URLError: Connection error
"""
res = jsonex_request(self.server_url + '/im'.lstrip('/'), {'message': message})
msg = res['message'] # OutgoingMessage object
# Replace properties in the original object (so it's the same object, like with other providers)
for k, v in msg.__dict__.items():
setattr(message, k, v)
return message |
def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=1, tab = ' '):
"""
Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment.
"""
(kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
(br, indent) = ('' if flat else '\n', '' if flat else tab * level)
prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)
params = dict(self.get_param_values())
show_lexsort = getattr(self, '_lexorder', None) is not None
modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
arg_list = [(k,params[k]) for k in pos_args] + pkwargs
lines = []
if annotate: # Optional annotating comment
len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
info_triple = (len(self),
', %d constant key(s)' % len_ckeys if len_ckeys else '',
', %d varying key(s)' % len_vkeys if len_vkeys else '')
annotation = '# == %d items%s%s ==\n' % info_triple
lines = [annotation]
if show_lexsort: lines.append('(')
if cycle:
lines.append('%s(...)' % self.__class__.__name__)
elif infix_operator:
level = level - 1
triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
lines.append('%s %s %s' % triple)
else:
lines.append('%s(' % self.__class__.__name__)
for (k,v) in arg_list:
lines.append('%s%s=%s' % (br+indent, k, pretty(v)))
lines.append(',')
lines = lines[:-1] +[br+(tab*(level-1))+')'] # Remove trailing comma
if show_lexsort:
lines.append(').lexsort(%s)' % ', '.join(repr(el) for el in self._lexorder))
return ''.join(lines) | Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment. | Below is the the instruction that describes the task:
### Input:
Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment.
### Response:
def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=1, tab = ' '):
"""
Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment.
"""
(kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
(br, indent) = ('' if flat else '\n', '' if flat else tab * level)
prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)
params = dict(self.get_param_values())
show_lexsort = getattr(self, '_lexorder', None) is not None
modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
arg_list = [(k,params[k]) for k in pos_args] + pkwargs
lines = []
if annotate: # Optional annotating comment
len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
info_triple = (len(self),
', %d constant key(s)' % len_ckeys if len_ckeys else '',
', %d varying key(s)' % len_vkeys if len_vkeys else '')
annotation = '# == %d items%s%s ==\n' % info_triple
lines = [annotation]
if show_lexsort: lines.append('(')
if cycle:
lines.append('%s(...)' % self.__class__.__name__)
elif infix_operator:
level = level - 1
triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
lines.append('%s %s %s' % triple)
else:
lines.append('%s(' % self.__class__.__name__)
for (k,v) in arg_list:
lines.append('%s%s=%s' % (br+indent, k, pretty(v)))
lines.append(',')
lines = lines[:-1] +[br+(tab*(level-1))+')'] # Remove trailing comma
if show_lexsort:
lines.append(').lexsort(%s)' % ', '.join(repr(el) for el in self._lexorder))
return ''.join(lines) |
def next_unused_name_in_group(grp, length):
""" Gives a name that isn't used in a Group.
Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.
Parameters
----------
grp : h5py.Group or h5py.File
The HDF5 Group (or File if at '/') to generate an unused name
in.
length : int
Number of characters the name should be.
Returns
-------
name : str
A name that isn't already an existing Dataset or Group in
`grp`.
"""
# While
#
# ltrs = string.ascii_letters + string.digits
# name = ''.join([random.choice(ltrs) for i in range(length)])
#
# seems intuitive, its performance is abysmal compared to
#
# '%0{0}x'.format(length) % random.getrandbits(length * 4)
#
# The difference is a factor of 20. Idea from
#
# https://stackoverflow.com/questions/2782229/most-lightweight-way-
# to-create-a-random-string-and-a-random-hexadecimal-number/
# 35161595#35161595
fmt = '%0{0}x'.format(length)
name = fmt % random.getrandbits(length * 4)
while name in grp:
name = fmt % random.getrandbits(length * 4)
return name | Gives a name that isn't used in a Group.
Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.
Parameters
----------
grp : h5py.Group or h5py.File
The HDF5 Group (or File if at '/') to generate an unused name
in.
length : int
Number of characters the name should be.
Returns
-------
name : str
A name that isn't already an existing Dataset or Group in
`grp`. | Below is the the instruction that describes the task:
### Input:
Gives a name that isn't used in a Group.
Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.
Parameters
----------
grp : h5py.Group or h5py.File
The HDF5 Group (or File if at '/') to generate an unused name
in.
length : int
Number of characters the name should be.
Returns
-------
name : str
A name that isn't already an existing Dataset or Group in
`grp`.
### Response:
def next_unused_name_in_group(grp, length):
""" Gives a name that isn't used in a Group.
Generates a name of the desired length that is not a Dataset or
Group in the given group. Note, if length is not large enough and
`grp` is full enough, there may be no available names meaning that
this function will hang.
Parameters
----------
grp : h5py.Group or h5py.File
The HDF5 Group (or File if at '/') to generate an unused name
in.
length : int
Number of characters the name should be.
Returns
-------
name : str
A name that isn't already an existing Dataset or Group in
`grp`.
"""
# While
#
# ltrs = string.ascii_letters + string.digits
# name = ''.join([random.choice(ltrs) for i in range(length)])
#
# seems intuitive, its performance is abysmal compared to
#
# '%0{0}x'.format(length) % random.getrandbits(length * 4)
#
# The difference is a factor of 20. Idea from
#
# https://stackoverflow.com/questions/2782229/most-lightweight-way-
# to-create-a-random-string-and-a-random-hexadecimal-number/
# 35161595#35161595
fmt = '%0{0}x'.format(length)
name = fmt % random.getrandbits(length * 4)
while name in grp:
name = fmt % random.getrandbits(length * 4)
return name |
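
A short example of the helper above in use with h5py; the file name is illustrative, and the random module must be imported wherever the function itself lives:

import random   # used internally by next_unused_name_in_group
import h5py

with h5py.File("example.h5", "w") as f:          # illustrative file name
    f.create_dataset("existing", data=[1, 2, 3])
    fresh = next_unused_name_in_group(f, 8)      # e.g. '3fa9c01b'
    f.create_dataset(fresh, data=[4, 5, 6])
    print(fresh in f)                            # True after creation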
def objectnames_conesearch(racenter,
declcenter,
searchradiusarcsec,
simbad_mirror='simbad',
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/simbad-cache',
verbose=True,
timeout=10.0,
refresh=2.0,
maxtimeout=90.0,
maxtries=1,
complete_query_later=True):
'''This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).
Parameters
----------
racenter,declcenter : float
The cone-search center coordinates in decimal degrees
searchradiusarcsec : float
The radius in arcseconds to search around the center coordinates.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the example at:
# http://simbad.u-strasbg.fr/simbad/sim-tap and the table diagram at:
# http://simbad.u-strasbg.fr/simbad/tap/tapsearch.html
query = (
"select a.oid, a.ra, a.dec, a.main_id, a.otype_txt, "
"a.coo_bibcode, a.nbref, b.ids as all_ids, "
"(DISTANCE(POINT('ICRS', a.ra, a.dec), "
"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from basic a join ids b on a.oid = b.oidref where "
"CONTAINS(POINT('ICRS',a.ra, a.dec),"
"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"ORDER by dist_arcsec asc "
)
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0)
return tap_query(formatted_query,
simbad_mirror=simbad_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later) | This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).
Parameters
----------
racenter,declcenter : float
The cone-search center coordinates in decimal degrees
searchradiusarcsec : float
The radius in arcseconds to search around the center coordinates.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table} | Below is the the instruction that describes the task:
### Input:
This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).
Parameters
----------
racenter,declcenter : float
The cone-search center coordinates in decimal degrees
searchradiusarcsec : float
The radius in arcseconds to search around the center coordinates.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
### Response:
def objectnames_conesearch(racenter,
declcenter,
searchradiusarcsec,
simbad_mirror='simbad',
returnformat='csv',
forcefetch=False,
cachedir='~/.astrobase/simbad-cache',
verbose=True,
timeout=10.0,
refresh=2.0,
maxtimeout=90.0,
maxtries=1,
complete_query_later=True):
'''This queries the SIMBAD TAP service for a list of object names near the
coords. This is effectively a "reverse" name resolver (i.e. this does the
opposite of SESAME).
Parameters
----------
racenter,declcenter : float
The cone-search center coordinates in decimal degrees
searchradiusarcsec : float
The radius in arcseconds to search around the center coordinates.
simbad_mirror : str
This is the key used to select a SIMBAD mirror from the
`SIMBAD_URLS` dict above. If set, the specified mirror will be used. If
None, a random mirror chosen from that dict will be used.
returnformat : {'csv','votable','json'}
        The returned file format to request from the SIMBAD TAP service.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
complete_query_later : bool
If set to True, a submitted query that does not return a result before
`maxtimeout` has passed will be cancelled but its input request
parameters and the result URL provided by the service will be saved. If
this function is then called later with these same input request
parameters, it will check if the query finally finished and a result is
available. If so, will download the results instead of submitting a new
query. If it's not done yet, will start waiting for results again. To
force launch a new query with the same request parameters, set the
`forcefetch` kwarg to True.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
'result':path to the file on disk with the downloaded data table}
'''
# this was generated using the example at:
# http://simbad.u-strasbg.fr/simbad/sim-tap and the table diagram at:
# http://simbad.u-strasbg.fr/simbad/tap/tapsearch.html
query = (
"select a.oid, a.ra, a.dec, a.main_id, a.otype_txt, "
"a.coo_bibcode, a.nbref, b.ids as all_ids, "
"(DISTANCE(POINT('ICRS', a.ra, a.dec), "
"POINT('ICRS', {ra_center:.5f}, {decl_center:.5f})))*3600.0 "
"AS dist_arcsec "
"from basic a join ids b on a.oid = b.oidref where "
"CONTAINS(POINT('ICRS',a.ra, a.dec),"
"CIRCLE('ICRS',{ra_center:.5f},{decl_center:.5f},"
"{search_radius:.6f}))=1 "
"ORDER by dist_arcsec asc "
)
formatted_query = query.format(ra_center=racenter,
decl_center=declcenter,
search_radius=searchradiusarcsec/3600.0)
return tap_query(formatted_query,
simbad_mirror=simbad_mirror,
returnformat=returnformat,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
complete_query_later=complete_query_later) |
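
An illustrative call of the cone-search function above, assuming it is importable from its astrobase services module and that a SIMBAD mirror is reachable; the coordinates and search radius are arbitrary:

res = objectnames_conesearch(279.23473, 38.78369, 15.0,
                             returnformat='csv', verbose=False)
if res is not None:
    print(res['provenance'])   # 'cache' or 'new download'
    print(res['result'])       # path to the downloaded CSV table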
def _is_modification_or_activity(feature):
"""Return True if the feature is a modification"""
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'active', 'inactive'):
return 'activity'
return 'modification' | Return True if the feature is a modification | Below is the the instruction that describes the task:
### Input:
Return True if the feature is a modification
### Response:
def _is_modification_or_activity(feature):
"""Return True if the feature is a modification"""
if not (isinstance(feature, _bp('ModificationFeature')) or \
isinstance(feature, _bpimpl('ModificationFeature'))):
return None
mf_type = feature.getModificationType()
if mf_type is None:
return None
mf_type_terms = mf_type.getTerm().toArray()
for term in mf_type_terms:
if term in ('residue modification, active',
'residue modification, inactive',
'active', 'inactive'):
return 'activity'
return 'modification' |
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None | Helper method to find the deployment label that the stage_name is currently associated with. | Below is the the instruction that describes the task:
### Input:
Helper method to find the deployment label that the stage_name is currently associated with.
### Response:
def _get_current_deployment_label(self):
'''
Helper method to find the deployment label that the stage_name is currently associated with.
'''
deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
deploymentId=deploymentId,
**self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None |
def get_processes(sort_by_name=True):
"""Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
"""
if sort_by_name:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
),
)
else:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
),
) | Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples. | Below is the the instruction that describes the task:
### Input:
Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
### Response:
def get_processes(sort_by_name=True):
"""Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
"""
if sort_by_name:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
),
)
else:
return sorted(
_list_processes(),
key=cmp_to_key(
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
),
) |
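
A small usage sketch, assuming the enclosing module provides the _list_processes helper the function relies on; each tuple has two or three elements depending on whether a cmdline is reported:

for proc in get_processes(sort_by_name=True)[:5]:
    pid, name = proc[0], proc[1]
    cmdline = proc[2] if len(proc) > 2 else ''
    print(pid, name, cmdline)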
def list(self, name, platform='', genre=''):
""" The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters.
"""
data_list = self.db.get_data(self.list_path, name=name,
platform=platform, genre=genre)
data_list = data_list.get('Data') or {}
games = data_list.get('Game') or []
return [self._build_item(**i) for i in games] | The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters. | Below is the the instruction that describes the task:
### Input:
The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters.
### Response:
def list(self, name, platform='', genre=''):
""" The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters.
"""
data_list = self.db.get_data(self.list_path, name=name,
platform=platform, genre=genre)
data_list = data_list.get('Data') or {}
games = data_list.get('Game') or []
return [self._build_item(**i) for i in games] |
def tail(self, path, tail_length=1024, append=False):
# Note: append is currently not implemented.
''' Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings
'''
#TODO: Make tail support multiple files at a time, like most other methods do
if not path:
raise InvalidInputException("tail: no path given")
block_size = self.serverdefaults()['blockSize']
if tail_length > block_size:
raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))
if tail_length <= 0:
raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")
processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item | Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings | Below is the the instruction that describes the task:
### Input:
Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings
### Response:
def tail(self, path, tail_length=1024, append=False):
# Note: append is currently not implemented.
''' Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings
'''
#TODO: Make tail support multiple files at a time, like most other methods do
if not path:
raise InvalidInputException("tail: no path given")
block_size = self.serverdefaults()['blockSize']
if tail_length > block_size:
raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))
if tail_length <= 0:
raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")
processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item |
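
A hedged example for the method above; client stands in for an instance of the enclosing HDFS client class and the path is hypothetical:

# client: placeholder instance of the enclosing HDFS client class
# read the last 2 KB of a log file (tail_length must not exceed the block size)
for chunk in client.tail('/logs/app/current.log', tail_length=2048):
    print(chunk)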
def date_range_builder(self, start='2013-02-11', end=None):
"""
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
"""
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end) | Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String | Below is the the instruction that describes the task:
### Input:
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
### Response:
def date_range_builder(self, start='2013-02-11', end=None):
"""
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
"""
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end) |
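
A quick sketch of the query fragment the builder above produces; searcher is a placeholder for an instance of the enclosing search class:

# searcher: placeholder instance of the enclosing search class
print(searcher.date_range_builder('2015-01-01', '2015-06-30'))
# -> acquisitionDate:[2015-01-01+TO+2015-06-30]

# omitting `end` falls back to today's date via time.strftime('%Y-%m-%d')
print(searcher.date_range_builder('2015-01-01'))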
def _delete_from_indices(self, pipeline):
"""Deletes the object's id from the sets(indices) it has been added
to and removes its list of indices (used for housekeeping).
"""
s = Set(self.key()['_indices'])
z = Set(self.key()['_zindices'])
for index in s.members:
pipeline.srem(index, self.id)
for index in z.members:
pipeline.zrem(index, self.id)
pipeline.delete(s.key)
pipeline.delete(z.key) | Deletes the object's id from the sets(indices) it has been added
to and removes its list of indices (used for housekeeping). | Below is the the instruction that describes the task:
### Input:
Deletes the object's id from the sets(indices) it has been added
to and removes its list of indices (used for housekeeping).
### Response:
def _delete_from_indices(self, pipeline):
"""Deletes the object's id from the sets(indices) it has been added
to and removes its list of indices (used for housekeeping).
"""
s = Set(self.key()['_indices'])
z = Set(self.key()['_zindices'])
for index in s.members:
pipeline.srem(index, self.id)
for index in z.members:
pipeline.zrem(index, self.id)
pipeline.delete(s.key)
pipeline.delete(z.key) |
def save(filename, obj, overwrite=False, create_directories=False):
'''
pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
if given). An error is raised when this cannot be accomplished; the first argument is always
    returned; though if the argument is a filename, it may be a different string that refers to
    the same file.
    The save/load protocol uses pickle for all saving/loading except when the object is a numpy
    object, in which case it is written using obj.tofile(). The save function writes meta-data
    into the file so it cannot simply be unpickled, but must be loaded using the pimms.load()
    function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.
Options:
* overwrite (False) The optional parameter overwrite indicates whether an error should be
raised before opening the file if the file already exists.
* create_directories (False) The optional parameter create_directories indicates whether the
function should attempt to create the directories in which the filename exists if they do
not already exist.
'''
if isinstance(filename, six.string_types):
filename = os.path.expanduser(filename)
if not overwrite and os.path.exists(filename):
raise ValueError('save would overwrite file %s' % filename)
if create_directories:
dname = os.path.dirname(os.path.realpath(filename))
if not os.path.isdir(dname): os.makedirs(dname)
with open(filename, 'wb') as f:
_save_stream(f, obj)
else:
_save_stream(filename, obj)
return filename | pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
if given). An error is raised when this cannot be accomplished; the first argument is always
    returned; though if the argument is a filename, it may be a different string that refers to
    the same file.
    The save/load protocol uses pickle for all saving/loading except when the object is a numpy
    object, in which case it is written using obj.tofile(). The save function writes meta-data
    into the file so it cannot simply be unpickled, but must be loaded using the pimms.load()
    function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.
Options:
* overwrite (False) The optional parameter overwrite indicates whether an error should be
raised before opening the file if the file already exists.
* create_directories (False) The optional parameter create_directories indicates whether the
function should attempt to create the directories in which the filename exists if they do
not already exist. | Below is the the instruction that describes the task:
### Input:
pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
if given). An error is raised when this cannot be accomplished; the first argument is always
    returned; though if the argument is a filename, it may be a different string that refers to
    the same file.
    The save/load protocol uses pickle for all saving/loading except when the object is a numpy
    object, in which case it is written using obj.tofile(). The save function writes meta-data
    into the file so it cannot simply be unpickled, but must be loaded using the pimms.load()
    function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.
Options:
* overwrite (False) The optional parameter overwrite indicates whether an error should be
raised before opening the file if the file already exists.
* create_directories (False) The optional parameter create_directories indicates whether the
function should attempt to create the directories in which the filename exists if they do
not already exist.
### Response:
def save(filename, obj, overwrite=False, create_directories=False):
'''
pimms.save(filename, obj) attempts to pickle the given object obj in the filename (or stream,
if given). An error is raised when this cannot be accomplished; the first argument is always
    returned; though if the argument is a filename, it may be a different string that refers to
    the same file.
    The save/load protocol uses pickle for all saving/loading except when the object is a numpy
    object, in which case it is written using obj.tofile(). The save function writes meta-data
    into the file so it cannot simply be unpickled, but must be loaded using the pimms.load()
    function. Fundamentally, however, if an object can be pickled, it can be saved/loaded.
Options:
* overwrite (False) The optional parameter overwrite indicates whether an error should be
raised before opening the file if the file already exists.
* create_directories (False) The optional parameter create_directories indicates whether the
function should attempt to create the directories in which the filename exists if they do
not already exist.
'''
if isinstance(filename, six.string_types):
filename = os.path.expanduser(filename)
if not overwrite and os.path.exists(filename):
raise ValueError('save would overwrite file %s' % filename)
if create_directories:
dname = os.path.dirname(os.path.realpath(filename))
if not os.path.isdir(dname): os.makedirs(dname)
with open(filename, 'wb') as f:
_save_stream(f, obj)
else:
_save_stream(filename, obj)
return filename |
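
A brief usage sketch for the save helper above, assuming the pimms package is installed and that pimms.load is its documented counterpart; the path is illustrative:

import pimms

data = {'weights': [0.1, 0.2, 0.7], 'label': 'example'}
path = pimms.save('~/tmp/example_obj.pimms', data,
                  overwrite=True, create_directories=True)
restored = pimms.load(path)
print(restored == data)   # expected True for a plain picklable dict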
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
dldz = jac[0,1]
dndz = jac[1,1]
return - (dldz * self._lderiv(l,n) + \
dndz * self._nderiv(l,n)) | NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA) | Below is the the instruction that describes the task:
### Input:
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA)
### Response:
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
dldz = jac[0,1]
dndz = jac[1,1]
return - (dldz * self._lderiv(l,n) + \
dndz * self._nderiv(l,n)) |
def file_transfer_protocol_send(self, target_network, target_system, target_component, payload, force_mavlink1=False):
'''
File transfer message
target_network : Network ID (0 for broadcast) (uint8_t)
target_system : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand any the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t)
'''
return self.send(self.file_transfer_protocol_encode(target_network, target_system, target_component, payload), force_mavlink1=force_mavlink1) | File transfer message
target_network : Network ID (0 for broadcast) (uint8_t)
target_system : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand any the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t) | Below is the the instruction that describes the task:
### Input:
File transfer message
target_network : Network ID (0 for broadcast) (uint8_t)
target_system : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand any the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t)
### Response:
def file_transfer_protocol_send(self, target_network, target_system, target_component, payload, force_mavlink1=False):
'''
File transfer message
target_network : Network ID (0 for broadcast) (uint8_t)
target_system : System ID (0 for broadcast) (uint8_t)
target_component : Component ID (0 for broadcast) (uint8_t)
payload : Variable length payload. The length is defined by the remaining message length when subtracting the header and other fields. The entire content of this block is opaque unless you understand any the encoding message_type. The particular encoding used can be extension specific and might not always be documented as part of the mavlink specification. (uint8_t)
'''
return self.send(self.file_transfer_protocol_encode(target_network, target_system, target_component, payload), force_mavlink1=force_mavlink1) |
def ssh(self, enable=True, comment=None):
"""
Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='ssh',
params={'enable': enable, 'comment': comment}) | Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None | Below is the the instruction that describes the task:
### Input:
Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None
### Response:
def ssh(self, enable=True, comment=None):
"""
Enable or disable SSH
:param bool enable: enable or disable SSH daemon
:param str comment: optional comment for audit
:raises NodeCommandFailed: cannot enable SSH daemon
:return: None
"""
self.make_request(
NodeCommandFailed,
method='update',
resource='ssh',
params={'enable': enable, 'comment': comment}) |
def get_open_clinvar_submission(self, user_id, institute_id):
"""Retrieve the database id of an open clinvar submission for a user and institute,
if none is available then create a new submission and return it
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submission(obj) : an open clinvar submission object
"""
LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id)
query = dict(user_id=user_id, institute_id=institute_id, status='open')
submission = self.clinvar_submission_collection.find_one(query)
# If there is no open submission for this user and institute, create one
if submission is None:
submission_id = self.create_submission(user_id, institute_id)
submission = self.clinvar_submission_collection.find_one({'_id':submission_id})
return submission | Retrieve the database id of an open clinvar submission for a user and institute,
if none is available then create a new submission and return it
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submission(obj) : an open clinvar submission object | Below is the the instruction that describes the task:
### Input:
Retrieve the database id of an open clinvar submission for a user and institute,
if none is available then create a new submission and return it
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submission(obj) : an open clinvar submission object
### Response:
def get_open_clinvar_submission(self, user_id, institute_id):
"""Retrieve the database id of an open clinvar submission for a user and institute,
if none is available then create a new submission and return it
Args:
user_id(str): a user ID
institute_id(str): an institute ID
Returns:
submission(obj) : an open clinvar submission object
"""
LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id)
query = dict(user_id=user_id, institute_id=institute_id, status='open')
submission = self.clinvar_submission_collection.find_one(query)
# If there is no open submission for this user and institute, create one
if submission is None:
submission_id = self.create_submission(user_id, institute_id)
submission = self.clinvar_submission_collection.find_one({'_id':submission_id})
return submission |
def slides(self):
"""
|Slides| object containing the slides in this presentation.
"""
sldIdLst = self._element.get_or_add_sldIdLst()
self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
return Slides(sldIdLst, self) | |Slides| object containing the slides in this presentation. | Below is the the instruction that describes the task:
### Input:
|Slides| object containing the slides in this presentation.
### Response:
def slides(self):
"""
|Slides| object containing the slides in this presentation.
"""
sldIdLst = self._element.get_or_add_sldIdLst()
self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
return Slides(sldIdLst, self) |
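
For context, a short python-pptx style sketch of the property above; Presentation() with no argument opens the default template:

from pptx import Presentation

prs = Presentation()                      # default template, no slides yet
layout = prs.slide_layouts[0]             # title slide layout
prs.slides.add_slide(layout)
for idx, slide in enumerate(prs.slides):  # Slides is iterable
    print(idx, slide.slide_id)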
def total_memory():
""" Returns the the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable.
"""
with file('/proc/meminfo', 'r') as f:
for line in f:
words = line.split()
if words[0].upper() == 'MEMTOTAL:':
return int(words[1]) * 1024
    raise IOError('MemTotal unknown') | Returns the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable. | Below is the the instruction that describes the task:
### Input:
    Returns the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable.
### Response:
def total_memory():
""" Returns the the amount of memory available for use.
The memory is obtained from MemTotal entry in /proc/meminfo.
Notes
=====
This function is not very useful and not very portable.
"""
with file('/proc/meminfo', 'r') as f:
for line in f:
words = line.split()
if words[0].upper() == 'MEMTOTAL:':
return int(words[1]) * 1024
raise IOError('MemTotal unknown') |
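
A tiny usage sketch; note that the function as written relies on the Python 2 file() builtin, so under Python 3 the open() builtin would be needed instead:

mem_bytes = total_memory()
print('%.2f GiB reported by /proc/meminfo' % (mem_bytes / float(2 ** 30)))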
def get(self, block=1, delay=None):
"""Get a request from a queue, optionally block until a request
is available."""
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
# if the queue is empty and we do not block return None
if not block and not self.notempty.isSet():
if _debug: IOQueue._debug(" - not blocking and empty")
return None
# wait for something to be in the queue
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
# extract the first element
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
# return the request
return iocb | Get a request from a queue, optionally block until a request
is available. | Below is the the instruction that describes the task:
### Input:
Get a request from a queue, optionally block until a request
is available.
### Response:
def get(self, block=1, delay=None):
"""Get a request from a queue, optionally block until a request
is available."""
if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
# if the queue is empty and we do not block return None
if not block and not self.notempty.isSet():
if _debug: IOQueue._debug(" - not blocking and empty")
return None
# wait for something to be in the queue
if delay:
self.notempty.wait(delay)
if not self.notempty.isSet():
return None
else:
self.notempty.wait()
# extract the first element
priority, iocb = self.queue[0]
del self.queue[0]
iocb.ioQueue = None
# if the queue is empty, clear the event
qlen = len(self.queue)
if not qlen:
self.notempty.clear()
# return the request
return iocb |
def validate(user_input, ret_errs=False, print_errs=False):
"""
Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values.
"""
errs = run_validator(user_input)
passed = len(errs) == 0
if print_errs:
for err in errs:
print(err)
if ret_errs:
return passed, errs
return passed | Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values. | Below is the the instruction that describes the task:
### Input:
Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values.
### Response:
def validate(user_input, ret_errs=False, print_errs=False):
"""
Wrapper for run_validator function that returns True if the user_input
contains a valid STIX pattern or False otherwise. The error messages may
also be returned or printed based upon the ret_errs and print_errs arg
values.
"""
errs = run_validator(user_input)
passed = len(errs) == 0
if print_errs:
for err in errs:
print(err)
if ret_errs:
return passed, errs
return passed |
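
A hedged example for the wrapper above, assuming run_validator from the enclosing pattern-validation module is available alongside it; the pattern is a simple STIX 2 comparison expression:

pattern = "[ipv4-addr:value = '198.51.100.22']"

print(validate(pattern))                      # True if the pattern parses cleanly

passed, errs = validate(pattern, ret_errs=True)
if not passed:
    for err in errs:
        print(err)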
def size(args):
"""
%prog size fastqfile
Find the total base pairs in a list of fastq files
"""
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size))) | %prog size fastqfile
Find the total base pairs in a list of fastq files | Below is the the instruction that describes the task:
### Input:
%prog size fastqfile
Find the total base pairs in a list of fastq files
### Response:
def size(args):
"""
%prog size fastqfile
Find the total base pairs in a list of fastq files
"""
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in \
(op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in \
("Total", total_numrecords, total_size))) |
def receive_empty(self, message):
"""
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
"""
logger.debug("receive_empty - " + str(message))
try:
host, port = message.source
except AttributeError:
return
key_mid = str_append_hash(host, port, message.mid)
key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
key_token = str_append_hash(host, port, message.token)
key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
if key_mid in list(self._transactions.keys()):
transaction = self._transactions[key_mid]
elif key_token in self._transactions_token:
transaction = self._transactions_token[key_token]
elif key_mid_multicast in list(self._transactions.keys()):
transaction = self._transactions[key_mid_multicast]
elif key_token_multicast in self._transactions_token:
transaction = self._transactions_token[key_token_multicast]
else:
logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
return None
if message.type == defines.Types["ACK"]:
if not transaction.request.acknowledged:
transaction.request.acknowledged = True
elif (transaction.response is not None) and (not transaction.response.acknowledged):
transaction.response.acknowledged = True
elif message.type == defines.Types["RST"]:
if not transaction.request.acknowledged:
transaction.request.rejected = True
elif not transaction.response.acknowledged:
transaction.response.rejected = True
elif message.type == defines.Types["CON"]:
#implicit ACK (might have been lost)
logger.debug("Implicit ACK on received CON for waiting transaction")
transaction.request.acknowledged = True
else:
logger.warning("Unhandled message type...")
if transaction.retransmit_stop is not None:
transaction.retransmit_stop.set()
return transaction | Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to | Below is the the instruction that describes the task:
### Input:
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
### Response:
def receive_empty(self, message):
"""
Pair ACKs with requests.
:type message: Message
:param message: the received message
:rtype : Transaction
:return: the transaction to which the message belongs to
"""
logger.debug("receive_empty - " + str(message))
try:
host, port = message.source
except AttributeError:
return
key_mid = str_append_hash(host, port, message.mid)
key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
key_token = str_append_hash(host, port, message.token)
key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
if key_mid in list(self._transactions.keys()):
transaction = self._transactions[key_mid]
elif key_token in self._transactions_token:
transaction = self._transactions_token[key_token]
elif key_mid_multicast in list(self._transactions.keys()):
transaction = self._transactions[key_mid_multicast]
elif key_token_multicast in self._transactions_token:
transaction = self._transactions_token[key_token_multicast]
else:
logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
return None
if message.type == defines.Types["ACK"]:
if not transaction.request.acknowledged:
transaction.request.acknowledged = True
elif (transaction.response is not None) and (not transaction.response.acknowledged):
transaction.response.acknowledged = True
elif message.type == defines.Types["RST"]:
if not transaction.request.acknowledged:
transaction.request.rejected = True
elif not transaction.response.acknowledged:
transaction.response.rejected = True
elif message.type == defines.Types["CON"]:
#implicit ACK (might have been lost)
logger.debug("Implicit ACK on received CON for waiting transaction")
transaction.request.acknowledged = True
else:
logger.warning("Unhandled message type...")
if transaction.retransmit_stop is not None:
transaction.retransmit_stop.set()
return transaction |
def validate(cls, code, prefix):
'''
Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province
'''
reasons = []
pieces = code.split('-')
n = len(pieces)
if n > 3:
reasons.append('{}invalid geo code "{}"'.format(prefix, code))
elif n > 0 and pieces[0] not in geo_data:
reasons.append('{}unknown continent code "{}"'
.format(prefix, code))
elif n > 1 and pieces[1] not in geo_data[pieces[0]]:
reasons.append('{}unknown country code "{}"'.format(prefix, code))
elif n > 2 and \
pieces[2] not in geo_data[pieces[0]][pieces[1]]['provinces']:
reasons.append('{}unknown province code "{}"'.format(prefix, code))
return reasons | Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province | Below is the the instruction that describes the task:
### Input:
Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province
### Response:
def validate(cls, code, prefix):
'''
Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province
'''
reasons = []
pieces = code.split('-')
n = len(pieces)
if n > 3:
reasons.append('{}invalid geo code "{}"'.format(prefix, code))
elif n > 0 and pieces[0] not in geo_data:
reasons.append('{}unknown continent code "{}"'
.format(prefix, code))
elif n > 1 and pieces[1] not in geo_data[pieces[0]]:
reasons.append('{}unknown country code "{}"'.format(prefix, code))
elif n > 2 and \
pieces[2] not in geo_data[pieces[0]][pieces[1]]['provinces']:
reasons.append('{}unknown province code "{}"'.format(prefix, code))
return reasons |
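
An illustrative call of the classmethod above; GeoCodes is a stand-in name for whatever class defines it, and the prefix only labels any error messages:

# GeoCodes: placeholder name for the class that defines validate()
print(GeoCodes.validate('NA-US-KY', 'geo: '))   # [] for a valid code
print(GeoCodes.validate('NA-ZZ', 'geo: '))
# ['geo: unknown country code "NA-ZZ"']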
def to_csv(self, file: str, opts: dict = None) -> str:
"""
This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return:
"""
opts = opts if opts is not None else {}
ll = self._is_valid()
if ll:
if not self.sas.batch:
print(ll['LOG'])
else:
return ll
else:
return self.sas.write_csv(file, self.table, self.libref, self.dsopts, opts) | This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return: | Below is the the instruction that describes the task:
### Input:
This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return:
### Response:
def to_csv(self, file: str, opts: dict = None) -> str:
"""
This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return:
"""
opts = opts if opts is not None else {}
ll = self._is_valid()
if ll:
if not self.sas.batch:
print(ll['LOG'])
else:
return ll
else:
return self.sas.write_csv(file, self.table, self.libref, self.dsopts, opts) |
def add_items(self, items, index_items):
"""
Add items to template if is template, else add in item list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None
"""
count_templates = 0
count_items = 0
generated_items = []
for item in items:
if item.is_tpl():
self.add_template(item)
count_templates = count_templates + 1
else:
new_items = self.add_item(item, index_items)
count_items = count_items + max(1, len(new_items))
if new_items:
generated_items.extend(new_items)
if count_templates:
logger.info(' indexed %d template(s)', count_templates)
if count_items:
logger.info(' created %d %s(s).', count_items, self.inner_class.my_type) | Add items to template if is template, else add in item list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None | Below is the the instruction that describes the task:
### Input:
Add items to template if is template, else add in item list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None
### Response:
def add_items(self, items, index_items):
"""
Add items to template if is template, else add in item list
:param items: items list to add
:type items: alignak.objects.item.Items
:param index_items: Flag indicating if the items should be indexed on the fly.
:type index_items: bool
:return: None
"""
count_templates = 0
count_items = 0
generated_items = []
for item in items:
if item.is_tpl():
self.add_template(item)
count_templates = count_templates + 1
else:
new_items = self.add_item(item, index_items)
count_items = count_items + max(1, len(new_items))
if new_items:
generated_items.extend(new_items)
if count_templates:
logger.info(' indexed %d template(s)', count_templates)
if count_items:
logger.info(' created %d %s(s).', count_items, self.inner_class.my_type) |
def write(self, chunk: Union[str, bytes, dict]) -> None:
"""Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += (
". Lists not accepted for security reasons; see "
+ "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" # noqa: E501
)
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk) | Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009 | Below is the the instruction that describes the task:
### Input:
Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
### Response:
def write(self, chunk: Union[str, bytes, dict]) -> None:
"""Writes the given chunk to the output buffer.
To write the output to the network, use the `flush()` method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
``set_header`` *after* calling ``write()``).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish()")
if not isinstance(chunk, (bytes, unicode_type, dict)):
message = "write() only accepts bytes, unicode, and dict objects"
if isinstance(chunk, list):
message += (
". Lists not accepted for security reasons; see "
+ "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write" # noqa: E501
)
raise TypeError(message)
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk) |
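A hedged usage sketch of write() from inside a handler, assuming the standard tornado.web API; the handler class, route, and port below are invented for illustration. Writing a dict triggers the JSON path described above, while writing a list would raise the TypeError mentioned in the security note.

# Illustrative only -- InfoHandler and the /info route are made-up names.
import tornado.ioloop
import tornado.web

class InfoHandler(tornado.web.RequestHandler):
    def get(self):
        # A dict is JSON-encoded and Content-Type becomes application/json.
        self.write({"status": "ok", "items": 3})
        # self.write([1, 2, 3])  # lists are rejected with TypeError

if __name__ == "__main__":
    app = tornado.web.Application([(r"/info", InfoHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()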
def lookup_subclass(cls, d):
"""Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass
"""
try:
typeid = d["typeid"]
except KeyError:
raise FieldError("typeid not present in keys %s" % list(d))
subclass = cls._subcls_lookup.get(typeid, None)
if not subclass:
raise FieldError("'%s' not a valid typeid" % typeid)
else:
return subclass | Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass | Below is the the instruction that describes the task:
### Input:
Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass
### Response:
def lookup_subclass(cls, d):
"""Look up a class based on a serialized dictionary containing a typeid
Args:
d (dict): Dictionary with key "typeid"
Returns:
Serializable subclass
"""
try:
typeid = d["typeid"]
except KeyError:
raise FieldError("typeid not present in keys %s" % list(d))
subclass = cls._subcls_lookup.get(typeid, None)
if not subclass:
raise FieldError("'%s' not a valid typeid" % typeid)
else:
return subclass |
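A minimal sketch of the registry this lookup depends on: subclasses register themselves under a typeid string, and lookup_subclass maps a serialized dict back to the right class. The register decorator and the demo:point typeid are assumptions for illustration, not the original library's API.

# Hypothetical typeid registry; only the lookup behaviour mirrors the method above.
class Serializable:
    _subcls_lookup = {}
    typeid = None

    @classmethod
    def register(cls, typeid):
        def decorator(subcls):
            subcls.typeid = typeid
            cls._subcls_lookup[typeid] = subcls
            return subcls
        return decorator

    @classmethod
    def lookup_subclass(cls, d):
        return cls._subcls_lookup[d["typeid"]]

@Serializable.register("demo:point")
class Point(Serializable):
    def __init__(self, x, y):
        self.x, self.y = x, y

cls_found = Serializable.lookup_subclass({"typeid": "demo:point", "x": 1, "y": 2})
print(cls_found.__name__)   # Point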
def accuracy(self, outputs):
'''Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
'''
output = outputs[self.output_name]
predict = TT.argmax(output, axis=-1)
correct = TT.eq(predict, self._target)
acc = correct.mean()
if self._weights is not None:
acc = (self._weights * correct).sum() / self._weights.sum()
return acc | Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data. | Below is the the instruction that describes the task:
### Input:
Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
### Response:
def accuracy(self, outputs):
'''Build a Theano expression for computing the accuracy of graph output.
Parameters
----------
outputs : dict of Theano expressions
A dictionary mapping network output names to Theano expressions
representing the outputs of a computation graph.
Returns
-------
acc : Theano expression
A Theano expression representing the accuracy of the output compared
to the target data.
'''
output = outputs[self.output_name]
predict = TT.argmax(output, axis=-1)
correct = TT.eq(predict, self._target)
acc = correct.mean()
if self._weights is not None:
acc = (self._weights * correct).sum() / self._weights.sum()
return acc |
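Since the method only builds a symbolic graph, the arithmetic is easier to see in plain numpy; the snippet below restates the same argmax/compare/weighted-mean steps on concrete arrays and is not the Theano expression itself.

# numpy restatement of the accuracy computation (values are made up).
import numpy as np

output = np.array([[0.1, 0.7, 0.2],    # predicted class 1
                   [0.6, 0.3, 0.1]])   # predicted class 0
target = np.array([1, 2])
weights = np.array([1.0, 3.0])

predict = output.argmax(axis=-1)
correct = (predict == target).astype(float)
print(correct.mean())                              # 0.5  (unweighted)
print((weights * correct).sum() / weights.sum())   # 0.25 (weighted)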
def memoize(func):
"""Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed."""
func._cache_dict = {}
@wraps(func)
def _inner(*args, **kwargs):
return _get_memoized_value(func, args, kwargs)
return _inner | Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed. | Below is the the instruction that describes the task:
### Input:
Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed.
### Response:
def memoize(func):
"""Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed."""
func._cache_dict = {}
@wraps(func)
def _inner(*args, **kwargs):
return _get_memoized_value(func, args, kwargs)
return _inner |
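A quick demonstration of the caching effect. The helper _get_memoized_value is not shown in this record, so the variant below inlines an equivalent cache keyed on the call arguments (assumed hashable); only the wrapper body differs from the decorator above.

# Self-contained memoize variant plus a call counter to show cache hits.
from functools import wraps

def memoize(func):
    func._cache_dict = {}
    @wraps(func)
    def _inner(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in func._cache_dict:
            func._cache_dict[key] = func(*args, **kwargs)
        return func._cache_dict[key]
    return _inner

calls = []

@memoize
def slow_square(n):
    calls.append(n)            # records real invocations only
    return n * n

print(slow_square(4), slow_square(4))   # 16 16
print(calls)                            # [4] -> second call came from the cache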
def get_topic_sha3(event_block):
'''
takes an event block and returns a signature for sha3 hashing
:param event_block:
:return:
'''
sig = ""
sig += event_block["name"]
if not event_block["inputs"]:
sig += "()"
return sig
sig += "("
for input in event_block["inputs"]:
sig += input["type"]
sig += ","
sig = sig[:-1]
sig += ")"
return sig | takes an event block and returns a signature for sha3 hashing
:param event_block:
:return: | Below is the the instruction that describes the task:
### Input:
takes an event block and returns a signature for sha3 hashing
:param event_block:
:return:
### Response:
def get_topic_sha3(event_block):
'''
takes an event block and returns a signature for sha3 hashing
:param event_block:
:return:
'''
sig = ""
sig += event_block["name"]
if not event_block["inputs"]:
sig += "()"
return sig
sig += "("
for input in event_block["inputs"]:
sig += input["type"]
sig += ","
sig = sig[:-1]
sig += ")"
return sig |
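A worked example, assuming the get_topic_sha3 function above is in scope. The Transfer event block follows the usual ERC-20 shape and is only sample input; the returned string is what would subsequently be keccak/sha3-hashed to form the log topic.

transfer_event = {
    "name": "Transfer",
    "inputs": [
        {"type": "address"},
        {"type": "address"},
        {"type": "uint256"},
    ],
}

print(get_topic_sha3(transfer_event))                   # Transfer(address,address,uint256)
print(get_topic_sha3({"name": "Ping", "inputs": []}))   # Ping()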
def get_symbol_size(version, scale=1, border=None):
"""\
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param int version: A version constant.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height)
"""
if border is None:
border = get_default_border_size(version)
# M4 = 0, M3 = -1 ...
dim = version * 4 + 17 if version > 0 else (version + 4) * 2 + 9
dim += 2 * border
dim *= scale
return dim, dim | \
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param int version: A version constant.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height) | Below is the the instruction that describes the task:
### Input:
\
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param int version: A version constant.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height)
### Response:
def get_symbol_size(version, scale=1, border=None):
"""\
Returns the symbol size (width x height) with the provided border and
scaling factor.
:param int version: A version constant.
:param scale: Indicates the size of a single module (default: 1).
The size of a module depends on the used output format; i.e.
in a PNG context, a scaling factor of 2 indicates that a module
has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
floating point values.
:type scale: int or float
:param int border: The border size or ``None`` to specify the
default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
:rtype: tuple (width, height)
"""
if border is None:
border = get_default_border_size(version)
# M4 = 0, M3 = -1 ...
dim = version * 4 + 17 if version > 0 else (version + 4) * 2 + 9
dim += 2 * border
dim *= scale
return dim, dim |
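A short arithmetic check of the size formula for regular QR codes (version > 0): version 1 is 21 modules per side, the default quiet zone adds 4 modules on every edge, and the scale multiplies the total. The helper below mirrors only that branch so the numbers can be verified without the library.

# Stand-alone mirror for QR versions only (border passed explicitly).
def symbol_size(version, scale=1, border=4):
    dim = version * 4 + 17     # version 1 -> 21 modules per side
    dim += 2 * border          # quiet zone on both sides
    return dim * scale, dim * scale

print(symbol_size(1))                      # (29, 29): 21 + 2*4
print(symbol_size(1, scale=10))            # (290, 290): each module 10x10 pixels
print(symbol_size(7, scale=2, border=0))   # (90, 90): 7*4 + 17 = 45 modules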
def parse_domain_users(domain_users_file, domain_groups_file):
"""
Parses the domain users and groups files.
"""
with open(domain_users_file) as f:
users = json.loads(f.read())
domain_groups = {}
if domain_groups_file:
with open(domain_groups_file) as f:
groups = json.loads(f.read())
for group in groups:
sid = get_field(group, 'objectSid')
domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn')
user_search = UserSearch()
count = 0
total = len(users)
print_notification("Importing {} users".format(total))
for entry in users:
result = parse_user(entry, domain_groups)
user = user_search.id_to_object(result['username'])
user.name = result['name']
user.domain.append(result['domain'])
user.description = result['description']
user.groups.extend(result['groups'])
user.flags.extend(result['flags'])
user.sid = result['sid']
user.add_tag("domaindump")
user.save()
count += 1
sys.stdout.write('\r')
sys.stdout.write("[{}/{}]".format(count, total))
sys.stdout.flush()
sys.stdout.write('\r')
return count | Parses the domain users and groups files. | Below is the the instruction that describes the task:
### Input:
Parses the domain users and groups files.
### Response:
def parse_domain_users(domain_users_file, domain_groups_file):
"""
Parses the domain users and groups files.
"""
with open(domain_users_file) as f:
users = json.loads(f.read())
domain_groups = {}
if domain_groups_file:
with open(domain_groups_file) as f:
groups = json.loads(f.read())
for group in groups:
sid = get_field(group, 'objectSid')
domain_groups[int(sid.split('-')[-1])] = get_field(group, 'cn')
user_search = UserSearch()
count = 0
total = len(users)
print_notification("Importing {} users".format(total))
for entry in users:
result = parse_user(entry, domain_groups)
user = user_search.id_to_object(result['username'])
user.name = result['name']
user.domain.append(result['domain'])
user.description = result['description']
user.groups.extend(result['groups'])
user.flags.extend(result['flags'])
user.sid = result['sid']
user.add_tag("domaindump")
user.save()
count += 1
sys.stdout.write('\r')
sys.stdout.write("[{}/{}]".format(count, total))
sys.stdout.flush()
sys.stdout.write('\r')
return count |
def run(self):
"""Increments counter and raises an exception for first two runs."""
self.count += 1
print('FailTwicePlug: Run number %s' % (self.count))
if self.count < 3:
raise RuntimeError('Fails a couple times')
return True | Increments counter and raises an exception for first two runs. | Below is the the instruction that describes the task:
### Input:
Increments counter and raises an exception for first two runs.
### Response:
def run(self):
"""Increments counter and raises an exception for first two runs."""
self.count += 1
print('FailTwicePlug: Run number %s' % (self.count))
if self.count < 3:
raise RuntimeError('Fails a couple times')
return True |
def char_style(self, style):
'''Sets the character style.
Args:
style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'
Returns:
None
Raises:
RuntimeError: Invalid character style
'''
styleset = {'normal': 0,
'outline': 1,
'shadow': 2,
'outlineshadow': 3
}
if style in styleset:
self.send(chr(27) + 'q' + chr(styleset[style]))
else:
raise RuntimeError('Invalid character style in function charStyle') | Sets the character style.
Args:
style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'
Returns:
None
Raises:
RuntimeError: Invalid character style | Below is the the instruction that describes the task:
### Input:
Sets the character style.
Args:
style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'
Returns:
None
Raises:
RuntimeError: Invalid character style
### Response:
def char_style(self, style):
'''Sets the character style.
Args:
style: The desired character style. Choose from 'normal', 'outline', 'shadow', and 'outlineshadow'
Returns:
None
Raises:
RuntimeError: Invalid character style
'''
styleset = {'normal': 0,
'outline': 1,
'shadow': 2,
'outlineshadow': 3
}
if style in styleset:
self.send(chr(27) + 'q' + chr(styleset[style]))
else:
raise RuntimeError('Invalid character style in function charStyle') |
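A hedged sketch of how such a method is exercised; the FakePrinter below is a stand-in whose send() merely records the escape sequence, since the real device transport is not part of this record.

# Stand-in printer that records the ESC/POS-style bytes instead of sending them.
class FakePrinter:
    def __init__(self):
        self.sent = []

    def send(self, data):
        self.sent.append(data)

    def char_style(self, style):
        styleset = {'normal': 0, 'outline': 1, 'shadow': 2, 'outlineshadow': 3}
        if style not in styleset:
            raise RuntimeError('Invalid character style in function charStyle')
        self.send(chr(27) + 'q' + chr(styleset[style]))

p = FakePrinter()
p.char_style('shadow')
print([ord(c) for c in p.sent[0]])   # [27, 113, 2] -> ESC q 2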
def get_response_signer(self):
"""Returns the response signer for this version of the signature.
"""
if not hasattr(self, "response_signer"):
self.response_signer = V2ResponseSigner(self.digest, orig=self)
return self.response_signer | Returns the response signer for this version of the signature. | Below is the the instruction that describes the task:
### Input:
Returns the response signer for this version of the signature.
### Response:
def get_response_signer(self):
"""Returns the response signer for this version of the signature.
"""
if not hasattr(self, "response_signer"):
self.response_signer = V2ResponseSigner(self.digest, orig=self)
return self.response_signer |
def visc_rad_kap_sc(T,rho,X):
'''
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
'''
kappa = 0.2*(1.+X)
nu_rad = 6.88e-26*(old_div(T**4,(kappa*rho**2)))
return nu_rad | Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2) | Below is the the instruction that describes the task:
### Input:
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
### Response:
def visc_rad_kap_sc(T,rho,X):
'''
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
X : float
H mass fraction
T : float
temperature in K
rho : float
density in cgs
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Examples
--------
>>> In [1]: import astronomy as ast
>>> In [2]: l = 100*1.e5 # 100km
>>> In [3]: v = 1.e5 # typical velocity
>>> In [4]: T = 90.e6 # temperature
>>> In [5]: X = 0.001 # H mass fraction
>>> In [6]: rho = 100. # density
>>> In [7]: nu = ast.visc_rad_kap_sc(T,rho,X)
>>> In [8]: Re=v*l/nu
>>> In [9]: print "Re_rad = "+str('%g'%Re)
>>> Re_rad = 4.43512e+08
Notes
-----
Eqn. 14' in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
'''
kappa = 0.2*(1.+X)
nu_rad = 6.88e-26*(old_div(T**4,(kappa*rho**2)))
return nu_rad |
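The Reynolds-number example in the docstring can be checked directly. The stand-alone function below repeats the same formula, kappa = 0.2(1+X) and nu = 6.88e-26 T**4 / (kappa rho**2), without the past.utils old_div helper, and reproduces Re ~ 4.435e8.

# Stand-alone check of the docstring example above.
def visc_rad(T, rho, X):
    kappa = 0.2 * (1.0 + X)                       # electron-scattering opacity
    return 6.88e-26 * T**4 / (kappa * rho**2)     # [cm**2/s]

l, v = 100 * 1.0e5, 1.0e5        # 100 km length scale, typical velocity
T, X, rho = 90.0e6, 0.001, 100.0
nu = visc_rad(T, rho, X)
print("nu = %g cm**2/s" % nu)    # ~2254.7
print("Re = %g" % (v * l / nu))  # ~4.43512e+08, matching the docstring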
def list_images(self, repository_name, registry_id=None):
"""
maxResults and filtering not implemented
"""
repository = None
found = False
if repository_name in self.repositories:
repository = self.repositories[repository_name]
if registry_id:
if repository.registry_id == registry_id:
found = True
else:
found = True
if not found:
raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID)
images = []
for image in repository.images:
images.append(image)
return images | maxResults and filtering not implemented | Below is the the instruction that describes the task:
### Input:
maxResults and filtering not implemented
### Response:
def list_images(self, repository_name, registry_id=None):
"""
maxResults and filtering not implemented
"""
repository = None
found = False
if repository_name in self.repositories:
repository = self.repositories[repository_name]
if registry_id:
if repository.registry_id == registry_id:
found = True
else:
found = True
if not found:
raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID)
images = []
for image in repository.images:
images.append(image)
return images |
def trim_docstring(docstring):
"""Taken from http://www.python.org/dev/peps/pep-0257/"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
lines[0] = lines[0].strip()
if indent < maxsize:
index = 1
for line in lines[1:]:
lines[index] = line[indent:].rstrip()
index += 1
# Strip off trailing and leading blank lines:
while lines and not lines[-1]:
del lines[-1]
while lines and not lines[0]:
del lines[0]
# Return a single string:
return '\n'.join(lines) | Taken from http://www.python.org/dev/peps/pep-0257/ | Below is the the instruction that describes the task:
### Input:
Taken from http://www.python.org/dev/peps/pep-0257/
### Response:
def trim_docstring(docstring):
"""Taken from http://www.python.org/dev/peps/pep-0257/"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
lines[0] = lines[0].strip()
if indent < maxsize:
index = 1
for line in lines[1:]:
lines[index] = line[indent:].rstrip()
index += 1
# Strip off trailing and leading blank lines:
while lines and not lines[-1]:
del lines[-1]
while lines and not lines[0]:
del lines[0]
# Return a single string:
return '\n'.join(lines) |
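A small demonstration of the trimming behaviour, assuming the trim_docstring function above is in scope together with its maxsize import (from sys). Leading and trailing blank lines are dropped and the common indentation of the continuation lines is removed.

doc = """Summary line.

    Indented detail line one.
    Indented detail line two.

"""
print(repr(trim_docstring(doc)))
# 'Summary line.\n\nIndented detail line one.\nIndented detail line two.'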
def list_namespaced_pod(self, namespace, **kwargs):
"""
list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_pod_with_http_info(namespace, **kwargs)
return data | list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_namespaced_pod(self, namespace, **kwargs):
"""
list or watch objects of kind Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_pod_with_http_info(namespace, **kwargs)
return data |
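A hedged usage sketch with the official Python client, exercising the limit/continue paging described at length above. It assumes a reachable cluster, a local kubeconfig, and the usual kubernetes.client/config entry points; the namespace and page size are arbitrary.

# Illustrative paging loop -- requires cluster access to actually run.
from kubernetes import client, config

config.load_kube_config()            # assumes ~/.kube/config is present
v1 = client.CoreV1Api()

kwargs = {"limit": 50}
while True:
    resp = v1.list_namespaced_pod("default", **kwargs)
    for pod in resp.items:
        print(pod.metadata.name, pod.status.phase)
    token = resp.metadata._continue  # continue token from the list metadata
    if not token:
        break
    kwargs["_continue"] = token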
def get_request_date(cls, req):
"""
Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if neither date header
is found or is in a recognisable format.
req -- a requests PreparedRequest object
"""
date = None
for header in ['x-amz-date', 'date']:
if header not in req.headers:
continue
try:
date_str = cls.parse_date(req.headers[header])
except DateFormatError:
continue
try:
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
except ValueError:
continue
else:
break
return date | Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if neither date header
is found or is in a recognisable format.
req -- a requests PreparedRequest object | Below is the the instruction that describes the task:
### Input:
Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if neither date header
is found or is in a recognisable format.
req -- a requests PreparedRequest object
### Response:
def get_request_date(cls, req):
"""
Try to pull a date from the request by looking first at the
x-amz-date header, and if that's not present then the Date header.
Return a datetime.date object, or None if neither date header
is found or is in a recognisable format.
req -- a requests PreparedRequest object
"""
date = None
for header in ['x-amz-date', 'date']:
if header not in req.headers:
continue
try:
date_str = cls.parse_date(req.headers[header])
except DateFormatError:
continue
try:
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
except ValueError:
continue
else:
break
return date |
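A stand-alone illustration of the header precedence: x-amz-date is consulted before Date, and anything the date parser cannot normalise is skipped. The real classmethod delegates to cls.parse_date for several wire formats; here that step is reduced to an ISO YYYY-MM-DD parse purely for the demo.

import datetime

def request_date(headers):
    for header in ['x-amz-date', 'date']:
        if header not in headers:
            continue
        try:
            return datetime.datetime.strptime(headers[header], '%Y-%m-%d').date()
        except ValueError:
            continue
    return None

print(request_date({'x-amz-date': '2015-03-07', 'date': '2015-03-01'}))  # 2015-03-07
print(request_date({'date': 'not-a-date'}))                              # None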
def find_methods(self, classname=".*", methodname=".*", descriptor=".*",
accessflags=".*", no_external=False):
"""
Find a method by name using regular expression.
This method will return all MethodClassAnalysis objects, which match the
classname, methodname, descriptor and accessflags of the method.
:param classname: regular expression for the classname
:param methodname: regular expression for the method name
:param descriptor: regular expression for the descriptor
:param accessflags: regular expression for the accessflags
:param no_external: Remove external method from the output (default False)
:rtype: generator of `MethodClassAnalysis`
"""
for cname, c in self.classes.items():
if re.match(classname, cname):
for m in c.get_methods():
z = m.get_method()
# TODO is it even possible that an internal class has
# external methods? Maybe we should check for ExternalClass
# instead...
if no_external and isinstance(z, ExternalMethod):
continue
if re.match(methodname, z.get_name()) and \
re.match(descriptor, z.get_descriptor()) and \
re.match(accessflags, z.get_access_flags_string()):
yield m | Find a method by name using regular expression.
This method will return all MethodClassAnalysis objects, which match the
classname, methodname, descriptor and accessflags of the method.
:param classname: regular expression for the classname
:param methodname: regular expression for the method name
:param descriptor: regular expression for the descriptor
:param accessflags: regular expression for the accessflags
:param no_external: Remove external method from the output (default False)
:rtype: generator of `MethodClassAnalysis` | Below is the the instruction that describes the task:
### Input:
Find a method by name using regular expression.
This method will return all MethodClassAnalysis objects, which match the
classname, methodname, descriptor and accessflags of the method.
:param classname: regular expression for the classname
:param methodname: regular expression for the method name
:param descriptor: regular expression for the descriptor
:param accessflags: regular expression for the accessflags
:param no_external: Remove external method from the output (default False)
:rtype: generator of `MethodClassAnalysis`
### Response:
def find_methods(self, classname=".*", methodname=".*", descriptor=".*",
accessflags=".*", no_external=False):
"""
Find a method by name using regular expression.
This method will return all MethodClassAnalysis objects, which match the
classname, methodname, descriptor and accessflags of the method.
:param classname: regular expression for the classname
:param methodname: regular expression for the method name
:param descriptor: regular expression for the descriptor
:param accessflags: regular expression for the accessflags
:param no_external: Remove external method from the output (default False)
:rtype: generator of `MethodClassAnalysis`
"""
for cname, c in self.classes.items():
if re.match(classname, cname):
for m in c.get_methods():
z = m.get_method()
# TODO is it even possible that an internal class has
# external methods? Maybe we should check for ExternalClass
# instead...
if no_external and isinstance(z, ExternalMethod):
continue
if re.match(methodname, z.get_name()) and \
re.match(descriptor, z.get_descriptor()) and \
re.match(accessflags, z.get_access_flags_string()):
yield m |
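A hedged usage sketch against an androguard Analysis object; "app.apk" and the class/method patterns are placeholders, and AnalyzeAPK is assumed to be the usual androguard.misc entry point returning the Analysis object as its third value.

# Illustrative only -- needs a real APK on disk to run.
from androguard.misc import AnalyzeAPK

a, d, dx = AnalyzeAPK("app.apk")     # dx is the Analysis object

# Every onCreate inside the app's own package, skipping external methods.
for mca in dx.find_methods(classname="Lcom/example/.*",
                           methodname="onCreate", no_external=True):
    m = mca.get_method()
    print(m.get_class_name(), m.get_name(), m.get_descriptor())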
def set_used_labels(self, labels):
""" Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object.
"""
mask = np.zeros(self.cl_.size, dtype=bool)
for l in labels:
mask = np.logical_or(mask, self.cl_ == l)
self.trial_mask_ = mask
return self | Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object. | Below is the the instruction that describes the task:
### Input:
Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object.
### Response:
def set_used_labels(self, labels):
""" Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object.
"""
mask = np.zeros(self.cl_.size, dtype=bool)
for l in labels:
mask = np.logical_or(mask, self.cl_ == l)
self.trial_mask_ = mask
return self |
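A minimal numpy illustration of the masking step the method performs on its stored labels; the Workspace class itself is not reproduced, and the label values are made up.

import numpy as np

cl = np.array([1, 2, 1, 3, 2, 1])   # per-trial class labels (sample data)
labels = [1, 3]                     # classes to keep

mask = np.zeros(cl.size, dtype=bool)
for l in labels:
    mask = np.logical_or(mask, cl == l)

print(mask)               # [ True False  True  True False  True]
print(np.where(mask)[0])  # trials 0, 2, 3, 5 are retained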