def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
"""Parses protein data for a certain protein into tsv output
dictionary"""
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report)
def truncate(self, length):
"""Return a new `Multihash` with a shorter digest `length`.
If the given `length` is greater than the original, a `ValueError`
is raised.
>>> mh1 = Multihash(0x01, b'FOOBAR')
>>> mh2 = mh1.truncate(3)
>>> mh2 == (0x01, b'FOO')
True
>>> mh3 = mh1.truncate(10)
Traceback (most recent call last):
...
ValueError: cannot enlarge the original digest by 4 bytes
"""
if length > len(self.digest):
raise ValueError("cannot enlarge the original digest by %d bytes"
% (length - len(self.digest)))
return self.__class__(self.func, self.digest[:length])
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# should be processed already, but may not in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def get_context_data(self, **kwargs):
"""Add context data to view"""
context = super().get_context_data(**kwargs)
tabs = self.get_active_tabs()
context.update({
'page_detail_tabs': tabs,
'active_tab': tabs[0].code if tabs else '',
'app_label': self.get_app_label(),
'model_name': self.get_model_name(),
'model_alias': self.get_model_alias(),
'model_verbose_name': self.object._meta.verbose_name.title(),
'back_url': self.get_back_url(),
'edit_url': self.get_edit_url(),
'delete_url': self.get_delete_url(),
'title': self.title,
})
return context
def lx4num(string, first):
"""
Scan a string from a specified starting position for the
end of a number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4num_c.html
:param string: Any character string.
:type string: str
:param first: First character to scan from in string.
:type first: int
:return: last and nchar
:rtype: tuple
"""
string = stypes.stringToCharP(string)
first = ctypes.c_int(first)
last = ctypes.c_int()
nchar = ctypes.c_int()
libspice.lx4num_c(string, first, ctypes.byref(last), ctypes.byref(nchar))
return last.value, nchar.value
def fmap(self, f: Callable[[T], B]) -> 'List[B]':
"""doufo.List.fmap: map `List`
Args:
`self`:
`f` (`Callable[[T], B]`): any callable function
Returns:
return (`List[B]`): A `List` of objects produced by `f`.
Raises:
"""
return List([f(x) for x in self.unbox()])
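A minimal usage sketch of the fmap above (assuming doufo.List is importable as shown and unbox() returns the wrapped plain list):

from doufo import List
xs = List([1, 2, 3])
ys = xs.fmap(lambda x: x * 2)   # a new List wrapping [2, 4, 6]
print(ys.unbox())               # [2, 4, 6]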
def is_valid_mac(addr):
"""Check the syntax of a given mac address.
The acceptable format is xx:xx:xx:xx:xx:xx
"""
addrs = addr.split(':')
if len(addrs) != 6:
return False
for m in addrs:
try:
if int(m, 16) > 255:
return False
except ValueError:
return False
return True
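The checks above accept exactly six colon-separated groups, each a hexadecimal value of at most 0xff; a few illustrative calls:

print(is_valid_mac('aa:bb:cc:dd:ee:ff'))   # True
print(is_valid_mac('AA:BB:CC:DD:EE:0F'))   # True, case and leading zeros are fine
print(is_valid_mac('aa:bb:cc:dd:ee'))      # False, only five groups
print(is_valid_mac('aa:bb:cc:dd:ee:zz'))   # False, 'zz' is not hexadecimal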
def analyze(problem, Y, calc_second_order=True, num_resamples=100,
conf_level=0.95, print_to_console=False, parallel=False,
n_processors=None, seed=None):
"""Perform Sobol Analysis on model outputs.
Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where
each entry is a list of size D (the number of parameters) containing the
indices in the same order as the parameter file. If calc_second_order is
True, the dictionary also contains keys 'S2' and 'S2_conf'.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
calc_second_order : bool
Calculate second-order sensitivities (default True)
num_resamples : int
The number of resamples (default 100)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
"""
if seed:
np.random.seed(seed)
# determining if groups are defined and adjusting the number
# of rows in the cross-sampled matrix accordingly
if not problem.get('groups'):
D = problem['num_vars']
else:
D = len(set(problem['groups']))
if calc_second_order and Y.size % (2 * D + 2) == 0:
N = int(Y.size / (2 * D + 2))
elif not calc_second_order and Y.size % (D + 2) == 0:
N = int(Y.size / (D + 2))
else:
raise RuntimeError("""
Incorrect number of samples in model output file.
Confirm that calc_second_order matches option used during sampling.""")
if conf_level < 0 or conf_level > 1:
raise RuntimeError("Confidence level must be between 0-1.")
# normalize the model output
Y = (Y - Y.mean()) / Y.std()
A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)
r = np.random.randint(N, size=(N, num_resamples))
Z = norm.ppf(0.5 + conf_level / 2)
if not parallel:
S = create_Si_dict(D, calc_second_order)
for j in range(D):
S['S1'][j] = first_order(A, AB[:, j], B)
S['S1_conf'][j] = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
S['ST'][j] = total_order(A, AB[:, j], B)
S['ST_conf'][j] = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
# Second order (+conf.)
if calc_second_order:
for j in range(D):
for k in range(j + 1, D):
S['S2'][j, k] = second_order(
A, AB[:, j], AB[:, k], BA[:, j], B)
S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],
AB[r, k], BA[r, j], B[r]).std(ddof=1)
else:
tasks, n_processors = create_task_list(
D, calc_second_order, n_processors)
func = partial(sobol_parallel, Z, A, AB, BA, B, r)
pool = Pool(n_processors)
S_list = pool.map_async(func, tasks)
pool.close()
pool.join()
S = Si_list_to_dict(S_list.get(), D, calc_second_order)
# Print results to console
if print_to_console:
print_indices(S, problem, calc_second_order)
# Add problem context and override conversion method for special case
S.problem = problem
S.to_df = MethodType(to_df, S)
return S
def renumber(args):
"""
%prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates.
"""
from jcvi.algorithms.lis import longest_increasing_subsequence
from jcvi.utils.grouper import Grouper
p = OptionParser(renumber.__doc__)
p.set_annot_reformat_opts()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bedfile, = args
pf = bedfile.rsplit(".", 1)[0]
abedfile = pf + ".a.bed"
bbedfile = pf + ".b.bed"
if need_update(bedfile, (abedfile, bbedfile)):
prepare(bedfile)
mbed = Bed(bbedfile)
g = Grouper()
for s in mbed:
accn = s.accn
g.join(*accn.split(";"))
bed = Bed(abedfile)
for chr, sbed in bed.sub_beds():
current_chr = chr_number(chr)
if not current_chr:
continue
ranks = []
gg = set()
for s in sbed:
accn = s.accn
achr, arank = atg_name(accn)
if achr != current_chr:
continue
ranks.append(arank)
gg.add(accn)
lranks = longest_increasing_subsequence(ranks)
print(current_chr, len(sbed), "==>", len(ranks), \
"==>", len(lranks), file=sys.stderr)
granks = set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, uc=opts.uc) for x in lranks) | \
set(gene_name(current_chr, x, prefix=opts.prefix, \
pad0=opts.pad0, sep="te", uc=opts.uc) for x in lranks)
tagstore = {}
for s in sbed:
achr, arank = atg_name(s.accn)
accn = s.accn
if accn in granks:
tag = (accn, FRAME)
elif accn in gg:
tag = (accn, RETAIN)
else:
tag = (".", NEW)
tagstore[accn] = tag
# Find cases where genes overlap
for s in sbed:
accn = s.accn
gaccn = g[accn]
tags = [((tagstore[x][-1] if x in tagstore else NEW), x) for x in gaccn]
group = [(PRIORITY.index(tag), x) for tag, x in tags]
best = min(group)[-1]
if accn != best:
tag = (best, OVERLAP)
else:
tag = tagstore[accn]
print("\t".join((str(s), "|".join(tag)))) | %prog renumber Mt35.consolidated.bed > tagged.bed
Renumber genes for annotation updates. |
def getNextService(self, discover):
"""Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: str -> [service]
@return: the next available service
"""
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service
def awake(self, procid):
""" Remove procid from waitlists and reestablish it in the running list """
logger.debug(f"Remove procid:{procid} from waitlists and reestablish it in the running list")
for wait_list in self.rwait:
if procid in wait_list:
wait_list.remove(procid)
for wait_list in self.twait:
if procid in wait_list:
wait_list.remove(procid)
self.timers[procid] = None
self.running.append(procid)
if self._current is None:
self._current = procid
def encode_username_password(
username: Union[str, bytes], password: Union[str, bytes]
) -> bytes:
"""Encodes a username/password pair in the format used by HTTP auth.
The return value is a byte string in the form ``username:password``.
.. versionadded:: 5.1
"""
if isinstance(username, unicode_type):
username = unicodedata.normalize("NFC", username)
if isinstance(password, unicode_type):
password = unicodedata.normalize("NFC", password)
return utf8(username) + b":" + utf8(password)
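A sketch of how this helper is typically combined with base64 to build an HTTP Basic Authorization header; the header construction is illustrative and assumes the helpers used above (utf8, unicode_type) are in scope:

import base64
credentials = encode_username_password("alice", "s3cret")
header_value = "Basic " + base64.b64encode(credentials).decode("ascii")
# header_value == "Basic YWxpY2U6czNjcmV0"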
def set_translation(lang):
"""Set the translation used by (some) pywws modules.
This sets the translation object ``pywws.localisation.translation``
to use a particular language.
The ``lang`` parameter can be any string of the form ``en``,
``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is
ignored. In the case of a string such as ``en_GB``, the routine
will search for an ``en_GB`` language file before searching for an
``en`` one.
:param lang: language code.
:type lang: string
:return: success status.
:rtype: bool
"""
global translation
# make list of possible languages, in order of preference
langs = list()
if lang:
if '.' in lang:
lang = lang.split('.')[0]
langs += [lang, lang[:2]]
# get translation object
path = pkg_resources.resource_filename('pywws', 'lang')
codeset = locale.getpreferredencoding()
if codeset == 'ASCII':
codeset = 'UTF-8'
try:
translation = gettext.translation(
'pywws', path, languages=langs, codeset=codeset)
# Python 3 translations don't have a ugettext method
if not hasattr(translation, 'ugettext'):
translation.ugettext = translation.gettext
except IOError:
return False
return True
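A minimal usage sketch, assuming it runs where the module-level translation object set above is visible; a request for 'fr_FR.UTF-8' is tried as 'fr_FR' first and then falls back to 'fr':

if set_translation('fr_FR.UTF-8'):
    _ = translation.ugettext
    print(_('Temperature'))
else:
    print('no matching language file, keeping untranslated strings')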
def accept(self):
"""
Call the :meth:`accept` method of the underlying socket and set up SSL
on the returned socket, using the Context object supplied to this
:class:`Connection` object at creation.
:return: A *(conn, addr)* pair where *conn* is the new
:class:`Connection` object created, and *addr* is as returned by
the socket's :meth:`accept`.
"""
client, addr = self._socket.accept()
conn = Connection(self._context, client)
conn.set_accept_state()
return (conn, addr)
def attach_related_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as attachment:
    content = attachment.read()
self.attach_related(filename, content, mimetype)
def convertPrice(variant, regex=None, short_regex=None, none_regex=none_price_regex):
''' Helper function to convert the given input price into integers (cents
count). :obj:`int`, :obj:`float` and :obj:`str` are supported
:param variant: Price
:param re.compile regex: Regex to convert str into price. The re should
contain two named groups `euro` and `cent`
:param re.compile short_regex: Short regex version (no cent part)
group `euro` should contain a valid integer.
:param re.compile none_regex: Regex to detect that no value is provided.
If the input is a str, the normal regexes do not match, and this
regex matches, `None` is returned.
:rtype: int/None'''
if isinstance(variant, int) and not isinstance(variant, bool):
return variant
elif isinstance(variant, float):
return round(variant * 100)
elif isinstance(variant, str):
match = (regex or default_price_regex).search(variant) \
or (short_regex or short_price_regex).match(variant)
if not match:
if none_regex and none_regex.match(variant):
return None
raise ValueError('Could not extract price: {0}'.format(variant))
return int(match.group('euro')) * 100 + \
int(match.groupdict().get('cent', '').ljust(2, '0'))
else:
raise TypeError('Unknown price type: {0!r}'.format(variant))
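Worked examples for the three accepted input types; the string case assumes the module's default_price_regex recognizes a comma-separated euro/cent form such as '2,50':

convertPrice(250)       # int is already a cent count -> 250
convertPrice(2.5)       # float euros are rounded to cents -> 250
convertPrice('2,50')    # parsed via the named euro/cent groups -> 250, if the default regex matches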
def margin(
self,
axis=None,
weighted=True,
include_missing=False,
include_transforms_for_dims=None,
prune=False,
include_mr_cat=False,
):
"""Return ndarray representing slice margin across selected axis.
A margin (or basis) can be calculated for a contingency table, provided
that the dimensions of the desired directions are marginable. The
dimensions are marginable if they represent mutually exclusive data,
such as true categorical data. For array types the items dimensions are
not marginable. Requesting a margin across these dimensions
(e.g. slice.margin(axis=0) for a categorical array cube slice) will
produce an error. For multiple response slices, the implicit convention
is that the provided direction scales to the selections dimension of the
slice. These cases produce meaningful data, but of a slightly different
shape (e.g. slice.margin(0) for a MR x CAT slice will produce 2D ndarray
(variable dimensions are never collapsed!)).
:param axis: Axis across which to sum. Can be 0 (columns margin),
1 (rows margin) and None (table margin). If requested across
variables dimension (e.g. requesting 0 margin for CA array) it will
produce an error.
:param weighted: Weighted or unweighted counts.
:param include_missing: Include missing categories or not.
:param include_transforms_for_dims: Indices of dimensions for which to
include transformations
:param prune: Perform pruning based on unweighted counts.
:returns: (weighted or unweighted counts) summed across provided axis.
For multiple response types, items dimensions are not collapsed.
"""
axis = self._calculate_correct_axis_for_cube(axis)
hs_dims = self._hs_dims_for_cube(include_transforms_for_dims)
margin = self._cube.margin(
axis=axis,
weighted=weighted,
include_missing=include_missing,
include_transforms_for_dims=hs_dims,
prune=prune,
include_mr_cat=include_mr_cat,
)
return self._extract_slice_result_from_cube(margin)
def get_entry_categories(self, category_nodes):
"""
Return a list of entry's categories
based on imported categories.
"""
categories = []
for category_node in category_nodes:
domain = category_node.attrib.get('domain')
if domain == 'category':
categories.append(self.categories[category_node.text])
return categories
def get_input(problem):
"""" Returns the specified problem answer in the form
problem: problem id
Returns string, or bytes if a file is loaded
"""
input_data = load_input()
pbsplit = problem.split(":")
problem_input = input_data['input'][pbsplit[0]]
if isinstance(problem_input, dict) and "filename" in problem_input and "value" in problem_input:
if len(pbsplit) > 1 and pbsplit[1] == 'filename':
return problem_input["filename"]
else:
return open(problem_input["value"], 'rb').read()
else:
return problem_input
def solubility_parameter(self):
r'''Solubility parameter of the chemical at its
current temperature and pressure, in units of [Pa^0.5].
.. math::
\delta = \sqrt{\frac{\Delta H_{vap} - RT}{V_m}}
Calculated based on enthalpy of vaporization and molar volume.
Normally calculated at STP. For uses of this property, see
:obj:`thermo.solubility.solubility_parameter`.
Examples
--------
>>> Chemical('NH3').solubility_parameter
24766.329043856073
'''
return solubility_parameter(T=self.T, Hvapm=self.Hvapm, Vml=self.Vml,
Method=self.solubility_parameter_method,
CASRN=self.CAS)
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
def register_blueprints(app, application_package_name=None, blueprint_directory=None):
"""Register Flask blueprints on app object"""
if not application_package_name:
application_package_name = 'app'
if not blueprint_directory:
blueprint_directory = os.path.join(os.getcwd(), application_package_name)
blueprint_directories = get_child_directories(blueprint_directory)
for directory in blueprint_directories:
abs_package = '{}.{}'.format(application_package_name, directory)
service = importlib.import_module(abs_package)
app.register_blueprint(service.blueprint_api, url_prefix='')
def UpdateFlow(self,
client_id,
flow_id,
flow_obj=db.Database.unchanged,
flow_state=db.Database.unchanged,
client_crash_info=db.Database.unchanged,
pending_termination=db.Database.unchanged,
processing_on=db.Database.unchanged,
processing_since=db.Database.unchanged,
processing_deadline=db.Database.unchanged):
"""Updates flow objects in the database."""
try:
flow = self.flows[(client_id, flow_id)]
except KeyError:
raise db.UnknownFlowError(client_id, flow_id)
if flow_obj != db.Database.unchanged:
self.flows[(client_id, flow_id)] = flow_obj
flow = flow_obj
if flow_state != db.Database.unchanged:
flow.flow_state = flow_state
if client_crash_info != db.Database.unchanged:
flow.client_crash_info = client_crash_info
if pending_termination != db.Database.unchanged:
flow.pending_termination = pending_termination
if processing_on != db.Database.unchanged:
flow.processing_on = processing_on
if processing_since != db.Database.unchanged:
flow.processing_since = processing_since
if processing_deadline != db.Database.unchanged:
flow.processing_deadline = processing_deadline
flow.last_update_time = rdfvalue.RDFDatetime.Now()
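The db.Database.unchanged default is a sentinel: only the arguments a caller actually passes overwrite the stored flow, and every other field is left untouched. A generic, self-contained sketch of the same pattern (names here are illustrative, not taken from the code above):

_UNCHANGED = object()

def update_record(record, name=_UNCHANGED, age=_UNCHANGED):
    """Overwrite only the fields that were explicitly provided."""
    if name is not _UNCHANGED:
        record["name"] = name
    if age is not _UNCHANGED:
        record["age"] = age
    return record

update_record({"name": "old", "age": 7}, age=8)   # {'name': 'old', 'age': 8}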
def remove_accounts_from_group(accounts_query, group):
""" Remove accounts from group. """
query = accounts_query.filter(date_deleted__isnull=True)
for account in query:
remove_account_from_group(account, group)
def __read_device(self):
"""Read the state of the gamepad."""
state = XinputState()
res = self.manager.xinput.XInputGetState(
self.__device_number, ctypes.byref(state))
if res == XINPUT_ERROR_SUCCESS:
return state
if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED:
raise RuntimeError(
"Unknown error %d attempting to get state of device %d" % (
res, self.__device_number))
# else (device is not connected)
return None
def execute_catch(c, sql, vars=None):
"""Run a query, but ignore any errors. For error recovery paths where the error handler should not raise another."""
try:
c.execute(sql, vars)
except Exception as err:
cmd = sql.split(' ', 1)[0]
log.error("Error executing %s: %s", cmd, err) | Run a query, but ignore any errors. For error recovery paths where the error handler should not raise another. |
def create_intent(self,
parent,
intent,
language_code=None,
intent_view=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates an intent in the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> response = client.create_intent(parent, intent)
Args:
parent (str): Required. The agent to create an intent for.
Format: ``projects/<Project ID>/agent``.
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_intent' not in self._inner_api_calls:
self._inner_api_calls[
'create_intent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_intent,
default_retry=self._method_configs['CreateIntent'].retry,
default_timeout=self._method_configs['CreateIntent']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.CreateIntentRequest(
parent=parent,
intent=intent,
language_code=language_code,
intent_view=intent_view,
)
return self._inner_api_calls['create_intent'](
request, retry=retry, timeout=timeout, metadata=metadata)
def from_bytes(OverwinterTx, byte_string):
'''
byte-like -> OverwinterTx
'''
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b'\x03\x00\x00\x80' or group_id != b'\x70\x82\xc4\x03':
raise ValueError(
'Bad header or group ID. Expected {} and {}. Got: {} and {}'
.format(b'\x03\x00\x00\x80'.hex(),
b'\x70\x82\xc4\x03'.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
if current == len(byte_string):
# No joinsplits
tx_joinsplits = tuple()
joinsplit_pubkey = None
joinsplit_sig = None
else:
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_joinsplits_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = z.SproutJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return OverwinterTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig)
def add_to_stage(self, paths):
"""Stage given files
:param paths:
:return:
"""
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout))
def convert_runsummary_to_json(
df, comment='Uploaded via km3pipe.StreamDS', prefix='TEST_'
):
"""Convert a Pandas DataFrame with runsummary to JSON for DB upload"""
data_field = []
comment += ", by {}".format(getpass.getuser())
for det_id, det_data in df.groupby('det_id'):
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby('run'):
parameters_field = []
runs_field.append({
"Run": int(run),
"Parameters": parameters_field
})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns:
if parameter_name in REQUIRED_COLUMNS:
continue
if parameter_name not in parameter_dict:
entry = {'Name': prefix + parameter_name, 'Data': []}
parameter_dict[parameter_name] = entry
data_value = getattr(row, parameter_name)
try:
data_value = float(data_value)
except ValueError as e:
log.critical("Data values has to be floats!")
raise ValueError(e)
value = {'S': str(getattr(row, 'source')), 'D': data_value}
parameter_dict[parameter_name]['Data'].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload
def add_method(obj, func, name=None):
"""Adds an instance method to an object."""
if name is None:
name = func.__name__
if sys.version_info < (3,):
method = types.MethodType(func, obj, obj.__class__)
else:
method = types.MethodType(func, obj)
setattr(obj, name, method)
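A small usage sketch: binding a function as a method on a single instance without touching its class:

class Greeter(object):
    pass

def say_hello(self):
    return "hello from %s" % type(self).__name__

g = Greeter()
add_method(g, say_hello)
print(g.say_hello())    # hello from Greeter; other Greeter instances are unaffected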
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
"""Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter
"""
ds = ds.batch(batch_size)
# Turns 'g_0000001234_m_133' into 'g_0000001234'
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
while True:
h.update(sess.run(get_next))
except tf.errors.OutOfRangeError:
pass
# NOTE: Cannot be truly sure the count is right till the end.
return h
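The TF pipeline above just counts keys by their 12-character game prefix; a plain-Python equivalent of the same reduction (the keys here are made up for illustration):

import collections
keys = [b'g_0000001234_m_001', b'g_0000001234_m_002', b'g_0000005678_m_001']
h = collections.Counter(k[:12] for k in keys)
# Counter({b'g_0000001234': 2, b'g_0000005678': 1})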
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = X.map(_document_frequency).sum()
tfs = X.map(lambda x: np.asarray(x.sum(axis=0))).sum().ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return kept_indices, removed_terms
def _parse_args(self,freqsAngles=True,_firstFlip=False,*args):
"""Helper function to parse the arguments to the __call__ and actionsFreqsAngles functions"""
from galpy.orbit import Orbit
RasOrbit= False
integrated= True #whether the orbit was already integrated when given
if len(args) == 5 or len(args) == 3: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
if len(args) == 6 or len(args) == 4:
if len(args) == 6:
R,vR,vT, z, vz, phi= args
else:
R,vR,vT, phi= args
z, vz= 0., 0.
if isinstance(R,float):
os= [Orbit([R,vR,vT,z,vz,phi])]
RasOrbit= True
integrated= False
elif len(R.shape) == 1: #not integrated yet
os= [Orbit([R[ii],vR[ii],vT[ii],z[ii],vz[ii],phi[ii]]) for ii in range(R.shape[0])]
RasOrbit= True
integrated= False
if isinstance(args[0],Orbit) \
or (isinstance(args[0],list) and isinstance(args[0][0],Orbit)) \
or RasOrbit:
if RasOrbit:
pass
elif not isinstance(args[0],list):
os= [args[0]]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
else:
os= args[0]
if len(os[0]._orb.vxvv) == 3 or len(os[0]._orb.vxvv) == 5: #pragma: no cover
raise IOError("Must specify phi for actionAngleIsochroneApprox")
self._check_consistent_units_orbitInput(os[0])
if not hasattr(os[0]._orb,'orbit'): #not integrated yet
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
[o.integrate(self._tsJ,pot=self._pot,
method=self._integrate_method,
dt=self._integrate_dt) for o in os]
if _firstFlip:
for o in os:
o._orb.vxvv[1]= -o._orb.vxvv[1]
o._orb.vxvv[2]= -o._orb.vxvv[2]
o._orb.vxvv[4]= -o._orb.vxvv[4]
o._orb.orbit[:,1]= -o._orb.orbit[:,1]
o._orb.orbit[:,2]= -o._orb.orbit[:,2]
o._orb.orbit[:,4]= -o._orb.orbit[:,4]
integrated= False
ntJ= os[0].getOrbit().shape[0]
no= len(os)
R= nu.empty((no,ntJ))
vR= nu.empty((no,ntJ))
vT= nu.empty((no,ntJ))
z= nu.zeros((no,ntJ))+10.**-7. #To avoid numpy warnings for
vz= nu.zeros((no,ntJ))+10.**-7. #planarOrbits
phi= nu.empty((no,ntJ))
for ii in range(len(os)):
this_orbit= os[ii].getOrbit()
R[ii,:]= this_orbit[:,0]
vR[ii,:]= this_orbit[:,1]
vT[ii,:]= this_orbit[:,2]
if this_orbit.shape[1] == 6:
z[ii,:]= this_orbit[:,3]
vz[ii,:]= this_orbit[:,4]
phi[ii,:]= this_orbit[:,5]
else:
phi[ii,:]= this_orbit[:,3]
if freqsAngles and not integrated: #also integrate backwards in time, such that the requested point is not at the edge
no= R.shape[0]
nt= R.shape[1]
oR= nu.empty((no,2*nt-1))
ovR= nu.empty((no,2*nt-1))
ovT= nu.empty((no,2*nt-1))
oz= nu.zeros((no,2*nt-1))+10.**-7. #To avoid numpy warnings for
ovz= nu.zeros((no,2*nt-1))+10.**-7. #planarOrbits
ophi= nu.empty((no,2*nt-1))
if _firstFlip:
oR[:,:nt]= R[:,::-1]
ovR[:,:nt]= vR[:,::-1]
ovT[:,:nt]= vT[:,::-1]
oz[:,:nt]= z[:,::-1]
ovz[:,:nt]= vz[:,::-1]
ophi[:,:nt]= phi[:,::-1]
else:
oR[:,nt-1:]= R
ovR[:,nt-1:]= vR
ovT[:,nt-1:]= vT
oz[:,nt-1:]= z
ovz[:,nt-1:]= vz
ophi[:,nt-1:]= phi
#load orbits
if _firstFlip:
os= [Orbit([R[ii,0],vR[ii,0],vT[ii,0],z[ii,0],vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]
else:
os= [Orbit([R[ii,0],-vR[ii,0],-vT[ii,0],z[ii,0],-vz[ii,0],phi[ii,0]]) for ii in range(R.shape[0])]
#integrate orbits
[o.integrate(self._tsJ,pot=self._pot,
method=self._integrate_method,
dt=self._integrate_dt) for o in os]
#extract phase-space points along the orbit
ts= self._tsJ
if _firstFlip:
for ii in range(no):
oR[ii,nt:]= os[ii].R(ts[1:]) #drop t=0, which we have
ovR[ii,nt:]= os[ii].vR(ts[1:]) #already
ovT[ii,nt:]= os[ii].vT(ts[1:]) # reverse, such that
if os[ii].getOrbit().shape[1] == 6:
oz[ii,nt:]= os[ii].z(ts[1:]) #everything is in the
ovz[ii,nt:]= os[ii].vz(ts[1:]) #right order
ophi[ii,nt:]= os[ii].phi(ts[1:]) #!
else:
for ii in range(no):
oR[ii,:nt-1]= os[ii].R(ts[1:])[::-1] #drop t=0, which we have
ovR[ii,:nt-1]= -os[ii].vR(ts[1:])[::-1] #already
ovT[ii,:nt-1]= -os[ii].vT(ts[1:])[::-1] # reverse, such that
if os[ii].getOrbit().shape[1] == 6:
oz[ii,:nt-1]= os[ii].z(ts[1:])[::-1] #everything is in the
ovz[ii,:nt-1]= -os[ii].vz(ts[1:])[::-1] #right order
ophi[ii,:nt-1]= os[ii].phi(ts[1:])[::-1] #!
return (oR,ovR,ovT,oz,ovz,ophi)
else:
return (R,vR,vT,z,vz,phi)
def get_user_presence(self, userid):
''' check on presence of a user '''
response, status_code = self.__pod__.Presence.get_v2_user_uid_presence(
sessionToken=self.__session__,
uid=userid
).result()
self.logger.debug('%s: %s' % (status_code, response))
return status_code, response
def get_child_by_name(parent, name):
"""
Iterate through a gtk container, `parent`,
and return the widget with the name `name`.
"""
# http://stackoverflow.com/questions/2072976/access-to-widget-in-gtk
def iterate_children(widget, name):
if widget.get_name() == name:
return widget
try:
for w in widget.get_children():
result = iterate_children(w, name)
if result is not None:
return result
else:
continue
except AttributeError:
pass
return iterate_children(parent, name)
def add_item_metadata(self, handle, key, value):
"""Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value
"""
_mkdir_if_missing(self._metadata_fragments_abspath)
prefix = self._handle_to_fragment_absprefixpath(handle)
fpath = prefix + '.{}.json'.format(key)
_put_obj(fpath, value)
def create_embeded_pkcs7_signature(data, cert, key):
"""
Creates an embedded ("nodetached") pkcs7 signature.
This is equivalent to the output of::
openssl smime -sign -signer cert -inkey key -outform DER -nodetach < data
:type data: bytes
:type cert: str
:type key: str
""" # noqa: E501
assert isinstance(data, bytes)
assert isinstance(cert, str)
try:
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
signcert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
except crypto.Error as e:
raise exceptions.CorruptCertificate from e
bio_in = crypto._new_mem_buf(data)
pkcs7 = crypto._lib.PKCS7_sign(
signcert._x509, pkey._pkey, crypto._ffi.NULL, bio_in, PKCS7_NOSIGS
)
bio_out = crypto._new_mem_buf()
crypto._lib.i2d_PKCS7_bio(bio_out, pkcs7)
signed_data = crypto._bio_to_string(bio_out)
return signed_data
def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False):
""" Attempts to convert 'source' to the types that this generator can
handle. The intention is to produce the set of targets that should be
used when the generator is run.
only_one: convert 'source' to only one of the source types;
if there's more than one possibility, report an
error.
Returns a pair:
consumed: all targets that can be consumed.
"""
if __debug__:
from .targets import ProjectTarget
assert isinstance(name, basestring) or name is None
assert isinstance(project, ProjectTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(only_one, bool)
consumed = []
missing_types = []
if len (sources) > 1:
# Don't know how to handle several sources yet. Just try
# to pass the request to other generator
missing_types = self.source_types_
else:
(c, m) = self.consume_directly (sources [0])
consumed += c
missing_types += m
# No need to search for transformation if
# some source type has consumed source and
# no more source types are needed.
if only_one and consumed:
missing_types = []
#TODO: we should check that only one source type
#if create of 'only_one' is true.
# TODO: consider if consuned/bypassed separation should
# be done by 'construct_types'.
if missing_types:
transformed = construct_types (project, name, missing_types, prop_set, sources)
# Add targets of right type to 'consumed'. Add others to
# 'bypassed'. The 'generators.construct' rule has done
# its best to convert everything to the required type.
# There's no need to rerun it on targets of different types.
# NOTE: ignoring usage requirements
for t in transformed[1]:
if t.type() in missing_types:
consumed.append(t)
consumed = unique(consumed)
return consumed
def set_host_finished(self, scan_id, target, host):
""" Add the host in a list of finished hosts """
finished_hosts = self.scans_table[scan_id]['finished_hosts']
finished_hosts[target].extend(host)
self.scans_table[scan_id]['finished_hosts'] = finished_hosts
def dist(src, tar, method=sim_levenshtein):
"""Return a distance between two strings.
This is a generalized function for calling other distance functions.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
method : function
Specifies the similarity metric (:py:func:`sim_levenshtein` by default)
-- Note that this takes a similarity metric function, not a distance
metric function.
Returns
-------
float
Distance according to the specified function
Raises
------
AttributeError
Unknown distance function
Examples
--------
>>> round(dist('cat', 'hat'), 12)
0.333333333333
>>> round(dist('Niall', 'Neil'), 12)
0.6
>>> dist('aluminum', 'Catalan')
0.875
>>> dist('ATCG', 'TAGC')
0.75
"""
if callable(method):
return 1 - method(src, tar)
else:
raise AttributeError('Unknown distance function: ' + str(method))
def _select_index(self, row, col):
"""Change the selection index, and make sure it stays in the right range
A little more complicated than just doing modulo the number of rows and columns,
to be sure to cycle through all elements.
Horizontally, the elements are mapped like this:
to r <-- a b c d e f --> to g
to f <-- g h i j k l --> to m
to l <-- m n o p q r --> to a
and vertically
a d g j m p
b e h k n q
c f i l o r
"""
nr, nc = self._size
nr = nr-1
nc = nc-1
# case 1
if (row > nr and col >= nc) or (row >= nr and col > nc):
self._select_index(0, 0)
# case 2
elif (row <= 0 and col < 0) or (row < 0 and col <= 0):
self._select_index(nr, nc)
# case 3
elif row > nr :
self._select_index(0, col+1)
# case 4
elif row < 0 :
self._select_index(nr, col-1)
# case 5
elif col > nc :
self._select_index(row+1, 0)
# case 6
elif col < 0 :
self._select_index(row-1, nc)
elif 0 <= row and row <= nr and 0 <= col and col <= nc :
self._index = (row, col)
else :
raise NotImplementedError("you'r trying to go where no completion\
have gone before : %d:%d (%d:%d)"%(row, col, nr, nc) ) | Change the selection index, and make sure it stays in the right range
A little more complicated than just dooing modulo the number of row columns
to be sure to cycle through all element.
horizontaly, the element are maped like this :
to r <-- a b c d e f --> to g
to f <-- g h i j k l --> to m
to l <-- m n o p q r --> to a
and vertically
a d g j m p
b e h k n q
c f i l o r |
def addFilter(self, filterMethod=FILTER_METHOD_AND, **kwargs):
'''
addFilter - Add a filter to this query.
@param filterMethod <str> - The filter method to use (AND or OR), default: 'AND'
@param additional args - Filter arguments. @see QueryableListBase.filter
@raises ValueError if filterMethod is not one of known methods.
'''
filterMethod = filterMethod.upper()
if filterMethod not in FILTER_METHODS:
raise ValueError('Unknown filter method, %s. Must be one of: %s' %(str(filterMethod), repr(FILTER_METHODS)))
self.filters.append((filterMethod, kwargs))
def lbd_to_XYZ_jac(*args,**kwargs):
"""
NAME:
lbd_to_XYZ_jac
PURPOSE:
calculate the Jacobian of the Galactic spherical coordinates to Galactic rectangular coordinates transformation
INPUT:
l,b,D- Galactic spherical coordinates
vlos,pmll,pmbb- Galactic spherical velocities (some as proper motions)
if 6 inputs: l,b,D,vlos,pmll x cos(b),pmbb
if 3: l,b,D
degree= (False) if True, l and b are in degrees
OUTPUT:
jacobian
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
out= sc.zeros((6,6))
if len(args) == 3:
l,b,D= args
vlos, pmll, pmbb= 0., 0., 0.
elif len(args) == 6:
l,b,D,vlos,pmll,pmbb= args
if kwargs.get('degree',False):
l*= _DEGTORAD
b*= _DEGTORAD
cl= sc.cos(l)
sl= sc.sin(l)
cb= sc.cos(b)
sb= sc.sin(b)
out[0,0]= -D*cb*sl
out[0,1]= -D*sb*cl
out[0,2]= cb*cl
out[1,0]= D*cb*cl
out[1,1]= -D*sb*sl
out[1,2]= cb*sl
out[2,1]= D*cb
out[2,2]= sb
if len(args) == 3:
if kwargs.get('degree',False):
out[:,0]*= _DEGTORAD
out[:,1]*= _DEGTORAD
return out[:3,:3]
out[3,0]= -sl*cb*vlos-cl*_K*D*pmll+sb*sl*_K*D*pmbb
out[3,1]= -cl*sb*vlos-cb*cl*_K*D*pmbb
out[3,2]= -sl*_K*pmll-sb*cl*_K*pmbb
out[3,3]= cl*cb
out[3,4]= -sl*_K*D
out[3,5]= -cl*sb*_K*D
out[4,0]= cl*cb*vlos-sl*_K*D*pmll-cl*sb*_K*D*pmbb
out[4,1]= -sl*sb*vlos-sl*cb*_K*D*pmbb
out[4,2]= cl*_K*pmll-sl*sb*_K*pmbb
out[4,3]= sl*cb
out[4,4]= cl*_K*D
out[4,5]= -sl*sb*_K*D
out[5,1]= cb*vlos-sb*_K*D*pmbb
out[5,2]= cb*_K*pmbb
out[5,3]= sb
out[5,5]= cb*_K*D
if kwargs.get('degree',False):
out[:,0]*= _DEGTORAD
out[:,1]*= _DEGTORAD
return out
def get_alert(thing_name, key, session=None):
"""Set an alert on a thing with the given condition
"""
return _request('get', '/get/alert/for/{0}'.format(thing_name), params={'key': key}, session=session)
def show_lbaas_healthmonitor(self, lbaas_healthmonitor, **_params):
"""Fetches information for a lbaas_healthmonitor."""
return self.get(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
params=_params)
def handle_url_build_error(self, error: Exception, endpoint: str, values: dict) -> str:
"""Handle a build error.
Ideally this will return a valid url given the error endpoint
and values.
"""
for handler in self.url_build_error_handlers:
result = handler(error, endpoint, values)
if result is not None:
return result
raise error | Handle a build error.
Ideally this will return a valid url given the error endpoint
and values. |
def tdSensorValue(self, protocol, model, sid, datatype):
"""Get the sensor value for a given sensor.
:return: a dict with the keys: value, timestamp.
"""
value = create_string_buffer(20)
timestamp = c_int()
self._lib.tdSensorValue(protocol, model, sid, datatype,
value, sizeof(value), byref(timestamp))
return {'value': self._to_str(value), 'timestamp': timestamp.value} | Get the sensor value for a given sensor.
:return: a dict with the keys: value, timestamp. |
def robust_outer_product(vec_1, vec_2):
"""
Calculates a 'robust' outer product of two vectors that may or may not
contain very small values.
Parameters
----------
vec_1 : 1D ndarray
vec_2 : 1D ndarray
Returns
-------
outer_prod : 2D ndarray. The outer product of vec_1 and vec_2
"""
mantissa_1, exponents_1 = np.frexp(vec_1)
mantissa_2, exponents_2 = np.frexp(vec_2)
new_mantissas = mantissa_1[None, :] * mantissa_2[:, None]
new_exponents = exponents_1[None, :] + exponents_2[:, None]
return new_mantissas * np.exp2(new_exponents) | Calculates a 'robust' outer product of two vectors that may or may not
contain very small values.
Parameters
----------
vec_1 : 1D ndarray
vec_2 : 1D ndarray
Returns
-------
outer_prod : 2D ndarray. The outer product of vec_1 and vec_2 |
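A minimal usage sketch, assuming NumPy is installed and robust_outer_product is in scope; for well-scaled inputs the result agrees with np.outer:
import numpy as np
v = np.array([1e-5, 2.0, 3e7])
# frexp splits each value into mantissa and exponent; recombining with exp2
# keeps the elementwise products numerically stable.
print(np.allclose(robust_outer_product(v, v), np.outer(v, v)))  # True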
def sort_tiers(self, key=lambda x: x.name):
"""Sort the tiers given the key. Example key functions:
Sort according to the tiername in a list:
``lambda x: ['name1', 'name2' ... 'namen'].index(x.name)``.
Sort according to the number of annotations:
``lambda x: len(list(x.get_intervals()))``
:param func key: A key function. Default sorts alphabetically.
"""
self.tiers.sort(key=key) | Sort the tiers given the key. Example key functions:
Sort according to the tiername in a list:
``lambda x: ['name1', 'name2' ... 'namen'].index(x.name)``.
Sort according to the number of annotations:
``lambda x: len(list(x.get_intervals()))``
:param func key: A key function. Default sorts alphabetically. |
def business_rule_notification_is_blocked(self, hosts, services):
# pylint: disable=too-many-locals
"""Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to treat them
as acknowledgements via the business_rule_downtime_as_ack setting.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool
"""
# Walks through problems to check if all items in non ok are
# acknowledged or in downtime period.
acknowledged = 0
for src_prob_id in self.source_problems:
if src_prob_id in hosts:
src_prob = hosts[src_prob_id]
else:
src_prob = services[src_prob_id]
if src_prob.last_hard_state_id != 0:
if src_prob.problem_has_been_acknowledged:
# Problem hast been acknowledged
acknowledged += 1
# Only check problems under downtime if we are
# explicitly told to do so.
elif self.business_rule_downtime_as_ack is True:
if src_prob.scheduled_downtime_depth > 0:
# Problem is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
elif hasattr(src_prob, "host") and \
hosts[src_prob.host].scheduled_downtime_depth > 0:
# Host is under downtime, and downtimes should be
# treated as acknowledgements
acknowledged += 1
return acknowledged == len(self.source_problems) | Process business rule notifications behaviour. If all problems have
been acknowledged, no notifications should be sent if state is not OK.
By default, downtimes are ignored, unless explicitly told to treat them
as acknowledgements via the business_rule_downtime_as_ack setting.
:return: True if all source problem are acknowledged, otherwise False
:rtype: bool |
def generate_single_simulation(self, x):
"""
Generate a single SSA simulation
:param x: an integer to reset the random seed. If None, the initial random number generator is used
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem
:rtype: list[:class:`~means.simulation.Trajectory`]
"""
#reset random seed
if x:
self.__rng = np.random.RandomState(x)
# perform one stochastic simulation
time_points, species_over_time = self._gssa(self.__initial_conditions, self.__t_max)
# build descriptors for first order raw moments aka expectations (e.g. [1, 0, 0], [0, 1, 0] and [0, 0, 1])
descriptors = []
for i, s in enumerate(self.__species):
row = [0] * len(self.__species)
row[i] = 1
descriptors.append(Moment(row, s))
# build trajectories
trajectories = [Trajectory(time_points, spot, desc) for
spot, desc in zip(species_over_time, descriptors)]
return trajectories | Generate a single SSA simulation
:param x: an integer to reset the random seed. If None, the initial random number generator is used
:return: a list of :class:`~means.simulation.Trajectory` one per species in the problem
:rtype: list[:class:`~means.simulation.Trajectory`] |
def create_untl_xml_subelement(parent, element, prefix=''):
"""Create a UNTL XML subelement."""
subelement = SubElement(parent, prefix + element.tag)
if element.content is not None:
subelement.text = element.content
if element.qualifier is not None:
subelement.attrib["qualifier"] = element.qualifier
if element.children > 0:
for child in element.children:
SubElement(subelement, prefix + child.tag).text = child.content
else:
subelement.text = element.content
return subelement | Create a UNTL XML subelement. |
def _bundle_generic(bfile, addhelper, fmt, reffmt, data_dir):
'''
Loop over all basis sets and add data to an archive
Parameters
----------
bfile : object
An object that gets passed through to the addhelper function
addhelper : function
A function that takes bfile and adds data to the bfile
fmt : str
Format of the basis set to create
reffmt : str
Format to use for the references
data_dir : str
Data directory with all the basis set information.
Returns
-------
None
'''
ext = converters.get_format_extension(fmt)
refext = refconverters.get_format_extension(reffmt)
subdir = 'basis_set_bundle-' + fmt + '-' + reffmt
readme_path = os.path.join(subdir, 'README.txt')
addhelper(bfile, readme_path, _create_readme(fmt, reffmt))
for name, data, notes in _basis_data_iter(fmt, reffmt, data_dir):
for ver, verdata in data.items():
filename = misc.basis_name_to_filename(name)
basis_filepath = os.path.join(subdir, '{}.{}{}'.format(filename, ver, ext))
ref_filename = os.path.join(subdir, '{}.{}.ref{}'.format(filename, ver, refext))
bsdata, refdata = verdata
addhelper(bfile, basis_filepath, bsdata)
addhelper(bfile, ref_filename, refdata)
if len(notes) > 0:
notes_filename = os.path.join(subdir, filename + '.notes')
addhelper(bfile, notes_filename, notes)
for fam in api.get_families(data_dir):
fam_notes = api.get_family_notes(fam, data_dir)
if len(fam_notes) > 0:
fam_notes_filename = os.path.join(subdir, fam + '.family_notes')
addhelper(bfile, fam_notes_filename, fam_notes) | Loop over all basis sets and add data to an archive
Parameters
----------
bfile : object
An object that gets passed through to the addhelper function
addhelper : function
A function that takes bfile and adds data to the bfile
fmt : str
Format of the basis set to create
reffmt : str
Format to use for the references
data_dir : str
Data directory with all the basis set information.
Returns
-------
None |
def snapshot(self):
"""Snapshot current state."""
self._snapshot = {
'muted': self.muted,
'volume': self.volume,
'stream': self.stream
}
_LOGGER.info('took snapshot of current state of %s', self.friendly_name) | Snapshot current state. |
def transform_q(q, query):
"""
Replaces (lookup, value) children of Q with equivalent WhereNode objects.
This is a pre-prep of our Q object, ready for later rendering into SQL.
Modifies in place, no need to return.
(We could do this in render_q, but then we'd have to pass the Query object
from ConditionalAggregate down into SQLConditionalAggregate, which Django
avoids to do in their API so we try and follow their lead here)
"""
for i, child in enumerate(q.children):
if isinstance(child, Q):
transform_q(child, query)
else:
# child is (lookup, value) tuple
where_node = query.build_filter(child)
q.children[i] = where_node | Replaces (lookup, value) children of Q with equivalent WhereNode objects.
This is a pre-prep of our Q object, ready for later rendering into SQL.
Modifies in place, no need to return.
(We could do this in render_q, but then we'd have to pass the Query object
from ConditionalAggregate down into SQLConditionalAggregate, which Django
avoids to do in their API so we try and follow their lead here) |
def migrate_passwords_to_leader_storage(self, excludes=None):
"""Migrate any passwords storage on disk to leader storage."""
if not is_leader():
log("Skipping password migration as not the lead unit",
level=DEBUG)
return
dirname = os.path.dirname(self.root_passwd_file_template)
path = os.path.join(dirname, '*.passwd')
for f in glob.glob(path):
if excludes and f in excludes:
log("Excluding %s from leader storage migration" % (f),
level=DEBUG)
continue
key = os.path.basename(f)
with open(f, 'r') as passwd:
_value = passwd.read().strip()
try:
leader_set(settings={key: _value})
if self.delete_ondisk_passwd_file:
os.unlink(f)
except ValueError:
# NOTE cluster relation not yet ready - skip for now
pass | Migrate any passwords storage on disk to leader storage. |
def main(sample_id, assembly_file, minsize):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass") | Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF |
def _connect(self):
"""Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is a OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database.
"""
try:
# FYI: the connection process might raise a
# `ServerSelectionTimeoutError`, that is a subclass of
# `ConnectionFailure`.
# The presence of ca_cert, certfile, keyfile, crlfile implies the
# use of certificates for TLS connectivity.
if self.ca_cert is None or self.certfile is None or \
self.keyfile is None or self.crlfile is None:
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
**MONGO_OPTS)
if self.login is not None and self.password is not None:
client[self.dbname].authenticate(self.login, self.password)
else:
logger.info('Connecting to MongoDB over TLS/SSL...')
client = pymongo.MongoClient(self.host,
self.port,
replicaset=self.replicaset,
serverselectiontimeoutms=self.connection_timeout,
ssl=self.ssl,
ssl_ca_certs=self.ca_cert,
ssl_certfile=self.certfile,
ssl_keyfile=self.keyfile,
ssl_pem_passphrase=self.keyfile_passphrase,
ssl_crlfile=self.crlfile,
ssl_cert_reqs=CERT_REQUIRED,
**MONGO_OPTS)
if self.login is not None:
client[self.dbname].authenticate(self.login,
mechanism='MONGODB-X509')
return client
except (pymongo.errors.ConnectionFailure,
pymongo.errors.OperationFailure) as exc:
logger.info('Exception in _connect(): {}'.format(exc))
raise ConnectionError(str(exc)) from exc
except pymongo.errors.ConfigurationError as exc:
raise ConfigurationError from exc | Try to connect to the database.
Raises:
:exc:`~ConnectionError`: If the connection to the database
fails.
:exc:`~AuthenticationError`: If there is a OperationFailure due to
Authentication failure after connecting to the database.
:exc:`~ConfigurationError`: If there is a ConfigurationError while
connecting to the database. |
def exception_wrapper(f):
"""Decorator to convert dbus exception to pympris exception."""
@wraps(f)
def wrapper(*args, **kwds):
try:
return f(*args, **kwds)
except dbus.exceptions.DBusException as err:
_args = err.args
raise PyMPRISException(*_args)
return wrapper | Decorator to convert dbus exception to pympris exception. |
def set_affinity_matrix(self, affinity_mat):
"""
Parameters
----------
affinity_mat : sparse matrix (N_obs, N_obs).
The adjacency matrix to input.
"""
affinity_mat = check_array(affinity_mat, accept_sparse=sparse_formats)
if affinity_mat.shape[0] != affinity_mat.shape[1]:
raise ValueError("affinity matrix is not square")
self.affinity_matrix = affinity_mat | Parameters
----------
affinity_mat : sparse matrix (N_obs, N_obs).
The adjacency matrix to input. |
def encrypt(self):
"""
We perform no encryption, we just encode the value as base64 and then
decode it in decrypt().
"""
value = self.parameters.get("Plaintext")
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'}) | We perform no encryption, we just encode the value as base64 and then
decode it in decrypt(). |
def find_additional_rels(self, all_models):
"""Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships.
"""
for model_name, model in iteritems(all_models):
if model_name != self.name:
for field_name in model.field_names:
field = model.fields[field_name]
# if this field type references the current model
if field.field_type == self.name and field.back_populates is not None and \
(isinstance(field, StatikForeignKeyField) or isinstance(field, StatikManyToManyField)):
self.additional_rels[field.back_populates] = {
'to_model': model_name,
'back_populates': field_name,
'secondary': (model_name, field.field_type)
if isinstance(field, StatikManyToManyField) else None
}
logger.debug(
'Additional relationship %s.%s -> %s (%s)',
self.name,
field.back_populates,
model_name,
self.additional_rels[field.back_populates]
) | Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships. |
def get_instance_property(instance, property_name):
"""Retrieves property of an instance, keeps retrying until getting a non-None"""
name = get_name(instance)
while True:
try:
value = getattr(instance, property_name)
if value is not None:
break
print(f"retrieving {property_name} on {name} produced None, retrying")
time.sleep(RETRY_INTERVAL_SEC)
instance.reload()
continue
except Exception as e:
print(f"retrieving {property_name} on {name} failed with {e}, retrying")
time.sleep(RETRY_INTERVAL_SEC)
try:
instance.reload()
except Exception:
pass
continue
return value | Retrieves property of an instance, keeps retrying until getting a non-None |
def memoizedmethod(method):
"""
Decorator that caches method result.
Args:
method (function): Method
Returns:
function: Memoized method.
Notes:
Target method class needs as "_cache" attribute (dict).
It is the case of "ObjectIOBase" and all its subclasses.
"""
method_name = method.__name__
@wraps(method)
def patched(self, *args, **kwargs):
"""Patched method"""
# Gets value from cache
try:
return self._cache[method_name]
# Evaluates and cache value
except KeyError:
result = self._cache[method_name] = method(
self, *args, **kwargs)
return result
return patched | Decorator that caches method result.
Args:
method (function): Method
Returns:
function: Memoized method.
Notes:
Target method class needs as "_cache" attribute (dict).
It is the case of "ObjectIOBase" and all its subclasses. |
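A minimal illustration with a hypothetical Reader class (not from the source), assuming memoizedmethod is in scope; the class provides the required _cache dict:
class Reader:
    def __init__(self):
        self._cache = {}          # required by memoizedmethod

    @memoizedmethod
    def size(self):
        print("computing size")   # printed only on the first call
        return 42

r = Reader()
r.size()
r.size()                          # second call is answered from r._cache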
def ReadTrigger(self, trigger_link, options=None):
"""Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Read(path, 'triggers', trigger_id, None, options) | Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict |
def shadow_hash(crypt_salt=None, password=None, algorithm='sha512'):
'''
Generates a salted hash suitable for /etc/shadow.
crypt_salt : None
Salt to be used in the generation of the hash. If one is not
provided, a random salt will be generated.
password : None
Value to be salted and hashed. If one is not provided, a random
password will be generated.
algorithm : sha512
Hash algorithm to use.
CLI Example:
.. code-block:: bash
salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5
'''
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm) | Generates a salted hash suitable for /etc/shadow.
crypt_salt : None
Salt to be used in the generation of the hash. If one is not
provided, a random salt will be generated.
password : None
Value to be salted and hashed. If one is not provided, a random
password will be generated.
algorithm : sha512
Hash algorithm to use.
CLI Example:
.. code-block:: bash
salt '*' random.shadow_hash 'My5alT' 'MyP@asswd' md5 |
def check_type_and_values_of_specification_dict(specification_dict,
unique_alternatives):
"""
Verifies that the values of specification_dict have the correct type, have
the correct structure, and have valid values (i.e. are actually in the set
of possible alternatives). Will raise various errors if / when appropriate.
Parameters
----------
specification_dict : OrderedDict.
Keys are a proper subset of the columns in `long_form_df`. Values are
either a list or a single string, `"all_diff"` or `"all_same"`. If a
list, the elements should be:
- single objects that are within the alternative ID column of
`long_form_df`
- lists of objects that are within the alternative ID column of
`long_form_df`. For each single object in the list, a unique
column will be created (i.e. there will be a unique coefficient
for that variable in the corresponding utility equation of the
corresponding alternative). For lists within the
`specification_dict` values, a single column will be created for
all the alternatives within iterable (i.e. there will be one
common coefficient for the variables in the iterable).
unique_alternatives : 1D ndarray.
Should contain the possible alternative id's for this dataset.
Returns
-------
None.
"""
for key in specification_dict:
specification = specification_dict[key]
if isinstance(specification, str):
if specification not in ["all_same", "all_diff"]:
msg = "specification_dict[{}] not in ['all_same', 'all_diff']"
raise ValueError(msg.format(key))
elif isinstance(specification, list):
# Imagine that the specification is [[1, 2], 3]
# group would be [1, 2]
# group_item would be 1 or 2. group_item should never be a list.
for group in specification:
group_is_list = isinstance(group, list)
if group_is_list:
for group_item in group:
if isinstance(group_item, list):
msg = "Wrong structure for specification_dict[{}]"
msg_2 = " Values can be a list of lists of ints,"
msg_3 = " not lists of lists of lists of ints."
total_msg = msg.format(key) + msg_2 + msg_3
raise ValueError(total_msg)
elif group_item not in unique_alternatives:
msg_1 = "{} in {} in specification_dict[{}]"
msg_2 = " is not in long_format[alt_id_col]"
total_msg = (msg_1.format(group_item, group, key) +
msg_2)
raise ValueError(total_msg)
else:
if group not in unique_alternatives:
msg_1 = "{} in specification_dict[{}]"
msg_2 = " is not in long_format[alt_id_col]"
raise ValueError(msg_1.format(group, key) + msg_2)
else:
msg = "specification_dict[{}] must be 'all_same', 'all_diff', or"
msg_2 = " a list."
raise TypeError(msg.format(key) + msg_2)
return None | Verifies that the values of specification_dict have the correct type, have
the correct structure, and have valid values (i.e. are actually in the set
of possible alternatives). Will raise various errors if / when appropriate.
Parameters
----------
specification_dict : OrderedDict.
Keys are a proper subset of the columns in `long_form_df`. Values are
either a list or a single string, `"all_diff"` or `"all_same"`. If a
list, the elements should be:
- single objects that are within the alternative ID column of
`long_form_df`
- lists of objects that are within the alternative ID column of
`long_form_df`. For each single object in the list, a unique
column will be created (i.e. there will be a unique coefficient
for that variable in the corresponding utility equation of the
corresponding alternative). For lists within the
`specification_dict` values, a single column will be created for
all the alternatives within iterable (i.e. there will be one
common coefficient for the variables in the iterable).
unique_alternatives : 1D ndarray.
Should contain the possible alternative id's for this dataset.
Returns
-------
None. |
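A small sketch of a specification that passes validation, assuming NumPy is available; the variable names and alternative ids are made up for illustration:
import numpy as np
from collections import OrderedDict

spec = OrderedDict([
    ("intercept", [1, [2, 3]]),   # own coefficient for alt 1, one shared by alts 2 and 3
    ("travel_time", "all_same"),  # a single common coefficient
])
# Returns None without raising, because every referenced id is a valid alternative.
check_type_and_values_of_specification_dict(spec, np.array([1, 2, 3]))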
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Closely follows sklearn.covariance.graph_lasso.GraphLassoCV.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
# quic-specific outputs
self.opt_ = None
self.cputime_ = None
self.iters_ = None
self.duality_gap_ = None
# these must be updated upon self.fit()
self.sample_covariance_ = None
self.lam_scale_ = None
self.is_fitted_ = False
# initialize
X = check_array(X, ensure_min_features=2, estimator=self)
X = as_float_array(X, copy=False, force_all_finite=False)
if self.cv is None:
cv = (3, 10)
elif isinstance(self.cv, int):
cv = (self.cv, 10) # upgrade with default number of trials
elif isinstance(self.cv, tuple):
cv = self.cv
cv = RepeatedKFold(n_splits=cv[0], n_repeats=cv[1])
self.init_coefs(X)
# get path
if isinstance(self.lams, int):
n_refinements = self.n_refinements
lam_1 = self.lam_scale_
lam_0 = 1e-2 * lam_1
path = np.logspace(np.log10(lam_0), np.log10(lam_1), self.lams)[::-1]
else:
path = self.lams
n_refinements = 1
# run this thing a bunch
results = list()
t0 = time.time()
for rr in range(n_refinements):
if self.sc is None:
# parallel version
this_result = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose, backend=self.backend
)(
delayed(_quic_path)(
X[train],
path,
X_test=X[test],
lam=self.lam,
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
method=self.method,
verbose=self.verbose,
score_metric=self.score_metric,
init_method=self.init_method,
)
for train, test in cv.split(X)
)
else:
# parallel via spark
train_test_grid = [(train, test) for (train, test) in cv.split(X)]
indexed_param_grid = list(
zip(range(len(train_test_grid)), train_test_grid)
)
par_param_grid = self.sc.parallelize(indexed_param_grid)
X_bc = self.sc.broadcast(X)
# wrap function parameters so we don't pickle the whole self object
quic_path = partial(
_quic_path,
path=path,
lam=self.lam,
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
method=self.method,
verbose=self.verbose,
score_metric=self.score_metric,
init_method=self.init_method,
)
indexed_results = dict(
par_param_grid.map(
partial(_quic_path_spark, quic_path=quic_path, X_bc=X_bc)
).collect()
)
this_result = [
indexed_results[idx] for idx in range(len(train_test_grid))
]
X_bc.unpersist()
# Little dance to transform the list in what we need
covs, _, scores = zip(*this_result)
covs = zip(*covs)
scores = zip(*scores)
results.extend(zip(path, scores, covs))
results = sorted(results, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
best_index = 0
for index, (lam, scores, _) in enumerate(results):
# sometimes we get -np.inf in the result (in kl-loss)
scores = [s for s in scores if not np.isinf(s)]
if len(scores) == 0:
this_score = -np.inf
else:
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of lambda for which there are
# non-zero coefficients
lam_1 = results[0][0]
lam_0 = results[1][0]
elif best_index == last_finite_idx and not best_index == len(results) - 1:
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
lam_1 = results[best_index][0]
lam_0 = results[best_index + 1][0]
elif best_index == len(results) - 1:
lam_1 = results[best_index][0]
lam_0 = 0.01 * results[best_index][0]
else:
lam_1 = results[best_index - 1][0]
lam_0 = results[best_index + 1][0]
if isinstance(self.lams, int):
path = np.logspace(np.log10(lam_1), np.log10(lam_0), self.lams + 2)
path = path[1:-1]
if self.verbose and n_refinements > 1:
print(
"[GraphLassoCV] Done refinement % 2i out of %i: % 3is"
% (rr + 1, n_refinements, time.time() - t0)
)
results = list(zip(*results))
grid_scores_ = list(results[1])
lams = list(results[0])
# Finally, compute the score with lambda = 0
lams.append(0)
grid_scores_.append(
cross_val_score(EmpiricalCovariance(), X, cv=cv, n_jobs=self.n_jobs)
)
self.grid_scores_ = np.array(grid_scores_)
self.lam_ = self.lam * lams[best_index]
self.cv_lams_ = [self.lam * l for l in lams]
# Finally fit the model with the selected lambda
if self.method == "quic":
(
self.precision_,
self.covariance_,
self.opt_,
self.cputime_,
self.iters_,
self.duality_gap_,
) = quic(
self.sample_covariance_,
self.lam_,
mode="default",
tol=self.tol,
max_iter=self.max_iter,
Theta0=self.Theta0,
Sigma0=self.Sigma0,
path=None,
msg=self.verbose,
)
else:
raise NotImplementedError("Only method='quic' has been implemented.")
self.is_fitted_ = True
return self | Fits the GraphLasso covariance model to X.
Closely follows sklearn.covariance.graph_lasso.GraphLassoCV.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate |
def com_google_fonts_check_fstype(ttFont):
"""Checking OS/2 fsType.
Fonts must have their fsType field set to zero.
This setting is known as Installable Embedding, meaning
that none of the DRM restrictions are enabled on the fonts.
More info available at:
https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fstype
"""
value = ttFont['OS/2'].fsType
if value != 0:
FSTYPE_RESTRICTIONS = {
0x0002: ("* The font must not be modified, embedded or exchanged in"
" any manner without first obtaining permission of"
" the legal owner."),
0x0004: ("The font may be embedded, and temporarily loaded on the"
" remote system, but documents that use it must"
" not be editable."),
0x0008: ("The font may be embedded but must only be installed"
" temporarily on other systems."),
0x0100: ("The font may not be subsetted prior to embedding."),
0x0200: ("Only bitmaps contained in the font may be embedded."
" No outline data may be embedded.")
}
restrictions = ""
for bit_mask in FSTYPE_RESTRICTIONS.keys():
if value & bit_mask:
restrictions += FSTYPE_RESTRICTIONS[bit_mask]
if value & 0b1111110011110001:
restrictions += ("* There are reserved bits set,"
" which indicates an invalid setting.")
yield FAIL, ("OS/2 fsType is a legacy DRM-related field.\n"
"In this font it is set to {} meaning that:\n"
"{}\n"
"No such DRM restrictions can be enabled on the"
" Google Fonts collection, so the fsType field"
" must be set to zero (Installable Embedding) instead.\n"
"Fonts with this setting indicate that they may be embedded"
" and permanently installed on the remote system"
" by an application.\n\n"
" More detailed info is available at:\n"
" https://docs.microsoft.com/en-us"
"/typography/opentype/spec/os2#fstype"
"").format(value, restrictions)
else:
yield PASS, ("OS/2 fsType is properly set to zero.") | Checking OS/2 fsType.
Fonts must have their fsType field set to zero.
This setting is known as Installable Embedding, meaning
that none of the DRM restrictions are enabled on the fonts.
More info available at:
https://docs.microsoft.com/en-us/typography/opentype/spec/os2#fstype |
def _parse_hparams(hparams):
"""Split hparams, based on key prefixes.
Args:
hparams: hyperparameters
Returns:
Tuple of hparams for, respectively: agent, optimizer, runner, replay_buffer.
"""
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
ret = []
for prefix in prefixes:
ret_dict = {}
for key in hparams.values():
if prefix in key:
par_name = key[len(prefix):]
ret_dict[par_name] = hparams.get(key)
ret.append(ret_dict)
return ret | Split hparams, based on key prefixes.
Args:
hparams: hyperparameters
Returns:
Tuple of hparams for, respectively: agent, optimizer, runner, replay_buffer.
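A rough sketch with a hypothetical stand-in for the hparams argument (the real object is presumably a TensorFlow HParams instance, whose values() returns a name-to-value mapping and which exposes get()):
class FakeHParams(dict):
    """Hypothetical stand-in: values() yields hyperparameter names, get() their values."""
    def values(self):
        return list(self.keys())

hp = FakeHParams(agent_lr=0.1, optimizer_beta1=0.9,
                 runner_steps=1000, replay_buffer_size=50000)
agent, optimizer, runner, replay_buffer = _parse_hparams(hp)
print(agent)  # {'lr': 0.1}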
def generate(env):
"""Add Builders and construction variables for gnulink to an Environment."""
link.generate(env)
if env['PLATFORM'] == 'hpux':
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
# OpenBSD doesn't usually use SONAME for libraries
use_soname = not sys.platform.startswith('openbsd')
link._setup_versioned_lib_variables(env, tool = 'gnulink', use_soname = use_soname)
env['LINKCALLBACKS'] = link._versioned_lib_callbacks()
# For backward-compatibility with older SCons versions
env['SHLIBVERSIONFLAGS'] = SCons.Util.CLVar('-Wl,-Bsymbolic') | Add Builders and construction variables for gnulink to an Environment. |
def get_data_length(self):
# type: () -> int
'''
A method to get the length of the data that this Directory Record
points to.
Parameters:
None.
Returns:
The length of the data that this Directory Record points to.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
if self.inode is not None:
return self.inode.get_data_length()
return self.data_length | A method to get the length of the data that this Directory Record
points to.
Parameters:
None.
Returns:
The length of the data that this Directory Record points to. |
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
t = [prob for (val, prob) in self.d.iteritems() if val > x]
return sum(t) | Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability |
def pack(self, remaining_size):
"""Pack data of part into binary format"""
arguments_count, payload = self.pack_data(remaining_size - self.header_size)
payload_length = len(payload)
# align payload length to multiple of 8
if payload_length % 8 != 0:
payload += b"\x00" * (8 - payload_length % 8)
self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
payload_length, remaining_size)
hdr = self.header_struct.pack(*self.header)
if pyhdb.tracing:
self.trace_header = humanhexlify(hdr, 30)
self.trace_payload = humanhexlify(payload, 30)
return hdr + payload | Pack data of part into binary format |
def decrypt_subtitle(self, subtitle):
"""Decrypt encrypted subtitle data in high level model object
@param crunchyroll.models.Subtitle subtitle
@return str
"""
return self.decrypt(self._build_encryption_key(int(subtitle.id)),
subtitle['iv'][0].text.decode('base64'),
subtitle['data'][0].text.decode('base64')) | Decrypt encrypted subtitle data in high level model object
@param crunchyroll.models.Subtitle subtitle
@return str |
def clinvar_submission_header(submission_objs, csv_type):
"""Determine which fields to include in csv header by checking a list of submission objects
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
csv_type(str) : 'variant_data' or 'case_data'
Returns:
custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER
"""
complete_header = {} # header containing all available fields
custom_header = {} # header reflecting the real data included in the submission objects
if csv_type == 'variant_data' :
complete_header = CLINVAR_HEADER
else:
complete_header = CASEDATA_HEADER
for header_key, header_value in complete_header.items(): # loop over the info fields provided in each submission object
for clinvar_obj in submission_objs: # loop over the submission objects
for key, value in clinvar_obj.items(): # loop over the keys and values of the clinvar objects
if not header_key in custom_header and header_key == key: # add to custom header if missing and specified in submission object
custom_header[header_key] = header_value
return custom_header | Determine which fields to include in csv header by checking a list of submission objects
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
csv_type(str) : 'variant_data' or 'case_data'
Returns:
custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER |
def https_connection(self):
"""Return an https connection to this Connection's endpoint.
Returns a 3-tuple containing::
1. The :class:`HTTPSConnection` instance
2. Dictionary of auth headers to be used with the connection
3. The root url path (str) to be used for requests.
"""
endpoint = self.endpoint
host, remainder = endpoint.split(':', 1)
port = remainder
if '/' in remainder:
port, _ = remainder.split('/', 1)
conn = HTTPSConnection(
host, int(port),
context=self._get_ssl(self.cacert),
)
path = (
"/model/{}".format(self.uuid)
if self.uuid else ""
)
return conn, self._http_headers(), path | Return an https connection to this Connection's endpoint.
Returns a 3-tuple containing::
1. The :class:`HTTPSConnection` instance
2. Dictionary of auth headers to be used with the connection
3. The root url path (str) to be used for requests. |
def add_number_widget(self, ref, x=1, value=1):
""" Add Number Widget """
if ref not in self.widgets:
widget = widgets.NumberWidget(screen=self, ref=ref, x=x, value=value)
self.widgets[ref] = widget
return self.widgets[ref] | Add Number Widget |
def _handle_ticker(self, dtype, data, ts):
"""Adds received ticker data to self.tickers dict, filed under its channel
id.
:param dtype:
:param data:
:param ts:
:return:
"""
self.log.debug("_handle_ticker: %s - %s - %s", dtype, data, ts)
channel_id, *data = data
channel_identifier = self.channel_directory[channel_id]
entry = (data, ts)
self.tickers[channel_identifier].put(entry) | Adds received ticker data to self.tickers dict, filed under its channel
id.
:param dtype:
:param data:
:param ts:
:return: |
def singularity_build(script=None, src=None, dest=None, **kwargs):
'''docker build command. By default a script is sent to the docker build command but
you can also specify different parameters defined in https://docker-py.readthedocs.org/en/stable/api/#build
'''
singularity = SoS_SingularityClient()
singularity.build(script, src, dest, **kwargs)
return 0 | docker build command. By default a script is sent to the docker build command but
you can also specify different parameters defined in https://docker-py.readthedocs.org/en/stable/api/#build | docker build command. By default a script is sent to the docker build command but
def _normalized_keys(self, section, items):
# type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized | Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment. |
def _make_publisher(catalog_or_dataset):
"""De estar presentes las claves necesarias, genera el diccionario
"publisher" a nivel catálogo o dataset."""
level = catalog_or_dataset
keys = [k for k in ["publisher_name", "publisher_mbox"] if k in level]
if keys:
level["publisher"] = {
key.replace("publisher_", ""): level.pop(key) for key in keys
}
return level | If the required keys are present, build the "publisher" dictionary
at the catalog or dataset level.
def transitions_for(self, roles=None, actor=None, anchors=[]):
"""
For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`.
"""
proxy = self.obj.access_for(roles, actor, anchors)
return {name: transition for name, transition in self.transitions(current=False).items()
if name in proxy} | For use on :class:`~coaster.sqlalchemy.mixins.RoleMixin` classes:
returns currently available transitions for the specified
roles or actor as a dictionary of name: :class:`StateTransitionWrapper`. |
def strftime(dt, fmt):
'''
`strftime` implementation working before 1900
'''
if _illegal_s.search(fmt):
raise TypeError("This strftime implementation does not handle %s")
if dt.year > 1900:
return dt.strftime(fmt)
fmt = fmt.replace('%c', '%a %b %d %H:%M:%S %Y')\
.replace('%Y', str(dt.year))\
.replace('%y', '{:04}'.format(dt.year)[-2:])
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
return time.strftime(fmt, (year,) + timetuple[1:]) | `strftime` implementation working before 1900 |
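A short usage sketch; the helper substitutes the year textually and formats with a leap-cycle-equivalent year, so the output keeps the original pre-1900 date:
import datetime
print(strftime(datetime.datetime(1865, 4, 14, 21, 30), '%Y-%m-%d %H:%M'))
# 1865-04-14 21:30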
def parse_options_header(value, multiple=False):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True
"""
if not value:
return "", {}
result = []
value = "," + value.replace("\n", ",")
while value:
match = _option_header_start_mime_type.match(value)
if not match:
break
result.append(match.group(1)) # mimetype
options = {}
# Parse options
rest = match.group(2)
continued_encoding = None
while rest:
optmatch = _option_header_piece_re.match(rest)
if not optmatch:
break
option, count, encoding, language, option_value = optmatch.groups()
# Continuations don't have to supply the encoding after the
# first line. If we're in a continuation, track the current
# encoding to use for subsequent lines. Reset it when the
# continuation ends.
if not count:
continued_encoding = None
else:
if not encoding:
encoding = continued_encoding
continued_encoding = encoding
option = unquote_header_value(option)
if option_value is not None:
option_value = unquote_header_value(option_value, option == "filename")
if encoding is not None:
option_value = _unquote(option_value).decode(encoding)
if count:
# Continuations append to the existing value. For
# simplicity, this ignores the possibility of
# out-of-order indices, which shouldn't happen anyway.
options[option] = options.get(option, "") + option_value
else:
options[option] = option_value
rest = rest[optmatch.end() :]
result.append(options)
if multiple is False:
return tuple(result)
value = rest
return tuple(result) if result else ("", {}) | Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
:param value: the header to parse.
:param multiple: Whether try to parse and return multiple MIME types
:return: (mimetype, options) or (mimetype, options, mimetype, options, …)
if multiple=True |
def rename_ligand(self,ligand_name,mol_file):
"""
Get an atom selection for the selected from both topology and trajectory. Rename the ligand LIG
to help with ligand names that are not standard, e.g. contain numbers.
Takes:
* ligand_name * - MDAnalysis atom selection for the ligand selected by user
Output:
* self.ligand * - renamed ligand with resname LIG,
* self.ligand_noH * - renamed ligand with resname LIG and without H atoms (these are not
present in the final 2D representation and are therefore excluded from some analysis scripts.)
"""
self.universe.ligand = self.universe.select_atoms(ligand_name)
#Both resname and resnames options need to be reset in order for complete renaming.
self.universe.ligand.residues.resnames = "LIG"
self.universe.ligand.resname = "LIG"
if mol_file is None:
self.universe.ligand.write("lig.pdb")
os.system("babel -ipdb lig.pdb -omol lig.mol ") | Get an atom selection for the selected from both topology and trajectory. Rename the ligand LIG
to help with ligand names that are not standard, e.g. contain numbers.
Takes:
* ligand_name * - MDAnalysis atom selection for the ligand selected by user
Output:
* self.ligand * - renamed ligand with resname LIG,
* self.ligand_noH * - renamed ligand with resname LIG and without H atoms (these are not
present in the final 2D representation and are therefore excluded from some analysis scripts.) |
def _optimize_with_progs(format_module, filename, image_format):
"""
Use the correct optimizing functions in sequence.
And report back statistics.
"""
filesize_in = os.stat(filename).st_size
report_stats = None
for func in format_module.PROGRAMS:
if not getattr(Settings, func.__name__):
continue
report_stats = _optimize_image_external(
filename, func, image_format, format_module.OUT_EXT)
filename = report_stats.final_filename
if format_module.BEST_ONLY:
break
if report_stats is not None:
report_stats.bytes_in = filesize_in
else:
report_stats = stats.skip(image_format, filename)
return report_stats | Use the correct optimizing functions in sequence.
And report back statistics. |
def push(self, item):
'''
Push an item
'''
self.server.lpush(self.key, self._encode_item(item)) | Push an item |
def get_prep_value(self, value):
'''The psycopg adaptor returns Python objects,
but we also have to handle conversion ourselves
'''
if isinstance(value, JSON.JsonDict):
return json.dumps(value, cls=JSON.Encoder)
if isinstance(value, JSON.JsonList):
return value.json_string
if isinstance(value, JSON.JsonString):
return json.dumps(value)
return value | The psycopg adaptor returns Python objects,
but we also have to handle conversion ourselves |
def convert_radian(coord, *variables):
"""Convert the given coordinate from radian to degree
Parameters
----------
coord: xr.Variable
The variable to transform
``*variables``
The variables that are on the same unit.
Returns
-------
xr.Variable
The transformed variable if one of the given `variables` has units in
radian"""
if any(v.attrs.get('units') == 'radian' for v in variables):
return coord * 180. / np.pi
return coord | Convert the given coordinate from radian to degree
Parameters
----------
coord: xr.Variable
The variable to transform
``*variables``
The variables that are on the same unit.
Returns
-------
xr.Variable
The transformed variable if one of the given `variables` has units in
radian |
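A minimal sketch assuming xarray and NumPy are installed; a variable whose units attribute is 'radian' is converted to degrees:
import numpy as np
import xarray as xr

lat = xr.Variable(('y',), np.array([0.0, np.pi / 2]), attrs={'units': 'radian'})
print(convert_radian(lat, lat).values)  # [ 0. 90.]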
def _print_foreign_playlist_message(self):
""" reset previous message """
self.operation_mode = self.window_mode = NORMAL_MODE
self.refreshBody()
""" display new message """
txt='''A playlist by this name:
__"|{0}|"
already exists in the config directory.
This playlist was saved as:
__"|{1}|"
'''.format(self._cnf.foreign_filename_only_no_extension,
self._cnf.stations_filename_only_no_extension)
self._show_help(txt, FOREIGN_PLAYLIST_MESSAGE_MODE,
caption = ' Foreign playlist ',
prompt = ' Press any key ',
is_message=True) | reset previous message |
def observe(matcher):
"""
Internal decorator to trigger operator hooks before/after
matcher execution.
"""
@functools.wraps(matcher)
def observer(self, subject, *expected, **kw):
# Trigger before hook, if present
if hasattr(self, 'before'):
self.before(subject, *expected, **kw)
# Trigger matcher method
result = matcher(self, subject, *expected, **kw)
# After error hook
if result is not True and hasattr(self, 'after_error'):
self.after_error(result, subject, *expected, **kw)
# After success hook
if result is True and hasattr(self, 'after_success'):
self.after_success(subject, *expected, **kw)
# Enable diff comparison on error, if needed
if not hasattr(self, 'show_diff'):
self.show_diff = all([
isinstance(subject, six.string_types),
all([isinstance(x, six.string_types) for x in expected]),
])
return result
return observer | Internal decorator to trigger operator hooks before/after
matcher execution. |
def trace(
data, name, format='png', datarange=(None, None), suffix='', path='./', rows=1, columns=1,
num=1, last=True, fontmap = None, verbose=1):
"""
Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Plotting', name)
figure()
subplot(rows, columns, num)
pyplot(data.tolist())
ylim(datarange)
# Plot options
title('\n\n %s trace' % name, x=0., y=1., ha='left', va='top',
fontsize='small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format)) | Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. |
def formatMessageForBuildResults(self, mode, buildername, buildset, build, master, previous_results, blamelist):
"""Generate a buildbot mail message and return a dictionary
containing the message body, type and subject."""
ss_list = buildset['sourcestamps']
results = build['results']
ctx = dict(results=build['results'],
mode=mode,
buildername=buildername,
workername=build['properties'].get(
'workername', ["<unknown>"])[0],
buildset=buildset,
build=build,
projects=self.getProjects(ss_list, master),
previous_results=previous_results,
status_detected=self.getDetectedStatus(
mode, results, previous_results),
build_url=utils.getURLForBuild(
master, build['builder']['builderid'], build['number']),
buildbot_url=master.config.buildbotURL,
blamelist=blamelist,
summary=self.messageSummary(build, results),
sourcestamps=self.messageSourceStamps(ss_list)
)
yield self.buildAdditionalContext(master, ctx)
msgdict = self.renderMessage(ctx)
return msgdict | Generate a buildbot mail message and return a dictionary
containing the message body, type and subject. |
def update_user(resource_root, user):
"""
Update a user.
Replaces the user's details with those provided.
@param resource_root: The root Resource object
@param user: An ApiUser object
@return: An ApiUser object
"""
return call(resource_root.put,
'%s/%s' % (USERS_PATH, user.name), ApiUser, data=user) | Update a user.
Replaces the user's details with those provided.
@param resource_root: The root Resource object
@param user: An ApiUser object
@return: An ApiUser object |
def Clouds(name=None, deterministic=False, random_state=None):
"""
Augmenter to draw clouds in images.
This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities
and frequency patterns of clouds.
This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``
and ``960x1280``.
dtype support::
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range of uint8.
While other dtypes may be accepted, they will lead to images augmented in
ways inappropriate for the respective dtype.
Parameters
----------
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Clouds()
Creates an augmenter that adds clouds to images.
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
return meta.SomeOf((1, 2), children=[
CloudLayer(
intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,
alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),
sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)
),
CloudLayer(
intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,
alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),
sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)
)
], random_order=False, name=name, deterministic=deterministic, random_state=random_state) | Augmenter to draw clouds in images.
This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities
and frequency patterns of clouds.
This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``
and ``960x1280``.
dtype support::
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range of uint8.
While other dtypes may be accepted, they will lead to images augmented in
ways inappropriate for the respective dtype.
Parameters
----------
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Clouds()
Creates an augmenter that adds clouds to images. |
def points(self, points):
""" set points without copying """
if not isinstance(points, np.ndarray):
raise TypeError('Points must be a numpy array')
# get the unique coordinates along each axial direction
x = np.unique(points[:,0])
y = np.unique(points[:,1])
z = np.unique(points[:,2])
nx, ny, nz = len(x), len(y), len(z)
# TODO: this needs to be tested (unique might return a tuple)
dx, dy, dz = np.unique(np.diff(x)), np.unique(np.diff(y)), np.unique(np.diff(z))
ox, oy, oz = np.min(x), np.min(y), np.min(z)
# Build the vtk object
self._from_specs((nx,ny,nz), (dx,dy,dz), (ox,oy,oz))
#self._point_ref = points
self.Modified() | set points without copying |
def encode(in_bytes):
"""Encode a string using Consistent Overhead Byte Stuffing (COBS).
Input is any byte string. Output is also a byte string.
Encoding guarantees no zero bytes in the output. The output
string will be expanded slightly, by a predictable amount.
An empty string is encoded to '\\x01'"""
final_zero = True
out_bytes = []
idx = 0
search_start_idx = 0
for in_char in in_bytes:
if in_char == '\x00':
final_zero = True
out_bytes.append(chr(idx - search_start_idx + 1))
out_bytes.append(in_bytes[search_start_idx:idx])
search_start_idx = idx + 1
else:
if idx - search_start_idx == 0xFD:
final_zero = False
out_bytes.append('\xFF')
out_bytes.append(in_bytes[search_start_idx:idx+1])
search_start_idx = idx + 1
idx += 1
if idx != search_start_idx or final_zero:
out_bytes.append(chr(idx - search_start_idx + 1))
out_bytes.append(in_bytes[search_start_idx:idx])
return ''.join(out_bytes) | Encode a string using Consistent Overhead Byte Stuffing (COBS).
Input is any byte string. Output is also a byte string.
Encoding guarantees no zero bytes in the output. The output
string will be expanded slightly, by a predictable amount.
An empty string is encoded to '\\x01 |
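A couple of sketch calls, assuming Python 2 byte-string semantics (the implementation iterates characters and compares them against '\x00'):
# Each zero byte is replaced by the length code of the preceding non-zero block.
print(repr(encode('AB\x00C')))  # '\x03AB\x02C'
print(repr(encode('')))         # '\x01'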
def run(self):
"""
Run a Quil program on the QVM multiple times and return the values stored in the
classical registers designated by the classical_addresses parameter.
:return: An array of bitstrings of shape ``(trials, len(classical_addresses))``
"""
super().run()
if not isinstance(self._executable, Program):
# This should really never happen
# unless a user monkeys with `self.status` and `self._executable`.
raise ValueError("Please `load` an appropriate executable.")
quil_program = self._executable
trials = quil_program.num_shots
classical_addresses = get_classical_addresses_from_program(quil_program)
if self.noise_model is not None:
quil_program = apply_noise_model(quil_program, self.noise_model)
quil_program = self.augment_program_with_memory_values(quil_program)
try:
self._bitstrings = self.connection._qvm_run(quil_program=quil_program,
classical_addresses=classical_addresses,
trials=trials,
measurement_noise=self.measurement_noise,
gate_noise=self.gate_noise,
random_seed=self.random_seed)['ro']
except KeyError:
warnings.warn("You are running a QVM program with no MEASURE instructions. "
"The result of this program will always be an empty array. Are "
"you sure you didn't mean to measure some of your qubits?")
self._bitstrings = np.zeros((trials, 0), dtype=np.int64)
return self | Run a Quil program on the QVM multiple times and return the values stored in the
classical registers designated by the classical_addresses parameter.
:return: An array of bitstrings of shape ``(trials, len(classical_addresses))`` |