def handle_truncated_response(callback, params, entities):
"""
Handle truncated responses
:param callback:
:param params:
:param entities:
:return:
"""
results = {}
for entity in entities:
results[entity] = []
while True:
try:
marker_found = False
response = callback(**params)
for entity in entities:
if entity in response:
results[entity] = results[entity] + response[entity]
for marker_name in ['NextToken', 'Marker', 'PaginationToken']:
if marker_name in response and response[marker_name]:
params[marker_name] = response[marker_name]
marker_found = True
if not marker_found:
break
except Exception as e:
if is_throttled(e):
time.sleep(1)
else:
raise e
    return results
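A minimal usage sketch, assuming a boto3 IAM client whose list_users call paginates via a 'Marker' token; the client and parameters are illustrative, and is_throttled/time are helpers the snippet above already relies on:

import boto3

# Hypothetical example: collect every IAM user, letting the helper follow the
# 'Marker'/'NextToken'/'PaginationToken' keys until nothing is truncated.
iam = boto3.client('iam')
users = handle_truncated_response(
    callback=iam.list_users,
    params={'MaxItems': 100},
    entities=['Users'],
)['Users']
print(len(users))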
def parse_cctop_full(infile):
"""Parse a CCTOP XML results file and return a list of the consensus TM domains in the format::
[(1, inside_outside_or_tm),
(2, inside_outside_or_tm),
...]
Where the first value of a tuple is the sequence residue number, and the second is the predicted location with the
values 'I' (inside), 'O' (outside), or 'M' (membrane).
Args:
infile (str): Path to CCTOP XML file
Returns:
list: List of tuples in the format described above
"""
parser = etree.XMLParser(ns_clean=True)
with open(infile, 'r') as f:
tree = etree.fromstring(f.read(), parser)
all_info = []
if tree.find('Topology') is not None:
for r in tree.find('Topology').findall('Region'):
region_start = int(r.attrib['from'])
region_end = int(r.attrib['to'])
region = r.attrib['loc']
for i in range(region_start, region_end + 1):
all_info.append((i, region))
    return all_info
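A small sketch of the expected output, assuming lxml's etree and a hand-written XML fragment shaped like the elements the parser reads (Topology/Region with from, to and loc attributes); real CCTOP result files carry more metadata:

import tempfile

xml = ('<CCTOPresult><Topology>'
       '<Region from="1" to="3" loc="I"/>'
       '<Region from="4" to="6" loc="M"/>'
       '</Topology></CCTOPresult>')
with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
    f.write(xml)
print(parse_cctop_full(f.name))
# [(1, 'I'), (2, 'I'), (3, 'I'), (4, 'M'), (5, 'M'), (6, 'M')]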
def unsubscribe(self, tag, match_type=None):
'''
Un-subscribe to events matching the passed tag.
'''
if tag is None:
return
match_func = self._get_match_func(match_type)
self.pending_tags.remove([tag, match_func])
old_events = self.pending_events
self.pending_events = []
for evt in old_events:
if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags):
            self.pending_events.append(evt)
def _get_files(extension, path):
"""
Returns a sorted list of all of the files having the same extension under the same directory
:param extension: the extension of the data files such as 'gdm'
:param path: path to the folder containing the files
:return: sorted list of files
"""
# retrieves the files sharing the same extension
files = []
for file in os.listdir(path):
if file.endswith(extension):
files.append(os.path.join(path, file))
    return sorted(files)
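For example (paths are illustrative):

# All '.gdm' files directly inside /data/samples, sorted by full path.
gdm_files = _get_files('gdm', '/data/samples')
# e.g. ['/data/samples/a.gdm', '/data/samples/b.gdm']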
def serialize_to_string(
root_processor, # type: RootProcessor
value, # type: Any
indent=None # type: Optional[Text]
):
# type: (...) -> Text
"""
Serialize the value to an XML string using the root processor.
:return: The serialized XML string.
See also :func:`declxml.serialize_to_file`
"""
if not _is_valid_root_processor(root_processor):
raise InvalidRootProcessor('Invalid root processor')
state = _ProcessorState()
state.push_location(root_processor.element_path)
root = root_processor.serialize(value, state)
state.pop_location()
# Always encode to UTF-8 because element tree does not support other
# encodings in earlier Python versions. See: https://bugs.python.org/issue1767933
serialized_value = ET.tostring(root, encoding='utf-8')
# Since element tree does not support pretty printing XML, we use minidom to do the pretty
# printing
if indent:
serialized_value = minidom.parseString(serialized_value).toprettyxml(
indent=indent, encoding='utf-8'
)
    return serialized_value.decode('utf-8')
def write_to_file(self):
"""
Writes the weeks with associated commits to file.
"""
with open('../github_stats_output/last_year_commits.csv', 'w+') as output:
output.write('date,organization,repos,members,teams,'
+ 'unique_contributors,total_contributors,forks,'
+ 'stargazers,pull_requests,open_issues,has_readme,'
+ 'has_license,pull_requests_open,pull_requests_closed,'
+ 'commits\n')
#no reverse this time to print oldest first
previous_commits = 0
for week in self.sorted_weeks:
if str(self.commits[week]) != previous_commits:#delete dups
week_formatted = datetime.datetime.utcfromtimestamp(
week ).strftime('%Y-%m-%d')
output.write(week_formatted
+ ',llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,'
+ str(self.commits[week]) + '\n')
                previous_commits = str(self.commits[week])
def infer_namespace(ac):
"""Infer the single namespace of the given accession
This function is convenience wrapper around infer_namespaces().
Returns:
* None if no namespaces are inferred
* The (single) namespace if only one namespace is inferred
* Raises an exception if more than one namespace is inferred
>>> infer_namespace("ENST00000530893.6")
'ensembl'
>>> infer_namespace("NM_01234.5")
'refseq'
>>> infer_namespace("A2BC19")
'uniprot'
N.B. The following test is disabled because Python 2 and Python 3
handle doctest exceptions differently. :-(
X>>> infer_namespace("P12345")
Traceback (most recent call last):
...
bioutils.exceptions.BioutilsError: Multiple namespaces possible for P12345
>>> infer_namespace("BOGUS99") is None
True
"""
namespaces = infer_namespaces(ac)
if not namespaces:
return None
if len(namespaces) > 1:
raise BioutilsError("Multiple namespaces possible for {}".format(ac))
    return namespaces[0]
def get_id(self):
"""
get unique identifier of this container
:return: str
"""
if self._id is None:
# FIXME: provide a better error message when key is not defined
self._id = self.inspect(refresh=False)["Id"]
    return self._id
def update_contents(self, contents, mime_type):
"""Update the contents and set the hash and modification time"""
import hashlib
import time
new_size = len(contents)
self.mime_type = mime_type
if mime_type == 'text/plain':
self.contents = contents.encode('utf-8')
else:
self.contents = contents
old_hash = self.hash
self.hash = hashlib.md5(self.contents).hexdigest()
if self.size and (old_hash != self.hash):
self.modified = int(time.time())
    self.size = new_size
def gaussian_filter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection.
Parameters
----------
sigma : scalar or sequence of scalars, default = 2
Size of the filter size as standard deviation in pixels.
A sequence is interpreted as the standard deviation for each axis.
A single scalar is applied equally to all axes.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian,
higher numbers correspond to derivatives of a gaussian.
"""
from scipy.ndimage.filters import gaussian_filter
    return self.map(lambda v: gaussian_filter(v, sigma, order), value_shape=self.value_shape)
def tokens_required(scopes='', new=False):
"""
Decorator for views to request an ESI Token.
Accepts required scopes as a space-delimited string
or list of strings of scope names.
Can require a new token to be retrieved by SSO.
Returns a QueryDict of Tokens.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# if we're coming back from SSO for a new token, return it
token = _check_callback(request)
if token and new:
tokens = Token.objects.filter(pk=token.pk)
logger.debug("Returning new token.")
return view_func(request, tokens, *args, **kwargs)
if not new:
# ensure user logged in to check existing tokens
if not request.user.is_authenticated:
logger.debug(
"Session {0} is not logged in. Redirecting to login.".format(request.session.session_key[:5]))
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
# collect tokens in db, check if still valid, return if any
tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid()
if tokens.exists():
logger.debug("Retrieved {0} tokens for {1} session {2}".format(tokens.count(), request.user,
request.session.session_key[:5]))
return view_func(request, tokens, *args, **kwargs)
# trigger creation of new token via sso
logger.debug("No tokens identified for {0} session {1}. Redirecting to SSO.".format(request.user, request.session.session_key[:5]))
from esi.views import sso_redirect
return sso_redirect(request, scopes=scopes)
return _wrapped_view
    return decorator
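A hedged usage sketch for a Django view; the import path, view body and scope string are illustrative rather than taken from the snippet above:

from django.http import HttpResponse
from esi.decorators import tokens_required  # import path assumed

@tokens_required(scopes='esi-location.read_location.v1')
def character_location(request, tokens):
    # `tokens` is the queryset of valid Tokens selected by the decorator.
    return HttpResponse("found %d usable token(s)" % tokens.count())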
def _fetch_access_token(self, url, data):
""" The real fetch access token """
logger.info('Fetching component access token')
res = self._http.post(
url=url,
data=data
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatClientException(
errcode=None,
errmsg=None,
client=self,
request=reqe.request,
response=reqe.response
)
result = res.json()
if 'errcode' in result and result['errcode'] != 0:
raise WeChatClientException(
result['errcode'],
result['errmsg'],
client=self,
request=res.request,
response=res
)
expires_in = 7200
if 'expires_in' in result:
expires_in = result['expires_in']
self.session.set(
'component_access_token',
result['component_access_token'],
expires_in
)
self.expires_at = int(time.time()) + expires_in
    return result
def xtqx(self):
"""get the normal matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
xtqx : pyemu.Matrix
"""
if self.__xtqx is None:
self.log("xtqx")
self.__xtqx = self.jco.T * (self.obscov ** -1) * self.jco
self.log("xtqx")
    return self.__xtqx
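In other words, with jco as the Jacobian :math:`\mathbf{X}` and obscov as the observation noise covariance :math:`\mathbf{Q}`, the cached attribute is the normal matrix

.. math::
    \mathbf{X}^{T} \mathbf{Q}^{-1} \mathbf{X}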
def positions(self, reverse=False):
"""returns a generator that walks the positions of this tree in DFO"""
def Posgen(reverse):
if reverse:
lastrootsib = self.last_sibling_position(self.root)
current = self.last_decendant(lastrootsib)
while current is not None:
yield current
current = self.prev_position(current)
else:
current = self.root
while current is not None:
yield current
current = self.next_position(current)
    return Posgen(reverse)
def add_init_files(path, zip_handler):
"""
adds init files to the included folder
    :param path: str
    :param zip_handler: open zipfile.ZipFile to which the __init__ files are added
"""
paths = path.split('\\')
paths = paths[:len(paths) - 1]
for sub_path in paths:
for root, dirs, files in os.walk(sub_path):
for file_to_zip in [x for x in files if '__init__.py' in x]:
filename = os.path.join(root, file_to_zip)
zip_con = filename.replace('\\', '/')
if zip_con in zip_handler.namelist():
continue
                add_file(filename, zip_handler, False)
def create_volume(self, availability_zone, size=None, snapshot_id=None):
"""Create a new volume."""
params = {"AvailabilityZone": availability_zone}
if ((snapshot_id is None and size is None) or
(snapshot_id is not None and size is not None)):
raise ValueError("Please provide either size or snapshot_id")
if size is not None:
params["Size"] = str(size)
if snapshot_id is not None:
params["SnapshotId"] = snapshot_id
query = self.query_factory(
action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
other_params=params)
d = query.submit()
    return d.addCallback(self.parser.create_volume)
def parent(self) -> Optional['CtsReference']:
""" Parent of the actual URN, for example, 1.1 for 1.1.1
:rtype: CtsReference
"""
if self.start.depth == 1 and (self.end is None or self.end.depth <= 1):
return None
else:
if self.start.depth > 1 and (self.end is None or self.end.depth == 0):
return CtsReference("{0}{1}".format(
".".join(self.start.list[:-1]),
self.start.subreference or ""
))
elif self.start.depth > 1 and self.end is not None and self.end.depth > 1:
_start = self.start.list[0:-1]
_end = self.end.list[0:-1]
if _start == _end and \
self.start.subreference is None and \
self.end.subreference is None:
return CtsReference(
".".join(_start)
)
else:
return CtsReference("{0}{1}-{2}{3}".format(
".".join(_start),
self.start.subreference or "",
".".join(_end),
self.end.subreference or ""
                ))
def create_table(
self,
parent,
table_id,
table,
initial_splits=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> parent = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `table_id`:
>>> table_id = ''
>>>
>>> # TODO: Initialize `table`:
>>> table = {}
>>>
>>> response = client.create_table(parent, table_id, table)
Args:
parent (str): The unique name of the instance in which to create the table. Values are
of the form ``projects/<project>/instances/<instance>``.
table_id (str): The name by which the new table should be referred to within the parent
instance, e.g., ``foobar`` rather than ``<parent>/tables/foobar``.
table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.Table`
initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the
table into several tablets (tablets are similar to HBase regions). Given
two split keys, ``s1`` and ``s2``, three tablets will be created,
spanning the key ranges: ``[, s1), [s1, s2), [s2, )``.
Example:
- Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",``
``"other", "zz"]``
- initial\_split\_keys :=
``["apple", "customer_1", "customer_2", "other"]``
- Key assignment:
- Tablet 1 ``[, apple) => {"a"}.``
- Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.``
- Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.``
- Tablet 4 ``[customer_2, other) => {"customer_2"}.``
- Tablet 5 ``[other, ) => {"other", "zz"}.``
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.Split`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_table" not in self._inner_api_calls:
self._inner_api_calls[
"create_table"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_table,
default_retry=self._method_configs["CreateTable"].retry,
default_timeout=self._method_configs["CreateTable"].timeout,
client_info=self._client_info,
)
request = bigtable_table_admin_pb2.CreateTableRequest(
parent=parent, table_id=table_id, table=table, initial_splits=initial_splits
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_table"](
request, retry=retry, timeout=timeout, metadata=metadata
    )
def save(self, record_key, record_data, overwrite=True, secret_key=''):
'''
a method to create a record in the collection folder
:param record_key: string with name to assign to record (see NOTES below)
:param record_data: byte data for record body
:param overwrite: [optional] boolean to overwrite records with same name
:param secret_key: [optional] string with key to encrypt data
:return: string with name of record
NOTE: record_key may only contain alphanumeric, /, _, . or -
characters and may not begin with the . or / character.
NOTE: using one or more / characters splits the key into
separate segments. these segments will appear as a
sub directories inside the record collection and each
segment is used as a separate index for that record
when using the list method
eg. lab/unittests/1473719695.2165067.json is indexed:
[ 'lab', 'unittests', '1473719695.2165067', '.json' ]
'''
title = '%s.save' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key,
'secret_key': secret_key
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate byte data
if not isinstance(record_data, bytes):
raise ValueError('%s(record_data=b"...") must be byte data.' % title)
# construct and validate file path
file_path = os.path.join(self.collection_folder, record_key)
file_path = self.fields.validate(file_path, '.record_key_path')
file_root, file_name = os.path.split(file_path)
self.fields.validate(file_name, '.record_key_comp')
while file_root != self.collection_folder:
file_root, path_node = os.path.split(file_root)
self.fields.validate(path_node, '.record_key_comp')
# check overwrite exception
from os import path, makedirs
if not overwrite:
if path.exists(file_path):
raise Exception('%s(record_key="%s") already exists. To overwrite, set overwrite=True' % (title, record_key))
# create directories in path to file
file_root, file_node = path.split(file_path)
if file_root:
if not path.exists(file_root):
makedirs(file_root)
# encrypt data
if secret_key:
from labpack.encryption import cryptolab
record_data, secret_key = cryptolab.encrypt(record_data, secret_key)
# save file
    with open(file_path, 'wb') as f:
        f.write(record_data)
# erase file date from drep files
import re
if re.search('\\.drep$', file_name):
from os import utime
file_time = 1
utime(file_path, times=(file_time, file_time))
    return record_key
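A short sketch of the key-segment behaviour described in the NOTES, assuming client is an instance of the collection class this method belongs to:

# The '/' segments become sub-directories under the collection folder and
# double as list-method indexes.
key = client.save(
    record_key='lab/unittests/1473719695.2165067.json',
    record_data=b'{"result": "pass"}',
)
# -> <collection_folder>/lab/unittests/1473719695.2165067.json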
def free_memory(cls, exclude=None):
"""Free global annotation memory."""
annotations_in_memory = Annotation.__ANNOTATIONS_IN_MEMORY__
exclude = () if exclude is None else exclude
for annotation_cls in list(annotations_in_memory.keys()):
if issubclass(annotation_cls, exclude):
continue
if issubclass(annotation_cls, cls):
            del annotations_in_memory[annotation_cls]
def load_generated_checkers(cls, args):
"""
Load checker classes from generator plugins
"""
for gen in cls._get_generator_plugins():
checkers = gen.get_checkers(args)
        cls.checkers.update(checkers)
def logistic_map(x, steps, r=4):
r"""
Generates a time series of the logistic map.
Characteristics and Background:
The logistic map is among the simplest examples for a time series that can
exhibit chaotic behavior depending on the parameter r. For r between 2 and
3, the series quickly becomes static. At r=3 the first bifurcation point is
reached after which the series starts to oscillate. Beginning with r = 3.6
it shows chaotic behavior with a few islands of stability until perfect
chaos is achieved at r = 4.
Calculating the Lyapunov exponent:
To calculate the "true" Lyapunov exponent of the logistic map, we first
have to make a few observations for maps in general that are repeated
applications of a function to a starting value.
If we have two starting values that differ by some infinitesimal
    :math:`\delta_0` then according to the definition of the Lyapunov exponent
we will have an exponential divergence:
.. math::
|\delta_n| = |\delta_0| e^{\lambda n}
We can now write that:
.. math::
e^{\lambda n} = \lim_{\delta_0 -> 0} |\frac{\delta_n}{\delta_0}|
This is the definition of the derivative :math:`\frac{dx_n}{dx_0}` of a
point :math:`x_n` in the time series with respect to the starting point
:math:`x_0` (or rather the absolute value of that derivative). Now we can
use the fact that due to the definition of our map as repetitive
application of some f we have:
.. math::
f^{n\prime}(x) = f(f(f(...f(x_0)...))) = f'(x_n-1) \cdot f'(x_n-2)
\cdot ... \cdot f'(x_0)
with
.. math::
e^{\lambda n} = |f^{n\prime}(x)|
we now have
.. math::
e^{\lambda n} &= |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
\Leftrightarrow \\
\lambda n &= \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
\Leftrightarrow \\
\lambda &= \frac{1}{n} \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
&= \frac{1}{n} \sum_{k=0}^{n-1} \ln |f'(x_k)|
With this sum we can now calculate the lyapunov exponent for any map.
For the logistic map we simply have to calculate :math:`f'(x)` and as we
have
.. math::
f(x) = r x (1-x) = rx - rx²
we now get
.. math::
f'(x) = r - 2 rx
References:
.. [lm_1] https://en.wikipedia.org/wiki/Tent_map
.. [lm_2] https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/
Args:
x (float):
starting point
steps (int):
number of steps for which the generator should run
Kwargs:
r (int):
parameter r that controls the behavior of the map
Returns:
generator object:
the generator that creates the time series
"""
for _ in range(steps):
x = r * x * (1 - x)
        yield x
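A short sketch that combines the generator with the derivative :math:`f'(x) = r - 2rx` derived above to estimate the Lyapunov exponent numerically; for the fully chaotic case r = 4 the exact value is :math:`\ln 2 \approx 0.693`:

import math

def lyapunov_logistic(x0=0.1, steps=10000, r=4):
    # lambda = (1/n) * sum_k ln|f'(x_k)|  with  f'(x) = r - 2*r*x
    orbit = [x0] + list(logistic_map(x0, steps - 1, r=r))  # x_0 .. x_{n-1}
    return sum(math.log(abs(r - 2 * r * x)) for x in orbit) / steps

print(lyapunov_logistic())  # ~0.693 for r = 4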
def main():
'''Main routine.'''
# process arguments
if len(sys.argv) < 3:
usage()
rgname = sys.argv[1]
vmss = sys.argv[2]
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit("Error: Expecting azurermconfig.json in current folder")
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
sub_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# get metric definitions
provider = 'Microsoft.Compute'
resource_type = 'virtualMachineScaleSets'
metric_definitions = azurerm.list_metric_defs_for_resource(access_token, sub_id, rgname,
provider, resource_type, vmss)
print(json.dumps(metric_definitions, sort_keys=False, indent=2, separators=(',', ': ')))
metrics = azurerm.get_metrics_for_resource(access_token, sub_id, rgname,
provider, resource_type, vmss)
    print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))
def run(juttle,
deployment_name,
program_name=None,
persist=False,
token_manager=None,
app_url=defaults.APP_URL):
"""
run a juttle program through the juttle streaming API and return the
various events that are part of running a Juttle program which include:
* Initial job status details including information to associate
multiple flowgraphs with their individual outputs (sinks):
{
"status": "ok",
"job": {
"channel_id": "56bde5f0",
"_start_time": "2015-10-03T06:59:49.233Z",
"alias": "jut-tools program 1443855588",
"_ms_begin": 1443855589233,
"user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510",
"timeout": 5,
"id": "b973bce6"
},
"now": "2015-10-03T06:59:49.230Z",
"stats": ...
"sinks": [
{
"location": {
"start": {
"column": 17,
"line": 1,
"offset": 16
},
"end": {
"column": 24,
"line": 1,
"offset": 23
},
"filename": "main"
},
"name": "table",
"channel": "sink237",
"options": {
"_jut_time_bounds": []
}
},
        ... as many sinks as there are flowgraphs in your program
]
}
* Each set of points returned along with the indication of which sink
they belong to:
{
"points": [ array of points ],
"sink": sink_id
}
* Error event indicating where in your program the error occurred
{
"error": true,
payload with "info" and "context" explaining exact error
}
* Warning event indicating where in your program the error occurred
{
"warning": true,
payload with "info" and "context" explaining exact warning
}
* ...
juttle: juttle program to execute
deployment_name: the deployment name to execute the program on
persist: if set to True then we won't wait for response data and will
disconnect from the websocket leaving the program running in
             the background if it uses a background output
(http://docs.jut.io/juttle-guide/#background_outputs) and
therefore becomes a persistent job.
token_manager: auth.TokenManager object
app_url: optional argument used primarily for internal Jut testing
"""
headers = token_manager.get_access_token_headers()
data_url = get_juttle_data_url(deployment_name,
app_url=app_url,
token_manager=token_manager)
websocket = __wss_connect(data_url, token_manager)
data = websocket.recv()
channel_id_obj = json.loads(data)
if is_debug_enabled():
debug('got channel response %s', json.dumps(channel_id_obj))
channel_id = channel_id_obj['channel_id']
juttle_job = {
'channel_id': channel_id,
'alias': program_name,
'program': juttle
}
response = requests.post('%s/api/v1/jobs' % data_url,
data=json.dumps(juttle_job),
headers=headers)
if response.status_code != 200:
yield {
"error": True,
"context": response.json()
}
return
job_info = response.json()
# yield job_info so the caller to this method can figure out which sinks
# correlate to which flowgraphs
yield job_info
job_id = job_info['job']['id']
if is_debug_enabled():
debug('started job %s', json.dumps(job_info))
for data in connect_job(job_id,
deployment_name,
token_manager=token_manager,
app_url=app_url,
persist=persist,
websocket=websocket,
data_url=data_url):
        yield data
def RdatasetsBM(database,host=rbiomart_host):
"""
Lists BioMart datasets through a RPY2 connection.
:param database: a database listed in RdatabasesBM()
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
ensemblMart=biomaRt.useMart(database, host=host)
    print(biomaRt.listDatasets(ensemblMart))
def ls(self):
"""Return a list of *all* files & dirs in the repo.
Think of this as a recursive `ls` command from the root of the repo.
"""
tree = self.ls_tree()
    return [t.get('file') for t in tree if t.get('file')]
def get_lonlatalt(self, utc_time):
"""Calculate sublon, sublat and altitude of satellite.
http://celestrak.com/columns/v02n03/
"""
(pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position(
utc_time, normalize=True)
lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - astronomy.gmst(utc_time))
% (2 * np.pi))
lon = np.where(lon > np.pi, lon - np.pi * 2, lon)
lon = np.where(lon <= -np.pi, lon + np.pi * 2, lon)
r = np.sqrt(pos_x ** 2 + pos_y ** 2)
lat = np.arctan2(pos_z, r)
e2 = F * (2 - F)
while True:
lat2 = lat
c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2)))
lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r)
if np.all(abs(lat - lat2) < 1e-10):
break
alt = r / np.cos(lat) - c
alt *= A
    return np.rad2deg(lon), np.rad2deg(lat), alt
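The loop above is the standard fixed-point iteration for geodetic latitude; in the code's variables (:math:`r = \sqrt{x^2 + y^2}`, :math:`e^2 = F(2 - F)`, with A and F being the module's Earth radius and flattening constants):

.. math::
    C = \frac{1}{\sqrt{1 - e^2 \sin^2 \varphi}}, \qquad
    \varphi \leftarrow \arctan\frac{z + C e^2 \sin\varphi}{r}

repeated until successive latitudes agree to :math:`10^{-10}`, after which the altitude follows as :math:`(r / \cos\varphi - C)\,A`.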
def visible_object_groups(self):
"""Return iterator of object group indexes that are set 'visible'
:rtype: Iterator
"""
return (i for (i, l) in enumerate(self.layers)
            if l.visible and isinstance(l, TiledObjectGroup))
def list_joined_groups(self, user_alias=None):
"""
    List the groups the current (or given) user has joined.
    :param user_alias: username; defaults to the current user
    :return: a single page of results
"""
xml = self.api.xml(API_GROUP_LIST_JOINED_GROUPS % (user_alias or self.api.user_alias))
xml_results = xml.xpath('//div[@class="group-list group-cards"]/ul/li')
results = []
for item in xml_results:
try:
icon = item.xpath('.//img/@src')[0]
link = item.xpath('.//div[@class="title"]/a')[0]
url = link.get('href')
name = link.text
alias = url.rstrip('/').rsplit('/', 1)[1]
user_count = int(item.xpath('.//span[@class="num"]/text()')[0][1:-1])
results.append({
'icon': icon,
'alias': alias,
'url': url,
'name': name,
'user_count': user_count,
})
except Exception as e:
self.api.logger.exception('parse joined groups exception: %s' % e)
    return build_list_result(results, xml)
def log(self, string):
"""
    Appends the input string to the in-memory log (self.log_data) and passes it to the
    log function (self.log_function); if no log function is set, the string is printed.
"""
self.log_data.append(string)
if self.log_function is None:
print(string)
else:
        self.log_function(string)
def get_name(obj, setting_name='LONG_NAME_FORMAT'):
"""
Returns the correct order of the name according to the current language.
"""
nickname = obj.get_nickname()
romanized_first_name = obj.get_romanized_first_name()
romanized_last_name = obj.get_romanized_last_name()
non_romanized_first_name = obj.get_non_romanized_first_name()
non_romanized_last_name = obj.get_non_romanized_last_name()
non_translated_title = obj.get_title()
non_translated_gender = obj.get_gender()
# when the title is blank, gettext returns weird header text. So if this
# occurs, we will pass it on blank without gettext
if non_translated_title:
title = gettext(non_translated_title)
else:
title = non_translated_title
if non_translated_gender:
gender = gettext(non_translated_gender)
else:
gender = non_translated_gender
format_string = u'{}'.format(get_format(setting_name))
format_kwargs = {}
if '{n}' in format_string:
format_kwargs.update({'n': nickname})
if '{N}' in format_string:
format_kwargs.update({'N': nickname.upper()})
if '{f}' in format_string:
format_kwargs.update({'f': romanized_first_name})
if '{F}' in format_string:
format_kwargs.update({'F': romanized_first_name.upper()})
if '{l}' in format_string:
format_kwargs.update({'l': romanized_last_name})
if '{L}' in format_string:
format_kwargs.update({'L': romanized_last_name.upper()})
if '{a}' in format_string:
format_kwargs.update({'a': non_romanized_first_name})
if '{A}' in format_string:
format_kwargs.update({'A': non_romanized_first_name.upper()})
if '{x}' in format_string:
format_kwargs.update({'x': non_romanized_last_name})
if '{X}' in format_string:
format_kwargs.update({'X': non_romanized_last_name.upper()})
if '{t}' in format_string:
format_kwargs.update({'t': title})
if '{T}' in format_string:
format_kwargs.update({'T': title.upper()})
if '{g}' in format_string:
format_kwargs.update({'g': gender})
if '{G}' in format_string:
format_kwargs.update({'G': gender.upper()})
    return format_string.format(**format_kwargs)
def serialize_dict(self, attr, dict_type, **kwargs):
"""Serialize a dictionary of objects.
:param dict attr: Object to be serialized.
:param str dict_type: Type of object in the dictionary.
:param bool required: Whether the objects in the dictionary must
not be None or empty.
:rtype: dict
"""
serialization_ctxt = kwargs.get("serialization_ctxt", {})
serialized = {}
for key, value in attr.items():
try:
serialized[self.serialize_unicode(key)] = self.serialize_data(
value, dict_type, **kwargs)
except ValueError:
serialized[self.serialize_unicode(key)] = None
if 'xml' in serialization_ctxt:
# XML serialization is more complicated
xml_desc = serialization_ctxt['xml']
xml_name = xml_desc['name']
final_result = _create_xml_node(
xml_name,
xml_desc.get('prefix', None),
xml_desc.get('ns', None)
)
for key, value in serialized.items():
ET.SubElement(final_result, key).text = value
return final_result
    return serialized
def _build_layers(self, inputs, num_outputs, options):
"""Process the flattened inputs.
Note that dict inputs will be flattened into a vector. To define a
model that processes the components separately, use _build_layers_v2().
"""
hiddens = options.get("fcnet_hiddens")
activation = get_activation_fn(options.get("fcnet_activation"))
with tf.name_scope("fc_net"):
i = 1
last_layer = inputs
for size in hiddens:
label = "fc{}".format(i)
last_layer = slim.fully_connected(
last_layer,
size,
weights_initializer=normc_initializer(1.0),
activation_fn=activation,
scope=label)
i += 1
label = "fc_out"
output = slim.fully_connected(
last_layer,
num_outputs,
weights_initializer=normc_initializer(0.01),
activation_fn=None,
scope=label)
        return output, last_layer
def isID(self, elem, attr):
"""Determine whether an attribute is of type ID. In case we
have DTD(s) then this is done if DTD loading has been
requested. In the case of HTML documents parsed with the
HTML parser, then ID detection is done systematically. """
if elem is None: elem__o = None
else: elem__o = elem._o
if attr is None: attr__o = None
else: attr__o = attr._o
ret = libxml2mod.xmlIsID(self._o, elem__o, attr__o)
    return ret
def _get_encoder_data_shapes(self, bucket_key: int, batch_size: int) -> List[mx.io.DataDesc]:
"""
Returns data shapes of the encoder module.
:param bucket_key: Maximum input length.
:param batch_size: Batch size.
:return: List of data descriptions.
"""
return [mx.io.DataDesc(name=C.SOURCE_NAME,
shape=(batch_size,) + self.input_size,
                           layout=C.BATCH_MAJOR_IMAGE)]
def check_auth(email, password):
"""Check if a username/password combination is valid.
"""
try:
user = User.get(User.email == email)
except User.DoesNotExist:
return False
    return password == user.password
def _pip_search(stdout, stderr):
"""Callback for pip search."""
result = {}
lines = to_text_string(stdout).split('\n')
while '' in lines:
lines.remove('')
for line in lines:
if ' - ' in line:
parts = line.split(' - ')
name = parts[0].strip()
description = parts[1].strip()
result[name] = description
    return result
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = str(self.raw_requestline, 'iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http_client.parse_headers(self.rfile,
_class=self.MessageClass)
except http_client.LineTooLong:
self.send_error(400, "Line too long")
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
    return True
def get_lsf_status():
"""Count and print the number of jobs in various LSF states
"""
status_count = {'RUN': 0,
'PEND': 0,
'SUSP': 0,
'USUSP': 0,
'NJOB': 0,
'UNKNWN': 0}
try:
subproc = subprocess.Popen(['bjobs'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
subproc.stderr.close()
output = subproc.stdout.readlines()
except OSError:
return status_count
for line in output[1:]:
line = line.strip().split()
# Protect against format of multiproc jobs
if len(line) < 5:
continue
status_count['NJOB'] += 1
for k in status_count:
if line[2] == k:
status_count[k] += 1
    return status_count
def SVGdocument():
"Create default SVG document"
import xml.dom.minidom
implementation = xml.dom.minidom.getDOMImplementation()
doctype = implementation.createDocumentType(
"svg", "-//W3C//DTD SVG 1.1//EN",
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
)
document= implementation.createDocument(None, "svg", doctype)
document.documentElement.setAttribute(
'xmlns', 'http://www.w3.org/2000/svg'
)
    return document
def dateof(tag_name, tags):
"""Given a list of tags, returns the datetime of the tag with the given name; Otherwise None."""
for tag in tags:
if tag['name'] == tag_name:
commit = read_url(tag['commit']['url'])
return parse_timestamp(commit['commit']['committer']['date'])
    return None
def register_parser(self, type, parser, **meta):
"""Registers a parser of a format.
:param type: The unique name of the format
:param parser: The method to parse data as the format
:param meta: The extra information associated with the format
"""
try:
self.registered_formats[type]['parser'] = parser
except KeyError:
self.registered_formats[type] = {'parser': parser}
if meta:
        self.register_meta(type, **meta)
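A small hedged usage sketch; registry stands for whatever object owns registered_formats, and the format name and metadata are illustrative:

import json

registry.register_parser('json', json.loads, extension='.json')
parse = registry.registered_formats['json']['parser']
print(parse('{"a": 1}'))  # {'a': 1}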
def home_mode_status(self, **kwargs):
"""Returns the status of Home Mode"""
api = self._api_info['home_mode']
payload = dict({
'api': api['name'],
'method': 'GetInfo',
'version': api['version'],
'_sid': self._sid
}, **kwargs)
response = self._get_json_with_retry(api['url'], payload)
    return response['data']['on']
def all(cls, client, **kwargs):
"""
fetch all option positions
"""
max_date = kwargs['max_date'] if 'max_date' in kwargs else None
max_fetches = \
kwargs['max_fetches'] if 'max_fetches' in kwargs else None
url = 'https://api.robinhood.com/options/positions/'
params = {}
data = client.get(url, params=params)
results = data["results"]
if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):
return results
if max_fetches == 1:
return results
fetches = 1
while data["next"]:
fetches = fetches + 1
data = client.get(data["next"])
results.extend(data["results"])
if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):
return results
if max_fetches and (fetches >= max_fetches):
return results
    return results
def create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):
"""
Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance
"""
    return get_attribute(module_name, class_name)(*args, **kwargs)
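For example, mirroring the ``from module import class; class(*args, **kwargs)`` pattern described above:

# Equivalent to: from datetime import date; date(2024, 1, 31)
d = create_object('datetime', 'date', args=(2024, 1, 31))
print(d.isoformat())  # 2024-01-31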
def pivot(self,binned=True):
"""Calculate :ref:`pivot wavelength <pysynphot-formula-pivwv>`
of the observation.
.. note::
This is the calculation performed when ETC invokes ``calcphot``.
Parameters
----------
binned : bool
Use binned dataset for calculations. Otherwise, use native dataset.
Returns
-------
ans : float
Pivot wavelength.
"""
if binned:
wave = self.binwave
else:
wave = self.wave
countmulwave = self(wave)*wave
countdivwave = self(wave)/wave
num = self.trapezoidIntegration(wave,countmulwave)
den = self.trapezoidIntegration(wave,countdivwave)
if num == 0.0 or den == 0.0:
return 0.0
    return math.sqrt(num/den)
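With :math:`S(\lambda)` the observation sampled on the chosen wavelength set, the two trapezoid integrations above evaluate

.. math::
    \lambda_{\mathrm{pivot}} = \sqrt{\frac{\int S(\lambda)\,\lambda\,d\lambda}{\int S(\lambda)/\lambda\,d\lambda}}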
def main():
"""Given an input whl file and target version, create a copy of the whl with that version.
This is accomplished via string replacement in files matching a list of globs. Pass the
optional `--glob` argument to add additional globs: ie `--glob='thing-to-match*.txt'`.
"""
parser = argparse.ArgumentParser()
parser.add_argument('whl_file',
help='The input whl file.')
parser.add_argument('dest_dir',
help='The destination directory for the output whl.')
parser.add_argument('target_version',
help='The target version of the output whl.')
parser.add_argument('--glob', action='append',
default=[
'*.dist-info/*',
'*-nspkg.pth',
],
help='Globs (fnmatch) to rewrite within the whl: may be specified multiple times.')
args = parser.parse_args()
    reversion(args)
async def main():
"""
Main code
"""
# Create Client from endpoint string in Duniter format
client = Client(BMAS_ENDPOINT)
# Get the node summary infos to test the connection
response = await client(bma.node.summary)
print(response)
# prompt hidden user entry
salt = getpass.getpass("Enter your passphrase (salt): ")
# prompt hidden user entry
password = getpass.getpass("Enter your password: ")
# create keys from credentials
key = SigningKey.from_credentials(salt, password)
pubkey_from = key.pubkey
# prompt entry
pubkey_to = input("Enter recipient pubkey: ")
# capture current block to get version and currency and blockstamp
current_block = await client(bma.blockchain.current)
# capture sources of account
response = await client(bma.tx.sources, pubkey_from)
if len(response['sources']) == 0:
print("no sources found for account %s" % pubkey_to)
exit(1)
# get the first source
source = response['sources'][0]
# create the transaction document
transaction = get_transaction_document(current_block, source, pubkey_from, pubkey_to)
# sign document
transaction.sign([key])
# send the Transaction document to the node
response = await client(bma.tx.process, transaction.signed_raw())
if response.status == 200:
print(await response.text())
else:
print("Error while publishing transaction: {0}".format(await response.text()))
# Close client aiohttp session
    await client.close()
def activate_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
    return callback(config)
def parse_time(timestring):
"""Attepmts to parse an ISO8601 formatted ``timestring``.
Returns a ``datetime.datetime`` object.
"""
timestring = str(timestring).strip()
for regex, pattern in TIME_FORMATS:
if regex.match(timestring):
found = regex.search(timestring).groupdict()
dt = datetime.utcnow().strptime(found['matched'], pattern)
dt = datetime.combine(date.today(), dt.time())
if 'fraction' in found and found['fraction'] is not None:
dt = dt.replace(microsecond=int(found['fraction'][1:]))
if 'timezone' in found and found['timezone'] is not None:
dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
return dt
    raise ParseError()
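A hedged usage example, assuming TIME_FORMATS holds the usual ISO 8601 time patterns (time of day with optional fraction and timezone, exposed through the named groups matched, fraction and timezone used above):

dt = parse_time('13:45:30.250000+01:00')
print(dt.timetz())   # today's date combined with 13:45:30.250000+01:00
# strings matching none of TIME_FORMATS raise ParseError()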
def get_list(self, url=None, callback=None, limit=100, **data):
"""Get a list of this github component
:param url: full url
:param Comp: a :class:`.Component` class
:param callback: Optional callback
:param limit: Optional number of items to retrieve
:param data: additional query data
:return: a list of ``Comp`` objects with data
"""
url = url or str(self)
data = dict(((k, v) for k, v in data.items() if v))
all_data = []
if limit:
data['per_page'] = min(limit, 100)
while url:
response = self.http.get(url, params=data, auth=self.auth)
response.raise_for_status()
result = response.json()
n = m = len(result)
if callback:
result = callback(result)
m = len(result)
all_data.extend(result)
if limit and len(all_data) > limit:
all_data = all_data[:limit]
break
elif m == n:
data = None
next = response.links.get('next', {})
url = next.get('url')
else:
break
return all_data | Get a list of this github component
:param url: full url
:param Comp: a :class:`.Component` class
:param callback: Optional callback
:param limit: Optional number of items to retrieve
:param data: additional query data
:return: a list of ``Comp`` objects with data |
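A hedged sketch of calling get_list; 'repo' stands in for an already-constructed component instance, and the URL, callback, and query data below are hypothetical.
# Hypothetical: 'repo' is assumed to be a component exposing get_list().
issues = repo.get_list(
    url='https://api.github.com/repos/example/example/issues',  # illustrative URL
    callback=lambda items: [i for i in items if 'pull_request' not in i],  # drop PRs (assumption)
    limit=50,
    state='open',  # extra query data forwarded as request parameters
)
print(len(issues))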
def _handle_successor(self, job, successor, successors):
"""
Returns a new CFGJob instance for further analysis, or None if there is no immediate state to perform the
analysis on.
:param CFGJob job: The current job.
"""
state = successor
all_successor_states = successors
addr = job.addr
# The PathWrapper instance to return
pw = None
job.successor_status[state] = ""
new_state = state.copy()
suc_jumpkind = state.history.jumpkind
suc_exit_stmt_idx = state.scratch.exit_stmt_idx
suc_exit_ins_addr = state.scratch.exit_ins_addr
if suc_jumpkind in {'Ijk_EmWarn', 'Ijk_NoDecode', 'Ijk_MapFail', 'Ijk_NoRedir',
'Ijk_SigTRAP', 'Ijk_SigSEGV', 'Ijk_ClientReq'}:
# Ignore SimExits that are of these jumpkinds
job.successor_status[state] = "Skipped"
return [ ]
call_target = job.extra_info['call_target']
if suc_jumpkind == "Ijk_FakeRet" and call_target is not None:
# if the call points to a SimProcedure that doesn't return, we don't follow the fakeret anymore
if self.project.is_hooked(call_target):
sim_proc = self.project._sim_procedures[call_target]
if sim_proc.NO_RET:
return [ ]
# Get target address
try:
target_addr = state.solver.eval_one(state.ip)
except (SimValueError, SimSolverModeError):
# It cannot be concretized currently. Maybe we can handle it later, maybe it just cannot be concretized
target_addr = None
if suc_jumpkind == "Ijk_Ret":
target_addr = job.call_stack.current_return_target
if target_addr is not None:
new_state.ip = new_state.solver.BVV(target_addr, new_state.arch.bits)
if target_addr is None:
# Unlucky...
return [ ]
if state.thumb:
# Make sure addresses are always odd. It is important to encode this information in the address for the
# time being.
target_addr |= 1
# see if the target successor is in our whitelist
if self._address_whitelist is not None:
if target_addr not in self._address_whitelist:
l.debug("Successor %#x is not in the address whitelist. Skip.", target_addr)
return [ ]
# see if this edge is in the base graph
if self._base_graph is not None:
# TODO: make it more efficient. the current implementation is half-assed and extremely slow
for src_, dst_ in self._base_graph.edges():
if src_.addr == addr and dst_.addr == target_addr:
break
else:
# not found
l.debug("Edge (%#x -> %#x) is not found in the base graph. Skip.", addr, target_addr)
return [ ]
# Fix target_addr for syscalls
if suc_jumpkind.startswith("Ijk_Sys"):
syscall_proc = self.project.simos.syscall(new_state)
if syscall_proc is not None:
target_addr = syscall_proc.addr
self._pre_handle_successor_state(job.extra_info, suc_jumpkind, target_addr)
if suc_jumpkind == "Ijk_FakeRet":
if target_addr == job.extra_info['last_call_exit_target']:
l.debug("... skipping a fake return exit that has the same target with its call exit.")
job.successor_status[state] = "Skipped"
return [ ]
if job.extra_info['skip_fakeret']:
l.debug('... skipping a fake return exit since the function it\'s calling doesn\'t return')
job.successor_status[state] = "Skipped - non-returning function 0x%x" % job.extra_info['call_target']
return [ ]
# TODO: Make it optional
if (suc_jumpkind == 'Ijk_Ret' and
self._call_depth is not None and
len(job.call_stack) <= 1
):
# We cannot continue anymore since this is the end of the function where we started tracing
l.debug('... reaching the end of the starting function, skip.')
job.successor_status[state] = "Skipped - reaching the end of the starting function"
return [ ]
# Create the new call stack of target block
new_call_stack = self._create_new_call_stack(addr, all_successor_states, job, target_addr,
suc_jumpkind)
# Create the callstack suffix
new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
# Tuple that will be used to index this exit
new_tpl = self._generate_block_id(new_call_stack_suffix, target_addr, suc_jumpkind.startswith('Ijk_Sys'))
# We might have changed the mode for this basic block
# before. Make sure it is still running in 'fastpath' mode
self._reset_state_mode(new_state, 'fastpath')
pw = CFGJob(target_addr,
new_state,
self._context_sensitivity_level,
src_block_id=job.block_id,
src_exit_stmt_idx=suc_exit_stmt_idx,
src_ins_addr=suc_exit_ins_addr,
call_stack=new_call_stack,
jumpkind=suc_jumpkind,
)
# Special case: If the binary has symbols and the target address is a function, but for some reason (e.g.,
# a tail-call optimization) the CallStack's function address is still the old function address, we will have to
# overwrite it here.
if not self._is_call_jumpkind(pw.jumpkind):
target_symbol = self.project.loader.find_symbol(target_addr)
if target_symbol and target_symbol.is_function:
# Force update the function address
pw.func_addr = target_addr
# Generate new exits
if suc_jumpkind == "Ijk_Ret":
# This is the real return exit
job.successor_status[state] = "Appended"
elif suc_jumpkind == "Ijk_FakeRet":
# This is the default "fake" retn that generated at each
# call. Save them first, but don't process them right
# away
# st = self.project._simos.prepare_call_state(new_state, initial_state=saved_state)
st = new_state
self._reset_state_mode(st, 'fastpath')
pw = None # clear the job
pe = PendingJob(job.func_addr,
job.extra_info['call_target'],
st,
job.block_id,
suc_exit_stmt_idx,
suc_exit_ins_addr,
new_call_stack
)
self._pending_jobs[new_tpl] = pe
self._register_analysis_job(pe.caller_func_addr, pe)
job.successor_status[state] = "Pended"
elif self._traced_addrs[new_call_stack_suffix][target_addr] >= 1 and suc_jumpkind == "Ijk_Ret":
# This is a corner case for the f****** ARM instruction
# like
# BLEQ <address>
# If we have analyzed the boring exit before returning from that called address, we will lose the link
# between the last block of the function being called and the basic block it returns to. We cannot
# reanalyze the basic block as we are not flow-sensitive, but we can still record the connection and make
            # up for it afterwards.
pass
else:
job.successor_status[state] = "Appended"
if job.extra_info['is_call_jump'] and job.extra_info['call_target'] in self._non_returning_functions:
job.extra_info['skip_fakeret'] = True
if not pw:
return [ ]
if self._base_graph is not None:
# remove all existing jobs that has the same block ID
if next((en for en in self.jobs if en.block_id == pw.block_id), None):
# TODO: this is very hackish. Reimplement this logic later
self._job_info_queue = [entry for entry in self._job_info_queue if entry.job.block_id != pw.block_id]
# register the job
self._register_analysis_job(pw.func_addr, pw)
return [ pw ] | Returns a new CFGJob instance for further analysis, or None if there is no immediate state to perform the
analysis on.
:param CFGJob job: The current job. |
def getKwAsDict(self, kw):
""" return keyword configuration as a dict
Usage: rdict = getKwAsDict(kw)
"""
self.getKw(kw)
return self.str2dict(self.confstr) | return keyword configuration as a dict
Usage: rdict = getKwAsDict(kw) |
def get_order_detail(self, code):
"""
        Query the order details available with A-share Level 2 permission.
        :param code: stock code, e.g. 'HK.02318'
        :return: (ret, data)
                ret == RET_OK: data is a dict with the following fields
                ret != RET_OK: data is an error string
                {'code': stock code
                'Ask': [ order_num, [order_volume1, order_volume2] ]
                'Bid': [ order_num, [order_volume1, order_volume2] ]
                }
                'Ask' is the sell side and 'Bid' the buy side. order_num is the number of orders; order_volume is the volume of each order. Currently at most the first 50 orders' volumes are returned, so order_num may exceed the number of order_volume entries.
"""
if code is None or is_str(code) is False:
error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
return RET_ERROR, error_str
query_processor = self._get_sync_query_processor(
OrderDetail.pack_req, OrderDetail.unpack_rsp)
kargs = {
"code": code,
"conn_id": self.get_sync_conn_id()
}
ret_code, msg, order_detail = query_processor(**kargs)
if ret_code == RET_ERROR:
return ret_code, msg
        return RET_OK, order_detail | Query the order details available with A-share Level 2 permission.
    :param code: stock code, e.g. 'HK.02318'
    :return: (ret, data)
            ret == RET_OK: data is a dict with the following fields
            ret != RET_OK: data is an error string
            {'code': stock code
            'Ask': [ order_num, [order_volume1, order_volume2] ]
            'Bid': [ order_num, [order_volume1, order_volume2] ]
            }
            'Ask' is the sell side and 'Bid' the buy side. order_num is the number of orders; order_volume is the volume of each order. Currently at most the first 50 orders' volumes are returned, so order_num may exceed the number of order_volume entries.
def calculate_rate(phone_number, address_country_code=None, address_exception=None):
"""
Calculates the VAT rate based on a telephone number
:param phone_number:
The string phone number, in international format with leading +
:param address_country_code:
The user's country_code, as detected from billing_address or
declared_residence. This prevents an UndefinitiveError from being
raised.
:param address_exception:
The user's exception name, as detected from billing_address or
declared_residence. This prevents an UndefinitiveError from being
raised.
:raises:
ValueError - error with phone number provided
UndefinitiveError - when no address_country_code and address_exception are provided and the phone number area code matching isn't specific enough
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None])
"""
if not phone_number:
raise ValueError('No phone number provided')
if not isinstance(phone_number, str_cls):
raise ValueError('Phone number is not a string')
phone_number = phone_number.strip()
phone_number = re.sub('[^+0-9]', '', phone_number)
if not phone_number or phone_number[0] != '+':
raise ValueError('Phone number is not in international format with a leading +')
phone_number = phone_number[1:]
if not phone_number:
raise ValueError('Phone number does not appear to contain any digits')
country_code = _lookup_country_code(phone_number)
if not country_code:
raise ValueError('Phone number does not appear to be a valid international phone number')
if country_code in CALLING_CODE_EXCEPTIONS:
for info in CALLING_CODE_EXCEPTIONS[country_code]:
if not re.match(info['regex'], phone_number):
continue
mapped_country = info['country_code']
mapped_name = info['name']
if not info['definitive']:
if address_country_code is None:
raise UndefinitiveError('It is not possible to determine the users VAT rates based on the information provided')
if address_country_code != mapped_country:
continue
if address_exception != info['name']:
continue
rate = rates.BY_COUNTRY[mapped_country]['exceptions'][mapped_name]
return (rate, mapped_country, mapped_name)
if country_code not in rates.BY_COUNTRY:
return (Decimal('0.0'), country_code, None)
return (rates.BY_COUNTRY[country_code]['rate'], country_code, None) | Calculates the VAT rate based on a telephone number
:param phone_number:
The string phone number, in international format with leading +
:param address_country_code:
The user's country_code, as detected from billing_address or
declared_residence. This prevents an UndefinitiveError from being
raised.
:param address_exception:
The user's exception name, as detected from billing_address or
declared_residence. This prevents an UndefinitiveError from being
raised.
:raises:
ValueError - error with phone number provided
UndefinitiveError - when no address_country_code and address_exception are provided and the phone number area code matching isn't specific enough
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None]) |
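A small usage sketch, assuming calculate_rate and UndefinitiveError are importable from this module; the phone number is made up and the returned rate depends on the bundled rates tables.
try:
    rate, country, exception_name = calculate_rate('+442071234567')  # hypothetical UK number
    print(rate, country, exception_name)
except UndefinitiveError:
    # The area code alone was ambiguous; retry with address information.
    rate, country, exception_name = calculate_rate(
        '+442071234567', address_country_code='GB')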
def _Rforce(self, R, z, phi=0, t=0):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
radial force at (R,z, phi)
HISTORY:
2016-06-06 - Written - Aladdin
"""
if not self.isNonAxi and phi is None:
phi= 0.
r, theta, phi = bovy_coords.cyl_to_spher(R,z,phi)
#x = R
dr_dR = nu.divide(R,r); dtheta_dR = nu.divide(z,r**2); dphi_dR = 0
return self._computeforceArray(dr_dR, dtheta_dR, dphi_dR, R,z,phi) | NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
radial force at (R,z, phi)
HISTORY:
2016-06-06 - Written - Aladdin |
def _set_attributes_on_managed_object(self, managed_object, attributes):
"""
Given a kmip.pie object and a dictionary of attributes, attempt to set
the attribute values on the object.
"""
for attribute_name, attribute_value in six.iteritems(attributes):
object_type = managed_object._object_type
if self._attribute_policy.is_attribute_applicable_to_object_type(
attribute_name,
object_type):
self._set_attribute_on_managed_object(
managed_object,
(attribute_name, attribute_value)
)
else:
name = object_type.name
raise exceptions.InvalidField(
"Cannot set {0} attribute on {1} object.".format(
attribute_name,
''.join([x.capitalize() for x in name.split('_')])
)
) | Given a kmip.pie object and a dictionary of attributes, attempt to set
the attribute values on the object. |
def do_capture(parser, token):
"""
Capture the contents of a tag output.
Usage:
.. code-block:: html+django
{% capture %}..{% endcapture %} # output in {{ capture }}
{% capture silent %}..{% endcapture %} # output in {{ capture }} only
{% capture as varname %}..{% endcapture %} # output in {{ varname }}
{% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only
For example:
.. code-block:: html+django
{# Allow templates to override the page title/description #}
<meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
<title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
{# copy the values to the Social Media meta tags #}
<meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" />
<meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" />
"""
bits = token.split_contents()
# tokens
t_as = 'as'
t_silent = 'silent'
var = 'capture'
silent = False
num_bits = len(bits)
if len(bits) > 4:
raise TemplateSyntaxError("'capture' node supports '[as variable] [silent]' parameters.")
elif num_bits == 4:
t_name, t_as, var, t_silent = bits
silent = True
elif num_bits == 3:
t_name, t_as, var = bits
elif num_bits == 2:
t_name, t_silent = bits
silent = True
else:
var = 'capture'
silent = False
if t_silent != 'silent' or t_as != 'as':
raise TemplateSyntaxError("'capture' node expects 'as variable' or 'silent' syntax.")
nodelist = parser.parse(('endcapture',))
parser.delete_first_token()
return CaptureNode(nodelist, var, silent) | Capture the contents of a tag output.
Usage:
.. code-block:: html+django
{% capture %}..{% endcapture %} # output in {{ capture }}
{% capture silent %}..{% endcapture %} # output in {{ capture }} only
{% capture as varname %}..{% endcapture %} # output in {{ varname }}
{% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only
For example:
.. code-block:: html+django
{# Allow templates to override the page title/description #}
<meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" />
<title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title>
{# copy the values to the Social Media meta tags #}
<meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" />
<meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" /> |
def parameters_to_segments(origins, vectors, parameters):
"""
Convert a parametric line segment representation to
a two point line segment representation
Parameters
------------
origins : (n, 3) float
Line origin point
vectors : (n, 3) float
Unit line directions
parameters : (n, 2) float
Start and end distance pairs for each line
Returns
--------------
segments : (n, 2, 3) float
Line segments defined by start and end points
"""
# don't copy input
origins = np.asanyarray(origins, dtype=np.float64)
vectors = np.asanyarray(vectors, dtype=np.float64)
parameters = np.asanyarray(parameters, dtype=np.float64)
# turn the segments into a reshapable 2D array
segments = np.hstack((origins + vectors * parameters[:, :1],
origins + vectors * parameters[:, 1:]))
return segments.reshape((-1, 2, origins.shape[1])) | Convert a parametric line segment representation to
a two point line segment representation
Parameters
------------
origins : (n, 3) float
Line origin point
vectors : (n, 3) float
Unit line directions
parameters : (n, 2) float
Start and end distance pairs for each line
Returns
--------------
segments : (n, 2, 3) float
Line segments defined by start and end points |
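A self-contained numpy example of the conversion above; the arrays are made up.
import numpy as np

origins = np.array([[0.0, 0.0, 0.0]])   # line passes through the origin
vectors = np.array([[1.0, 0.0, 0.0]])   # unit direction along +X
parameters = np.array([[1.0, 3.0]])     # start and end distances along the line

segments = parameters_to_segments(origins, vectors, parameters)
# segments[0] == [[1., 0., 0.], [3., 0., 0.]]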
def _remote_connection(server, opts, argparser_):
"""Initiate a remote connection, via PyWBEM. Arguments for
the request are part of the command line arguments and include
user name, password, namespace, etc.
"""
global CONN # pylint: disable=global-statement
if opts.timeout is not None:
if opts.timeout < 0 or opts.timeout > 300:
argparser_.error('timeout option(%s) out of range' % opts.timeout)
# mock only uses the namespace timeout and statistics options from the
# original set of options. It ignores the url
if opts.mock_server:
CONN = FakedWBEMConnection(
default_namespace=opts.namespace,
timeout=opts.timeout,
stats_enabled=opts.statistics)
try:
build_mock_repository(CONN, opts.mock_server, opts.verbose)
except ValueError as ve:
argparser_.error('Build Repository failed: %s' % ve)
return CONN
if server[0] == '/':
url = server
elif re.match(r"^https{0,1}://", server) is not None:
url = server
elif re.match(r"^[a-zA-Z0-9]+://", server) is not None:
argparser_.error('Invalid scheme on server argument.'
' Use "http" or "https"')
else:
url = '%s://%s' % ('https', server)
creds = None
if opts.key_file is not None and opts.cert_file is None:
argparser_.error('keyfile option requires certfile option')
if opts.user is not None and opts.password is None:
opts.password = _getpass.getpass('Enter password for %s: '
% opts.user)
if opts.user is not None or opts.password is not None:
creds = (opts.user, opts.password)
# if client cert and key provided, create dictionary for
# wbem connection
x509_dict = None
if opts.cert_file is not None:
x509_dict = {"cert_file": opts.cert_file}
if opts.key_file is not None:
x509_dict.update({'key_file': opts.key_file})
CONN = WBEMConnection(url, creds, default_namespace=opts.namespace,
no_verification=opts.no_verify_cert,
x509=x509_dict, ca_certs=opts.ca_certs,
timeout=opts.timeout,
stats_enabled=opts.statistics)
CONN.debug = True
return CONN | Initiate a remote connection, via PyWBEM. Arguments for
the request are part of the command line arguments and include
user name, password, namespace, etc. |
def load_vocab(self, vocab_name, **kwargs):
""" loads a vocabulary into the defintion triplestore
args:
vocab_name: the prefix, uri or filename of a vocabulary
"""
log.setLevel(kwargs.get("log_level", self.log_level))
vocab = self.get_vocab(vocab_name , **kwargs)
if vocab['filename'] in self.loaded:
if self.loaded_times.get(vocab['filename'],
datetime.datetime(2001,1,1)).timestamp() \
< vocab['modified']:
self.drop_file(vocab['filename'], **kwargs)
else:
return
conn = kwargs.get("conn", self.conn)
conn.load_data(graph=getattr(__NSM__.kdr, vocab['filename']).clean_uri,
data=vocab['data'],
datatype=vocab['filename'].split(".")[-1],
log_level=logging.WARNING)
self.__update_time__(vocab['filename'], **kwargs)
log.warning("\n\tvocab: '%s' loaded \n\tconn: '%s'",
vocab['filename'],
conn)
        self.loaded.append(vocab['filename']) | loads a vocabulary into the definition triplestore
args:
vocab_name: the prefix, uri or filename of a vocabulary |
def average_precision(truth, recommend):
"""Average Precision (AP).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: AP.
"""
if len(truth) == 0:
if len(recommend) == 0:
return 1.
return 0.
tp = accum = 0.
for n in range(recommend.size):
if recommend[n] in truth:
tp += 1.
accum += (tp / (n + 1.))
return accum / truth.size | Average Precision (AP).
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
float: AP. |
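A quick check of average_precision with toy numpy arrays; the values are illustrative.
import numpy as np

truth = np.array([1, 3, 5])
recommend = np.array([1, 2, 3])

# Hits at ranks 1 and 3: (1/1 + 2/3) / 3 == 5/9 ~= 0.556
print(average_precision(truth, recommend))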
def CaptureFrameLocals(self, frame):
"""Captures local variables and arguments of the specified frame.
Args:
frame: frame to capture locals and arguments.
Returns:
(arguments, locals) tuple.
"""
# Capture all local variables (including method arguments).
variables = {n: self.CaptureNamedVariable(n, v, 1,
self.default_capture_limits)
for n, v in six.viewitems(frame.f_locals)}
# Split between locals and arguments (keeping arguments in the right order).
nargs = frame.f_code.co_argcount
if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1
if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1
frame_arguments = []
for argname in frame.f_code.co_varnames[:nargs]:
if argname in variables: frame_arguments.append(variables.pop(argname))
return (frame_arguments, list(six.viewvalues(variables))) | Captures local variables and arguments of the specified frame.
Args:
frame: frame to capture locals and arguments.
Returns:
(arguments, locals) tuple. |
def __json_strnum_to_bignum(json_object):
"""
Converts json string numerals to native python bignums.
"""
for key in ('id', 'week', 'in_reply_to_id', 'in_reply_to_account_id', 'logins', 'registrations', 'statuses'):
if (key in json_object and isinstance(json_object[key], six.text_type)):
try:
json_object[key] = int(json_object[key])
except ValueError:
pass
return json_object | Converts json string numerals to native python bignums. |
def example_lab_to_ipt():
"""
This function shows a simple conversion of an XYZ color to an IPT color.
"""
print("=== Simple Example: XYZ->IPT ===")
# Instantiate an XYZ color object with the given values.
xyz = XYZColor(0.5, 0.5, 0.5, illuminant='d65')
# Show a string representation.
print(xyz)
# Convert to IPT.
ipt = convert_color(xyz, IPTColor)
print(ipt)
print("=== End Example ===\n") | This function shows a simple conversion of an XYZ color to an IPT color. |
def render_tile(cells, ti, tj, render, params, metadata, layout, summary):
"""
Render each cell in the tile and stitch it into a single image
"""
image_size = params["cell_size"] * params["n_tile"]
tile = Image.new("RGB", (image_size, image_size), (255,255,255))
keys = cells.keys()
for i,key in enumerate(keys):
print("cell", i+1, "/", len(keys), end='\r')
cell_image = render(cells[key], params, metadata, layout, summary)
# stitch this rendering into the tile image
ci = key[0] % params["n_tile"]
cj = key[1] % params["n_tile"]
xmin = ci*params["cell_size"]
ymin = cj*params["cell_size"]
xmax = (ci+1)*params["cell_size"]
ymax = (cj+1)*params["cell_size"]
if params.get("scale_density", False):
density = len(cells[key]["gi"])
# scale = density/summary["max_density"]
scale = math.log(density)/(math.log(summary["max_density"]) or 1)
owidth = xmax - xmin
width = int(round(owidth * scale))
if(width < 1):
width = 1
offsetL = int(round((owidth - width)/2))
offsetR = owidth - width - offsetL # handle odd numbers
# print("\n")
# print("width", width, offsetL, offsetR)
box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR]
resample = params.get("scale_type", Image.NEAREST)
cell_image = cell_image.resize(size=(width,width), resample=resample)
# print(cell_image)
else:
box = [xmin, ymin, xmax, ymax]
# print("box", box)
tile.paste(cell_image, box)
print("\n")
return tile | Render each cell in the tile and stitch it into a single image |
def get_bins_by_query(self, bin_query):
"""Gets a list of ``Bins`` matching the given bin query.
arg: bin_query (osid.resource.BinQuery): the bin query
return: (osid.resource.BinList) - the returned ``BinList``
raise: NullArgument - ``bin_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - a ``bin_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(bin_query)
query_terms = dict(bin_query._query_terms)
collection = JSONClientValidated('resource',
collection='Bin',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.BinList(result, runtime=self._runtime) | Gets a list of ``Bins`` matching the given bin query.
arg: bin_query (osid.resource.BinQuery): the bin query
return: (osid.resource.BinList) - the returned ``BinList``
raise: NullArgument - ``bin_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - a ``bin_query`` is not of this service
*compliance: mandatory -- This method must be implemented.* |
def flattennd(d, levels=0, key_as_tuple=True, delim='.',
list_of_dicts=None):
""" get nested dict as {key:dict,...},
    where key is a tuple/string of all but the last n levels of nested keys
Parameters
----------
d : dict
levels : int
the number of levels to leave unflattened
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flattennd(d,0))
{(1, 2, 3, 'b'): 'B', (1, 2, 3, 'c'): 'C', (1, 2, 4): 'D'}
>>> pprint(flattennd(d,1))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flattennd(d,2))
{(1,): {2: {4: 'D'}}, (1, 2): {3: {'b': 'B', 'c': 'C'}}}
>>> pprint(flattennd(d,3))
{(): {1: {2: {4: 'D'}}}, (1,): {2: {3: {'b': 'B', 'c': 'C'}}}}
>>> pprint(flattennd(d,4))
{(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
>>> pprint(flattennd(d,5))
{(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
>>> pprint(flattennd(d,1,key_as_tuple=False,delim='.'))
{'1.2': {4: 'D'}, '1.2.3': {'b': 'B', 'c': 'C'}}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=2))
{('a', '__list__0', 'b'): [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}],
('a', '__list__1', 'b'): [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}
>>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=3))
{('a', '__list__0'): {'b': [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}]},
('a', '__list__1'): {'b': [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}}
""" # noqa: E501
if levels < 0:
        raise ValueError('unflattened levels must not be negative')
new_d = {}
flattened = flatten(d, True, delim, list_of_dicts=list_of_dicts)
if levels == 0:
return flattened
for key, value in flattened.items():
if key_as_tuple:
new_key = key[: - (levels)]
else:
new_key = delim.join([str(k) for k in key[:-(levels)]])
new_levels = key[-(levels):]
# val_dict = {new_levels: value}
# val_dict = unflatten(val_dict, True, delim)
if new_key not in new_d:
new_d[new_key] = {new_levels: value}
else:
if new_levels in new_d[new_key]:
raise ValueError(
"key clash for: {0}; {1}".format(new_key, new_levels))
new_d[new_key][new_levels] = value
for nkey, nvalue in new_d.items():
new_d[nkey] = unflatten(
nvalue, list_of_dicts=list_of_dicts, deepcopy=False)
return new_d | get nested dict as {key:dict,...},
    where key is a tuple/string of all but the last n levels of nested keys
Parameters
----------
d : dict
levels : int
the number of levels to leave unflattened
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flattennd(d,0))
{(1, 2, 3, 'b'): 'B', (1, 2, 3, 'c'): 'C', (1, 2, 4): 'D'}
>>> pprint(flattennd(d,1))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flattennd(d,2))
{(1,): {2: {4: 'D'}}, (1, 2): {3: {'b': 'B', 'c': 'C'}}}
>>> pprint(flattennd(d,3))
{(): {1: {2: {4: 'D'}}}, (1,): {2: {3: {'b': 'B', 'c': 'C'}}}}
>>> pprint(flattennd(d,4))
{(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
>>> pprint(flattennd(d,5))
{(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}}
>>> pprint(flattennd(d,1,key_as_tuple=False,delim='.'))
{'1.2': {4: 'D'}, '1.2.3': {'b': 'B', 'c': 'C'}}
>>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]}
>>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=2))
{('a', '__list__0', 'b'): [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}],
('a', '__list__1', 'b'): [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}
>>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=3))
{('a', '__list__0'): {'b': [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}]},
('a', '__list__1'): {'b': [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}} |
def display_notes(self):
"""Display information about scores and raters.
"""
if self.annot is not None:
short_xml_file = short_strings(basename(self.annot.xml_file))
self.idx_annotations.setText(short_xml_file)
# if annotations were loaded without dataset
if self.parent.overview.scene is None:
self.parent.overview.update()
if not self.annot.raters:
self.new_rater()
self.idx_rater.setText(self.annot.current_rater)
self.display_eventtype()
self.update_annotations()
self.display_stats()
self.epoch_length = self.annot.epoch_length | Display information about scores and raters. |
def set_primary_key_auto(self, table):
"""
        Analyze a table and set a primary key.
Determine primary key by identifying a column with unique values
or creating a new column.
:param table: Table to alter
:return: Primary Key column
"""
# Confirm no primary key exists
pk = self.get_primary_key(table)
if not pk:
# Determine if there is a unique column that can become the PK
unique_col = self.get_unique_column(table)
# Set primary key
if unique_col:
self.set_primary_key(table, unique_col)
# Create unique 'ID' column
else:
unique_col = self.add_column(table, primary_key=True)
return unique_col
else:
            return pk | Analyze a table and set a primary key.
Determine primary key by identifying a column with unique values
or creating a new column.
:param table: Table to alter
:return: Primary Key column |
def copy_folder_content(src, dst):
"""
Copy all content in src directory to dst directory.
The src and dst must exist.
"""
for file in os.listdir(src):
file_path = os.path.join(src, file)
dst_file_path = os.path.join(dst, file)
if os.path.isdir(file_path):
shutil.copytree(file_path, dst_file_path)
else:
shutil.copyfile(file_path, dst_file_path) | Copy all content in src directory to dst directory.
The src and dst must exist. |
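A self-contained demonstration using temporary directories; all paths are created on the fly.
import os
import shutil
import tempfile

src = tempfile.mkdtemp()
dst = tempfile.mkdtemp()
os.makedirs(os.path.join(src, 'sub'))
with open(os.path.join(src, 'a.txt'), 'w') as f:
    f.write('hello')

copy_folder_content(src, dst)
print(sorted(os.listdir(dst)))  # ['a.txt', 'sub']

shutil.rmtree(src)
shutil.rmtree(dst)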
def cdsthreads(self):
"""
Determines which core genes from a pre-calculated database are present in each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cds, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
#
sample[self.analysistype].corepresence = dict()
self.cdsqueue.put(sample)
self.cdsqueue.join() | Determines which core genes from a pre-calculated database are present in each strain |
def get_email_context(self, activation_key):
"""
Build the template context used for the activation email.
"""
scheme = 'https' if self.request.is_secure() else 'http'
return {
'scheme': scheme,
'activation_key': activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': get_current_site(self.request)
} | Build the template context used for the activation email. |
def get_middle_point(self):
"""
Return the middle point of the mesh.
:returns:
An instance of :class:`~openquake.hazardlib.geo.point.Point`.
The middle point is taken from the middle row and a middle column
            of the mesh if there is an odd number of both. Otherwise the geometric
mean point of two or four middle points.
"""
num_rows, num_cols = self.lons.shape
mid_row = num_rows // 2
depth = 0
if num_rows & 1 == 1:
# there are odd number of rows
mid_col = num_cols // 2
if num_cols & 1 == 1:
# odd number of columns, we can easily take
# the middle point
depth = self.depths[mid_row, mid_col]
return Point(self.lons[mid_row, mid_col],
self.lats[mid_row, mid_col], depth)
else:
# even number of columns, need to take two middle
# points on the middle row
lon1, lon2 = self.lons[mid_row, mid_col - 1: mid_col + 1]
lat1, lat2 = self.lats[mid_row, mid_col - 1: mid_col + 1]
depth1 = self.depths[mid_row, mid_col - 1]
depth2 = self.depths[mid_row, mid_col]
else:
# there are even number of rows. take the row just above
# and the one just below the middle and find middle point
# of each
submesh1 = self[mid_row - 1: mid_row]
submesh2 = self[mid_row: mid_row + 1]
p1, p2 = submesh1.get_middle_point(), submesh2.get_middle_point()
lon1, lat1, depth1 = p1.longitude, p1.latitude, p1.depth
lon2, lat2, depth2 = p2.longitude, p2.latitude, p2.depth
# we need to find the middle between two points
depth = (depth1 + depth2) / 2.0
lon, lat = geo_utils.get_middle_point(lon1, lat1, lon2, lat2)
return Point(lon, lat, depth) | Return the middle point of the mesh.
:returns:
An instance of :class:`~openquake.hazardlib.geo.point.Point`.
The middle point is taken from the middle row and a middle column
            of the mesh if there is an odd number of both. Otherwise the geometric
mean point of two or four middle points. |
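A rough standalone sketch of the odd/odd case only, using plain numpy arrays in place of the mesh object; it mirrors the index arithmetic above but is not the openquake implementation.
import numpy as np

lons = np.array([[0.0, 1.0, 2.0]] * 3)
lats = np.array([[0.0] * 3, [1.0] * 3, [2.0] * 3])

num_rows, num_cols = lons.shape
mid_row, mid_col = num_rows // 2, num_cols // 2
if num_rows % 2 == 1 and num_cols % 2 == 1:
    # Odd number of rows and columns: the central grid node is the middle point.
    print(lons[mid_row, mid_col], lats[mid_row, mid_col])  # 1.0 1.0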
def get_suppressions(relative_filepaths, root, messages):
"""
Given every message which was emitted by the tools, and the
list of files to inspect, create a list of files to ignore,
and a map of filepath -> line-number -> codes to ignore
"""
paths_to_ignore = set()
lines_to_ignore = defaultdict(set)
messages_to_ignore = defaultdict(lambda: defaultdict(set))
# first deal with 'noqa' style messages
for filepath in relative_filepaths:
abspath = os.path.join(root, filepath)
try:
file_contents = encoding.read_py_file(abspath).split('\n')
except encoding.CouldNotHandleEncoding as err:
# TODO: this output will break output formats such as JSON
warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
continue
ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
if ignore_file:
paths_to_ignore.add(filepath)
lines_to_ignore[filepath] |= ignore_lines
# now figure out which messages were suppressed by pylint
pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
paths_to_ignore |= pylint_ignore_files
for filepath, line in pylint_ignore_messages.items():
for line_number, codes in line.items():
for code in codes:
messages_to_ignore[filepath][line_number].add(('pylint', code))
if code in _PYLINT_EQUIVALENTS:
for equivalent in _PYLINT_EQUIVALENTS[code]:
messages_to_ignore[filepath][line_number].add(equivalent)
return paths_to_ignore, lines_to_ignore, messages_to_ignore | Given every message which was emitted by the tools, and the
list of files to inspect, create a list of files to ignore,
and a map of filepath -> line-number -> codes to ignore |
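A rough standalone sketch of the '# noqa' scanning idea referenced above (get_noqa_suppressions itself is defined elsewhere); the regexes and the whole-file marker here are simplified assumptions, not prospector's actual implementation.
import re

NOQA_RE = re.compile(r'#\s*noqa', re.IGNORECASE)
FILE_WIDE_RE = re.compile(r'#\s*flake8[:=]\s*noqa', re.IGNORECASE)  # assumed whole-file marker

def sketch_noqa_suppressions(file_contents):
    """Return (ignore_whole_file, line_numbers_to_ignore) for '# noqa' comments."""
    ignore_file = any(FILE_WIDE_RE.search(line) for line in file_contents)
    ignore_lines = {
        line_number
        for line_number, line in enumerate(file_contents, start=1)
        if NOQA_RE.search(line)
    }
    return ignore_file, ignore_lines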
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
network_type='walk', timeout=180, memory=None,
max_query_area_size=50*1000*50*1000,
custom_osm_filter=None):
"""
Download OSM ways and nodes within a bounding box from the Overpass API.
Parameters
----------
lat_min : float
southern latitude of bounding box
lng_min : float
eastern longitude of bounding box
lat_max : float
northern latitude of bounding box
lng_max : float
western longitude of bounding box
network_type : string
Specify the network type where value of 'walk' includes roadways
where pedestrians are allowed and pedestrian
pathways and 'drive' includes driveable roadways.
timeout : int
the timeout interval for requests and to pass to Overpass API
memory : int
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
response_json : dict
"""
# create a filter to exclude certain kinds of ways based on the requested
# network_type
if custom_osm_filter is None:
request_filter = osm_filter(network_type)
else:
request_filter = custom_osm_filter
response_jsons_list = []
response_jsons = []
# server memory allocation in bytes formatted for Overpass API query
if memory is None:
maxsize = ''
else:
maxsize = '[maxsize:{}]'.format(memory)
# define the Overpass API query
# way["highway"] denotes ways with highway keys and {filters} returns
# ways with the requested key/value. the '>' makes it recurse so we get
# ways and way nodes. maxsize is in bytes.
# turn bbox into a polygon and project to local UTM
polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
(lng_min, lat_max), (lng_max, lat_max)])
geometry_proj, crs_proj = project_geometry(polygon,
crs={'init': 'epsg:4326'})
# subdivide the bbox area poly if it exceeds the max area size
# (in meters), then project back to WGS84
geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size)
geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
crs=crs_proj, to_latlong=True)
log('Requesting network data within bounding box from Overpass API '
'in {:,} request(s)'.format(len(geometry)))
start_time = time.time()
# loop through each polygon in the geometry
for poly in geometry:
# represent bbox as lng_max, lat_min, lng_min, lat_max and round
# lat-longs to 8 decimal places to create
# consistent URL strings
lng_max, lat_min, lng_min, lat_max = poly.bounds
query_template = '[out:json][timeout:{timeout}]{maxsize};' \
'(way["highway"]' \
'{filters}({lat_min:.8f},{lng_max:.8f},' \
'{lat_max:.8f},{lng_min:.8f});>;);out;'
query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
lng_min=lng_min, lng_max=lng_max,
filters=request_filter,
timeout=timeout, maxsize=maxsize)
response_json = overpass_request(data={'data': query_str},
timeout=timeout)
response_jsons_list.append(response_json)
log('Downloaded OSM network data within bounding box from Overpass '
'API in {:,} request(s) and'
' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))
# stitch together individual json results
for json in response_jsons_list:
try:
response_jsons.extend(json['elements'])
except KeyError:
pass
# remove duplicate records resulting from the json stitching
start_time = time.time()
record_count = len(response_jsons)
if record_count == 0:
raise Exception('Query resulted in no data. Check your query '
'parameters: {}'.format(query_str))
else:
response_jsons_df = pd.DataFrame.from_records(response_jsons,
index='id')
nodes = response_jsons_df[response_jsons_df['type'] == 'node']
nodes = nodes[~nodes.index.duplicated(keep='first')]
ways = response_jsons_df[response_jsons_df['type'] == 'way']
ways = ways[~ways.index.duplicated(keep='first')]
response_jsons_df = pd.concat([nodes, ways], axis=0)
response_jsons_df.reset_index(inplace=True)
response_jsons = response_jsons_df.to_dict(orient='records')
if record_count - len(response_jsons) > 0:
log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
record_count - len(response_jsons), time.time() - start_time))
return {'elements': response_jsons} | Download OSM ways and nodes within a bounding box from the Overpass API.
Parameters
----------
lat_min : float
southern latitude of bounding box
lng_min : float
eastern longitude of bounding box
lat_max : float
northern latitude of bounding box
lng_max : float
western longitude of bounding box
network_type : string
Specify the network type where value of 'walk' includes roadways
where pedestrians are allowed and pedestrian
pathways and 'drive' includes driveable roadways.
timeout : int
the timeout interval for requests and to pass to Overpass API
memory : int
server memory allocation size for the query, in bytes. If none,
server will use its default allocation size
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is
in: any polygon bigger will get divided up for multiple queries to
Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
area, if units are meters))
custom_osm_filter : string, optional
specify custom arguments for the way["highway"] query to OSM. Must
follow Overpass API schema. For
example to request highway ways that are service roads use:
'["highway"="service"]'
Returns
-------
response_json : dict |
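A hedged usage sketch: the bounding box is a small made-up area, the function is assumed importable from this module, and a live Overpass API endpoint must be reachable. Note the docstring's longitude convention (lng_min is documented as the eastern edge, lng_max as the western edge); for a rectangular bbox either ordering yields the same polygon.
response = osm_net_download(
    lat_min=37.76, lat_max=37.78,
    lng_min=-122.42,   # eastern edge per the docstring above
    lng_max=-122.44,   # western edge per the docstring above
    network_type='walk',
)
print(len(response['elements']), 'OSM nodes and ways returned')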
def compute_before_after(self):
"""Compute the list of lines before and after the proposed docstring changes.
:return: tuple of before,after where each is a list of lines of python code.
"""
if not self.parsed:
self._parse()
list_from = self.input_lines
list_to = []
last = 0
for e in self.docs_list:
start, end = e['location']
if start <= 0:
start, end = -start, -end
list_to.extend(list_from[last:start + 1])
else:
list_to.extend(list_from[last:start])
docs = e['docs'].get_raw_docs()
list_docs = [l + '\n' for l in docs.splitlines()]
list_to.extend(list_docs)
last = end + 1
if last < len(list_from):
list_to.extend(list_from[last:])
return list_from, list_to | Compute the list of lines before and after the proposed docstring changes.
:return: tuple of before,after where each is a list of lines of python code. |
def _get_node_column(cls, node, column_name):
"""Given a ParsedNode, add some fields that might be missing. Return a
reference to the dict that refers to the given column, creating it if
it doesn't yet exist.
"""
if not hasattr(node, 'columns'):
node.set('columns', {})
if column_name in node.columns:
column = node.columns[column_name]
else:
column = {'name': column_name, 'description': ''}
node.columns[column_name] = column
return column | Given a ParsedNode, add some fields that might be missing. Return a
reference to the dict that refers to the given column, creating it if
it doesn't yet exist. |
def get_active_project_path(self):
"""Get path of the active project"""
active_project_path = None
if self.current_active_project:
active_project_path = self.current_active_project.root_path
return active_project_path | Get path of the active project |
def child_allocation(self):
""" The sum of all child asset classes' allocations """
sum = Decimal(0)
if self.classes:
for child in self.classes:
sum += child.child_allocation
else:
# This is not a branch but a leaf. Return own allocation.
sum = self.allocation
return sum | The sum of all child asset classes' allocations |
def get_index_by_alias(self, alias):
"""Get index name for given alias.
If there is no alias assume it's an index.
:param alias: alias name
"""
try:
info = self.es.indices.get_alias(name=alias)
return next(iter(info.keys()))
except elasticsearch.exceptions.NotFoundError:
return alias | Get index name for given alias.
If there is no alias assume it's an index.
:param alias: alias name |
def strings(self, otherchar=None):
'''
Each time next() is called on this iterator, a new string is returned
        which the present lego piece can match. StopIteration is raised once
        all such strings have been returned, although a regex with a * in it may
match infinitely many strings.
'''
# In the case of a regex like "[^abc]", there are infinitely many (well, a
# very large finite number of) single characters which will match. It's not
# productive to iterate over all of these giving every single example.
# You must supply your own "otherchar" to stand in for all of these
# possibilities.
for string in self.to_fsm().strings():
# Have to represent `fsm.anything_else` somehow.
if fsm.anything_else in string:
                if otherchar is None:
raise Exception("Please choose an 'otherchar'")
string = [
otherchar if char == fsm.anything_else else char
for char in string
]
yield "".join(string) | Each time next() is called on this iterator, a new string is returned
        which the present lego piece can match. StopIteration is raised once
        all such strings have been returned, although a regex with a * in it may
match infinitely many strings. |
def get_element_name(parent, ns):
# type: (_Element, str) -> str
"""Get element short name."""
name = parent.find('./' + ns + 'SHORT-NAME')
if name is not None and name.text is not None:
return name.text
return "" | Get element short name. |
def get_config_groups(self, groups_conf, groups_pillar_name):
'''
get info from groups in config, and from the named pillar
todo: add specification for the minion to use to recover pillar
'''
# Get groups
# Default to returning something that'll never match
ret_groups = {
'default': {
'users': set(),
'commands': set(),
'aliases': {},
'default_target': {},
'targets': {}
}
}
# allow for empty groups in the config file, and instead let some/all of this come
# from pillar data.
if not groups_conf:
use_groups = {}
else:
use_groups = groups_conf
# First obtain group lists from pillars, then in case there is any overlap, iterate over the groups
# that come from pillars. The configuration in files on disk/from startup
# will override any configs from pillars. They are meant to be complementary not to provide overrides.
log.debug('use_groups %s', use_groups)
try:
groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items())
except AttributeError:
log.warning('Failed to get groups from %s: %s or from config: %s',
groups_pillar_name,
self._groups_from_pillar(groups_pillar_name),
use_groups
)
groups_gen = []
for name, config in groups_gen:
log.info('Trying to get %s and %s to be useful', name, config)
ret_groups.setdefault(name, {
'users': set(), 'commands': set(), 'aliases': {},
'default_target': {}, 'targets': {}
})
try:
ret_groups[name]['users'].update(set(config.get('users', [])))
ret_groups[name]['commands'].update(set(config.get('commands', [])))
ret_groups[name]['aliases'].update(config.get('aliases', {}))
ret_groups[name]['default_target'].update(config.get('default_target', {}))
ret_groups[name]['targets'].update(config.get('targets', {}))
except (IndexError, AttributeError):
log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name)
log.debug('Got the groups: %s', ret_groups)
return ret_groups | get info from groups in config, and from the named pillar
todo: add specification for the minion to use to recover pillar |
def _clean(self, rmConnetions=True, lockNonExternal=True):
"""
Remove all signals from this interface (used after unit is synthesized
and its parent is connecting its interface to this unit)
"""
if self._interfaces:
for i in self._interfaces:
i._clean(rmConnetions=rmConnetions,
lockNonExternal=lockNonExternal)
else:
self._sigInside = self._sig
del self._sig
if lockNonExternal and not self._isExtern:
self._isAccessible = False | Remove all signals from this interface (used after unit is synthesized
and its parent is connecting its interface to this unit) |
def getMetricDetails(self, metricLabel):
"""
Gets detailed info about a given metric, in addition to its value. This
may including any statistics or auxilary data that are computed for a given
metric.
:param metricLabel: (string) label of the given metric (see
:class:`~nupic.frameworks.opf.metrics.MetricSpec`)
:returns: (dict) of metric information, as returned by
:meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.
"""
try:
metricIndex = self.__metricLabels.index(metricLabel)
    except ValueError:  # list.index() raises ValueError when the label is not found
return None
return self.__metrics[metricIndex].getMetric() | Gets detailed info about a given metric, in addition to its value. This
    may include any statistics or auxiliary data that are computed for a given
metric.
:param metricLabel: (string) label of the given metric (see
:class:`~nupic.frameworks.opf.metrics.MetricSpec`)
:returns: (dict) of metric information, as returned by
:meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`. |
def _parse_group(self, group_name, group):
"""
Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'.
"""
if type(group) == dict:
# Example:
# {
# "mgmt": {
# "hosts": [ "mgmt01", "mgmt02" ],
# "vars": {
# "eth0": {
# "onboot": "yes",
# "nm_controlled": "no"
# }
# }
# }
# }
#
hostnames_in_group = set()
# Group member with hosts and variable definitions.
for hostname in group.get('hosts', []):
self._get_host(hostname)['groups'].add(group_name)
hostnames_in_group.add(hostname)
# Apply variables to all hosts in group
for var_key, var_val in group.get('vars', {}).items():
for hostname in hostnames_in_group:
self._get_host(hostname)['hostvars'][var_key] = var_val
elif type(group) == list:
# List of hostnames for this group
for hostname in group:
self._get_host(hostname)['groups'].add(group_name)
else:
self.log.warning("Invalid element found in dynamic inventory output: {0}".format(type(group))) | Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'. |
def frets_to_NoteContainer(self, fingering):
"""Convert a list such as returned by find_fret to a NoteContainer."""
res = []
for (string, fret) in enumerate(fingering):
if fret is not None:
res.append(self.get_Note(string, fret))
return NoteContainer(res) | Convert a list such as returned by find_fret to a NoteContainer. |
def _assemble_active_form(self, stmt):
"""Example: p(HGNC:ELK1, pmod(Ph)) => act(p(HGNC:ELK1), ma(tscript))"""
act_agent = Agent(stmt.agent.name, db_refs=stmt.agent.db_refs)
act_agent.activity = ActivityCondition(stmt.activity, True)
activates = stmt.is_active
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.agent, act_agent, relation, stmt.evidence) | Example: p(HGNC:ELK1, pmod(Ph)) => act(p(HGNC:ELK1), ma(tscript)) |
def connectQ2Q(self, fromAddress, toAddress, protocolName, protocolFactory,
usePrivateCertificate=None, fakeFromDomain=None,
chooser=None):
"""
Connect a named protocol factory from a resource@domain to a
resource@domain.
        This is analogous to something like connectTCP, in that it creates a
connection-oriented transport for each connection, except instead of
specifying your credentials with an application-level (username,
password) and your endpoint with a framework-level (host, port), you
specify both at once, in the form of your ID (user@my-domain), their ID
(user@their-domain) and the desired protocol. This provides several
useful features:
- All connections are automatically authenticated via SSL
certificates, although not authorized for any particular
activities, based on their transport interface rather than having
to have protocol logic to authenticate.
- User-meaningful protocol nicknames are attached to
implementations of protocol logic, rather than arbitrary
numbering.
- Endpoints can specify a variety of transport mechanisms
transparently to the application: for example, you might be
connecting to an authorized user-agent on the user's server or to
the user directly using a NAT-circumvention handshake. All the
application has to know is that it wants to establish a TCP-like
connection.
XXX Really, really should return an IConnector implementor for symmetry
with other connection-oriented transport APIs, but currently does not.
The 'resource' parameters are so named (rather than beginning with
'user', for example) because they are sometimes used to refer to
abstract entities or roles, such as 'payments', or groups of users
(communities) but generally the convention is to document them as
individual users for simplicity's sake.
        The parameters are described as if Alice <alice@divmod.com> were trying
        to connect to Bob <bob@notdivmod.com> to transfer a file over HTTP.
@param fromAddress: The address of the connecting user: in this case,
Q2QAddress("divmod.com", "alice")
@param toAddress: The address of the user connected to: in this case,
Q2QAddress("notdivmod.com", "bob")
@param protocolName: The name of the protocol, by convention observing
similar names to http://www.iana.org/assignments/port-numbers when
appropriate. In this case, 'http'.
@param protocolFactory: An implementation of
L{twisted.internet.interfaces.IProtocolFactory}
@param usePrivateCertificate: Use a different private certificate for
initiating the 'secure' call. Mostly for testing different invalid
certificate attacks.
@param fakeFromDomain: This domain name will be used for an argument to
the 'connect' command, but NOT as an argument to the SECURE command.
This is to test a particular kind of invalid cert attack.
@param chooser: a function taking a list of connection-describing
objects and returning another list. Those items in the remaining list
will be attempted as connections and buildProtocol called on the client
factory. May return a Deferred.
@default chooser: C{lambda x: x and [x[0]]}
@return:
"""
if chooser is None:
chooser = lambda x: x and [x[0]]
def onSecureConnection(protocol):
if fakeFromDomain:
connectFromAddress = Q2QAddress(
fakeFromDomain,
toAddress.resource
)
else:
connectFromAddress = fromAddress
return protocol.connect(connectFromAddress, toAddress,
protocolName, protocolFactory,
chooser)
def onSecureConnectionFailure(reason):
protocolFactory.clientConnectionFailed(None, reason)
return reason
return self.getSecureConnection(
fromAddress, toAddress,
port, usePrivateCertificate).addCallback(
onSecureConnection).addErrback(onSecureConnectionFailure) | Connect a named protocol factory from a resource@domain to a
resource@domain.
    This is analogous to something like connectTCP, in that it creates a
connection-oriented transport for each connection, except instead of
specifying your credentials with an application-level (username,
password) and your endpoint with a framework-level (host, port), you
specify both at once, in the form of your ID (user@my-domain), their ID
(user@their-domain) and the desired protocol. This provides several
useful features:
- All connections are automatically authenticated via SSL
certificates, although not authorized for any particular
activities, based on their transport interface rather than having
to have protocol logic to authenticate.
- User-meaningful protocol nicknames are attached to
implementations of protocol logic, rather than arbitrary
numbering.
- Endpoints can specify a variety of transport mechanisms
transparently to the application: for example, you might be
connecting to an authorized user-agent on the user's server or to
the user directly using a NAT-circumvention handshake. All the
application has to know is that it wants to establish a TCP-like
connection.
XXX Really, really should return an IConnector implementor for symmetry
with other connection-oriented transport APIs, but currently does not.
The 'resource' parameters are so named (rather than beginning with
'user', for example) because they are sometimes used to refer to
abstract entities or roles, such as 'payments', or groups of users
(communities) but generally the convention is to document them as
individual users for simplicity's sake.
    The parameters are described as if Alice <alice@divmod.com> were trying
    to connect to Bob <bob@notdivmod.com> to transfer a file over HTTP.
@param fromAddress: The address of the connecting user: in this case,
Q2QAddress("divmod.com", "alice")
@param toAddress: The address of the user connected to: in this case,
Q2QAddress("notdivmod.com", "bob")
@param protocolName: The name of the protocol, by convention observing
similar names to http://www.iana.org/assignments/port-numbers when
appropriate. In this case, 'http'.
@param protocolFactory: An implementation of
L{twisted.internet.interfaces.IProtocolFactory}
@param usePrivateCertificate: Use a different private certificate for
initiating the 'secure' call. Mostly for testing different invalid
certificate attacks.
@param fakeFromDomain: This domain name will be used for an argument to
the 'connect' command, but NOT as an argument to the SECURE command.
This is to test a particular kind of invalid cert attack.
@param chooser: a function taking a list of connection-describing
objects and returning another list. Those items in the remaining list
will be attempted as connections and buildProtocol called on the client
factory. May return a Deferred.
@default chooser: C{lambda x: x and [x[0]]}
@return: |
def get(self):
'Retrieve the most recent value generated'
# If you attempt to use a generator comprehension below, it will
# consume the StopIteration exception and just return an empty tuple,
# instead of stopping iteration normally
return tuple([(x.name(), x.get()) for x in self._generators]) | Retrieve the most recent value generated |
def report(self, event, metadata=None, block=None):
"""
Reports an event to Alooma by formatting it properly and placing it in
the buffer to be sent by the Sender instance
:param event: A dict / string representing an event
:param metadata: (Optional) A dict with extra metadata to be attached to
the event
:param block: (Optional) If True, the function will block the thread
until the event buffer has space for the event.
If False, reported events are discarded if the queue is
full. Defaults to None, which uses the global `block`
parameter given in the `init`.
:return: True if the event was successfully enqueued, else False
"""
# Don't allow reporting if the underlying sender is terminated
if self._sender.is_terminated:
self._notify(logging.ERROR, consts.LOG_MSG_REPORT_AFTER_TERMINATION)
return False
# Send the event to the queue if it is a dict or a string.
if isinstance(event, (dict,) + py2to3.basestring):
formatted_event = self._format_event(event, metadata)
should_block = block if block is not None else self.is_blocking
return self._sender.enqueue_event(formatted_event, should_block)
else: # Event is not a dict nor a string. Deny it.
error_message = (consts.LOG_MSG_BAD_EVENT % (type(event), event))
self._notify(logging.ERROR, error_message)
return False | Reports an event to Alooma by formatting it properly and placing it in
the buffer to be sent by the Sender instance
:param event: A dict / string representing an event
:param metadata: (Optional) A dict with extra metadata to be attached to
the event
:param block: (Optional) If True, the function will block the thread
until the event buffer has space for the event.
If False, reported events are discarded if the queue is
full. Defaults to None, which uses the global `block`
parameter given in the `init`.
:return: True if the event was successfully enqueued, else False |
def network_security_group_delete(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Delete a network security group within a resource group.
:param name: The name of the network security group to delete.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_group_delete testnsg testgroup
'''
result = False
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
secgroup = netconn.network_security_groups.delete(
resource_group_name=resource_group,
network_security_group_name=name
)
secgroup.wait()
result = True
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
return result | .. versionadded:: 2019.2.0
Delete a network security group within a resource group.
:param name: The name of the network security group to delete.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_group_delete testnsg testgroup |
def _delete(self, state=None):
"""Helper for :meth:`delete`
Adds a delete mutation (for the entire row) to the accumulated
mutations.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
mutation_val = data_v2_pb2.Mutation.DeleteFromRow()
mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val)
self._get_mutations(state).append(mutation_pb) | Helper for :meth:`delete`
Adds a delete mutation (for the entire row) to the accumulated
mutations.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`. |
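A sketch of the public API this helper backs, assuming the google-cloud-bigtable client; the project, instance, and table names are placeholders:

from google.cloud import bigtable

client = bigtable.Client(project='my-project')            # placeholder project id
table = client.instance('my-instance').table('my-table')

row = table.direct_row(b'row-key')
row.delete()   # queues the DeleteFromRow mutation built by _delete()
row.commit()   # sends the accumulated mutations to Bigtable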
def p(self, value, event):
"""Return the conditional probability
P(X=value | parents=parent_values), where parent_values
are the values of parents in event. (event must assign each
parent a value.)
>>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
>>> bn.p(False, {'Burglary': False, 'Earthquake': True})
0.375"""
assert isinstance(value, bool)
ptrue = self.cpt[event_values(event, self.parents)]
return if_(value, ptrue, 1 - ptrue) | Return the conditional probability
P(X=value | parents=parent_values), where parent_values
are the values of parents in event. (event must assign each
parent a value.)
>>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
>>> bn.p(False, {'Burglary': False, 'Earthquake': True})
0.375 |
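Spelling out the doctest arithmetic: the only parent is Burglary, so the Earthquake entry in the event is ignored; the CPT lookup gives P(X=True | Burglary=False) = 0.625, and because the queried value is False the method returns the complement:

ptrue = 0.625      # P(X=True | Burglary=False) from the CPT above
print(1 - ptrue)   # 0.375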
def configure(self, options, conf):
"""
Configure plugin.
"""
super(LeakDetectorPlugin, self).configure(options, conf)
if options.leak_detector_level:
self.reporting_level = int(options.leak_detector_level)
self.report_delta = options.leak_detector_report_delta
self.patch_mock = options.leak_detector_patch_mock
self.ignore_patterns = options.leak_detector_ignore_patterns
self.save_traceback = options.leak_detector_save_traceback
self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False)) | Configure plugin. |
def lock(self): # type: () -> Installer
"""
Prepare the installer for locking only.
"""
self.update()
self.execute_operations(False)
self._lock = True
return self | Prepare the installer for locking only. |
def downgrade():
"""Downgrade database."""
# Remove 'created' and 'updated' columns
op.drop_column('oauthclient_remoteaccount', 'created')
op.drop_column('oauthclient_remoteaccount', 'updated')
op.drop_column('oauthclient_remotetoken', 'created')
op.drop_column('oauthclient_remotetoken', 'updated')
op.drop_column('oauthclient_useridentity', 'created')
op.drop_column('oauthclient_useridentity', 'updated') | Downgrade database. |
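For context, a sketch of what the mirror-image upgrade() presumably looks like; the column options (nullable, server defaults) are assumptions, not taken from the original migration:

import sqlalchemy as sa
from alembic import op

def upgrade():
    """Upgrade database."""
    for table in ('oauthclient_remoteaccount',
                  'oauthclient_remotetoken',
                  'oauthclient_useridentity'):
        op.add_column(table, sa.Column('created', sa.DateTime(), nullable=True))
        op.add_column(table, sa.Column('updated', sa.DateTime(), nullable=True))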
def solve_gamlasso(self, lam):
'''Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)'''
weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
s = self.solve_gfl(weights)  # solve the weighted graph-fused lasso with the adaptive edge weights
self.steps.append(s)
return self.beta | Solves the Graph-fused gamma lasso via POSE (Taddy, 2013) |
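Reading the weight line as a formula: each trail edge (i, j) receives the adaptive penalty

    w_{ij} = \frac{\lambda}{1 + \gamma\,\lvert \beta_i - \beta_j \rvert}

so edges whose endpoints already differ strongly under the current beta are penalised less, which is the POSE / gamma-lasso reweighting idea; with gamma = 0 every weight collapses to lambda and the plain graph-fused lasso is recovered. The weights are then handed to solve_gfl as edge weights.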
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)):
"""Compute cartesian coordinates of the pixels in instrument scan."""
if isinstance(orb, (list, tuple)):
tle1, tle2 = orb
orb = Orbital("mysatellite", line1=tle1, line2=tle2)
# get position and velocity for each time of each pixel
pos, vel = orb.get_position(times, normalize=False)
# now, get the vectors pointing to each pixel
vectors = sgeom.vectors(pos, vel, *rpy)
# compute intersection of lines (directed by vectors and passing through
# (0, 0, 0)) and ellipsoid. Derived from:
# http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
# do the computation between line and ellipsoid (WGS 84)
# NB: AAPP uses GRS 80...
centre = -pos
a__ = 6378.137 # km
# b__ = 6356.75231414 # km, GRS80
b__ = 6356.752314245 # km, WGS84
radius = np.array([[1 / a__, 1 / a__, 1 / b__]]).T
shape = vectors.shape
xr_ = vectors.reshape([3, -1]) * radius
cr_ = centre.reshape([3, -1]) * radius
ldotc = np.einsum("ij,ij->j", xr_, cr_)
lsq = np.einsum("ij,ij->j", xr_, xr_)
csq = np.einsum("ij,ij->j", cr_, cr_)
d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq
# return the actual pixel positions
return vectors * d1_.reshape(shape[1:]) - centre | Compute cartesian coordinates of the pixels in instrument scan. |
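A small self-contained check of the intersection formula above, run on a unit sphere (a__ = b__ = 1) with a single nadir-pointing vector so the expected answer, the sub-satellite point (0, 0, 1), is easy to verify by eye; it uses plain numpy and no pyorbital objects:

import numpy as np

pos = np.array([[0.0], [0.0], [2.0]])    # "satellite" two units above the origin
vec = np.array([[0.0], [0.0], [-1.0]])   # looking straight down
radius = np.array([[1.0, 1.0, 1.0]]).T   # unit sphere, so no scaling
centre = -pos

xr_ = vec.reshape([3, -1]) * radius
cr_ = centre.reshape([3, -1]) * radius
ldotc = np.einsum("ij,ij->j", xr_, cr_)
lsq = np.einsum("ij,ij->j", xr_, xr_)
csq = np.einsum("ij,ij->j", cr_, cr_)
d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq

print((vec * d1_ - centre).ravel())      # [0. 0. 1.]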