code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
---|---
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
"""
Parameters
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
    word_inputs : ``torch.Tensor``, optional.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers.
"""
if self._word_embedding is not None and word_inputs is not None:
try:
mask_without_bos_eos = (word_inputs > 0).long()
# The character cnn part is cached - just look it up.
embedded_inputs = self._word_embedding(word_inputs) # type: ignore
# shape (batch_size, timesteps + 2, embedding_dim)
type_representation, mask = add_sentence_boundary_token_ids(
embedded_inputs,
mask_without_bos_eos,
self._bos_embedding,
self._eos_embedding
)
except RuntimeError:
# Back off to running the character convolutions,
# as we might not have the words in the cache.
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
else:
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
lstm_outputs = self._elmo_lstm(type_representation, mask)
# Prepare the output. The first layer is duplicated.
# Because of minor differences in how masking is applied depending
# on whether the char cnn layers are cached, we'll be defensive and
# multiply by the mask here. It's not strictly necessary, as the
# mask passed on is correct, but the values in the padded areas
# of the char cnn representations can change.
output_tensors = [
torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1)
]
for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
output_tensors.append(layer_activations.squeeze(0))
return {
'activations': output_tensors,
'mask': mask,
} | Parameters
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
    word_inputs : ``torch.Tensor``, optional.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers. |
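A hedged usage sketch of the shape flow described above (the module instance, vocabulary size, and batch dimensions below are illustrative, not taken from the snippet):
import torch

# Hypothetical batch: 2 sentences, 7 words, 50 character ids per word.
character_ids = torch.randint(low=1, high=262, size=(2, 7, 50))

# `elmo_bilm` is assumed to be an already-constructed instance of the module defining forward().
output = elmo_bilm(character_ids)
activations = output['activations']   # list of tensors, each of shape (2, 7 + 2, embedding_dim)
mask = output['mask']                 # shape (2, 7 + 2); the +2 covers the BOS/EOS markers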
def create_role(name, policy_document=None, path=None, region=None, key=None,
keyid=None, profile=None):
'''
Create an instance role.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_role myrole
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if role_exists(name, region, key, keyid, profile):
return True
if not policy_document:
policy_document = None
try:
conn.create_role(name, assume_role_policy_document=policy_document,
path=path)
log.info('Created IAM role %s.', name)
return True
except boto.exception.BotoServerError as e:
log.error(e)
log.error('Failed to create IAM role %s.', name)
return False | Create an instance role.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_role myrole |
def on_builder_inited(app):
"""
Hooks into Sphinx's ``builder-inited`` event.
"""
app.cache_db_path = ":memory:"
if app.config["uqbar_book_use_cache"]:
logger.info(bold("[uqbar-book]"), nonl=True)
logger.info(" initializing cache db")
app.connection = uqbar.book.sphinx.create_cache_db(app.cache_db_path) | Hooks into Sphinx's ``builder-inited`` event. |
def from_urdf_file(cls, urdf_file, base_elements=None, last_link_vector=None, base_element_type="link", active_links_mask=None, name="chain"):
"""Creates a chain from an URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
active_links_mask: list[bool]
"""
if base_elements is None:
base_elements = ["base_link"]
links = URDF_utils.get_urdf_parameters(urdf_file, base_elements=base_elements, last_link_vector=last_link_vector, base_element_type=base_element_type)
# Add an origin link at the beginning
    return cls([link_lib.OriginLink()] + links, active_links_mask=active_links_mask, name=name) | Creates a chain from a URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
active_links_mask: list[bool] |
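A minimal usage sketch (the URDF path and root link name are placeholders; this assumes the method belongs to ikpy's Chain class, as the link_lib and URDF_utils helpers suggest):
from ikpy.chain import Chain

# "my_robot.urdf" and "base_link" are illustrative names only.
my_chain = Chain.from_urdf_file("my_robot.urdf", base_elements=["base_link"])
print(my_chain.links)   # an OriginLink followed by the links parsed from the URDF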
def start_server(socket, projectname, xmlfilename: str) -> None:
"""Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information).
"""
state.initialise(projectname, xmlfilename)
server = http.server.HTTPServer(('', int(socket)), HydPyServer)
server.serve_forever() | Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information). |
def phistogram(view, a, bins=10, rng=None, normed=False):
"""Compute the histogram of a remote array a.
Parameters
----------
view
IPython DirectView instance
a : str
String name of the remote array
bins : int
Number of histogram bins
rng : (float, float)
Tuple of min, max of the range to histogram
normed : boolean
Should the histogram counts be normalized to 1
"""
nengines = len(view.targets)
# view.push(dict(bins=bins, rng=rng))
with view.sync_imports():
import numpy
rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng)
hists = [ r[0] for r in rets ]
lower_edges = [ r[1] for r in rets ]
# view.execute('hist, lower_edges = numpy.histogram(%s, bins, rng)' % a)
    lower_edges = lower_edges[0]
hist_array = numpy.array(hists).reshape(nengines, -1)
# hist_array.shape = (nengines,-1)
total_hist = numpy.sum(hist_array, 0)
if normed:
total_hist = total_hist/numpy.sum(total_hist,dtype=float)
return total_hist, lower_edges | Compute the histogram of a remote array a.
Parameters
----------
view
IPython DirectView instance
a : str
String name of the remote array
bins : int
Number of histogram bins
rng : (float, float)
Tuple of min, max of the range to histogram
normed : boolean
Should the histogram counts be normalized to 1 |
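Summing the per-engine histograms is valid because every engine bins its chunk against the same fixed range, so the counts are additive. A standalone numpy sketch of that reduction (no IPython cluster required):
import numpy as np

a = np.random.rand(1000)
chunks = np.array_split(a, 4)      # stand-in for the array pieces held by 4 engines
bins, rng = 10, (0.0, 1.0)
partial_hists = [np.histogram(c, bins, rng)[0] for c in chunks]
total_hist = np.sum(partial_hists, axis=0)
assert np.array_equal(total_hist, np.histogram(a, bins, rng)[0])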
def remove_unicode_dict(input_dict):
'''remove unicode keys and values from dict, encoding in utf8
'''
    if isinstance(input_dict, basestring):
        return input_dict.encode('utf-8')
    elif isinstance(input_dict, collections.Mapping):
        return dict(map(remove_unicode_dict, input_dict.iteritems()))
    elif isinstance(input_dict, collections.Iterable):
        return type(input_dict)(map(remove_unicode_dict, input_dict))
else:
return input_dict | remove unicode keys and values from dict, encoding in utf8 |
def parse_encoding(fp):
"""Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object in binary mode.
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
if not m:
try:
import parser
parser.suite(line1.decode('ascii', 'ignore'))
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source
# is not valid python source, or line2 is a continuation of
# line1, in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(
line2.decode('ascii', 'ignore'))
if has_bom:
if m:
raise SyntaxError("python refuses to compile code with both a UTF8" \
" byte-order-mark and a magic encoding comment")
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos) | Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object in binary mode. |
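A self-contained sketch of the same PEP 263 detection idea; the regex below is only an approximation of the module-level `_PYTHON_MAGIC_COMMENT_re`, which is not shown in this snippet:
import io
import re

magic_re = re.compile(r'[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')

fp = io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
line1 = fp.readline().decode('ascii', 'ignore')
match = magic_re.match(line1)
print(match.group(1) if match else None)   # prints: latin-1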
def max_entropy_distribution(node_indices, number_of_nodes):
"""Return the maximum entropy distribution over a set of nodes.
This is different from the network's uniform distribution because nodes
outside ``node_indices`` are fixed and treated as if they have only 1
state.
Args:
node_indices (tuple[int]): The set of node indices over which to take
the distribution.
number_of_nodes (int): The total number of nodes in the network.
Returns:
np.ndarray: The maximum entropy distribution over the set of nodes.
"""
distribution = np.ones(repertoire_shape(node_indices, number_of_nodes))
return distribution / distribution.size | Return the maximum entropy distribution over a set of nodes.
This is different from the network's uniform distribution because nodes
outside ``node_indices`` are fixed and treated as if they have only 1
state.
Args:
node_indices (tuple[int]): The set of node indices over which to take
the distribution.
number_of_nodes (int): The total number of nodes in the network.
Returns:
np.ndarray: The maximum entropy distribution over the set of nodes. |
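For intuition, assuming `repertoire_shape` gives each node in `node_indices` a dimension of size 2 and fixes every other node to size 1 (an assumption consistent with the docstring's wording), a worked example for nodes (0, 2) of a 3-node network:
import numpy as np

def repertoire_shape(node_indices, number_of_nodes):
    # Assumed behaviour: nodes in the set vary over 2 states, all others are fixed.
    return [2 if i in node_indices else 1 for i in range(number_of_nodes)]

distribution = np.ones(repertoire_shape((0, 2), 3))
print(distribution.shape)                  # (2, 1, 2)
print(distribution / distribution.size)    # four entries, each equal to 0.25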
def partial_dependence(self, term, X=None, width=None, quantiles=None,
meshgrid=False):
"""
Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
        instead of specifying the prediction width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if not isinstance(term, int):
raise ValueError('term must be an integer, but found term: {}'.format(term))
# ensure term exists
if (term >= len(self.terms)) or (term < -1):
raise ValueError('Term {} out of range for model with {} terms'\
.format(term, len(self.terms)))
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
if X is None:
X = self.generate_X_grid(term=term, meshgrid=meshgrid)
if meshgrid:
if not isinstance(X, tuple):
raise ValueError('X must be a tuple of grids if `meshgrid=True`, '\
'but found X: {}'.format(X))
shape = X[0].shape
X = self._flatten_mesh(X, term=term)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
modelmat = self._modelmat(X, term=term)
pdep = self._linear_predictor(modelmat=modelmat, term=term)
out = [pdep]
compute_quantiles = (width is not None) or (quantiles is not None)
if compute_quantiles:
conf_intervals = self._get_quantiles(X, width=width,
quantiles=quantiles,
modelmat=modelmat,
lp=pdep,
term=term,
xform=False)
out += [conf_intervals]
if meshgrid:
for i, array in enumerate(out):
# add extra dimensions arising from multiple confidence intervals
if array.ndim > 1:
depth = array.shape[-1]
shape += (depth,)
out[i] = np.reshape(array, shape)
if compute_quantiles:
return out
return out[0] | Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
    instead of specifying the prediction width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids. |
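A hedged usage sketch in the style of the pygam documentation (the synthetic data and term index are placeholders):
import numpy as np
from pygam import LinearGAM

X = np.random.rand(200, 3)
y = X[:, 0] ** 2 + np.random.normal(scale=0.1, size=200)

gam = LinearGAM().fit(X, y)
XX = gam.generate_X_grid(term=0)
pdep, confi = gam.partial_dependence(term=0, X=XX, width=0.95)
# pdep: partial dependence of term 0; confi: the matching (n, 2) confidence band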
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False | matches the multiline version of a comment |
def _find_symbol(self, module, name, fallback=None):
"""
Find the symbol of the specified name inside the module or raise an
exception.
"""
if not hasattr(module, name) and fallback:
return self._find_symbol(module, fallback, None)
return getattr(module, name) | Find the symbol of the specified name inside the module or raise an
exception. |
def __parse_dois(self, x):
"""
Parse the Dataset_DOI field. Could be one DOI string, or a list of DOIs
:param any x: Str or List of DOI ids
:return none: list is set to self
"""
# datasetDOI is a string. parse, validate and return a list of DOIs
if isinstance(x, str):
# regex cleans string, and returns a list with 1 entry for each regex doi match
m = clean_doi(x)
# make sure m is not an empty list
if m:
# set list directly into self
self.doi = m
# datasetDOI is a list. use regex to validate each doi entry.
elif isinstance(x, list):
for entry in x:
# regex cleans string, and returns a list with 1 entry for each regex doi match
m = clean_doi(entry)
# make sure m is not an empty list
if m:
# combine lists with existing self list
self.doi += m
return | Parse the Dataset_DOI field. Could be one DOI string, or a list of DOIs
:param any x: Str or List of DOI ids
:return none: list is set to self |
def create_request(query):
"""
Creates a GET request to Yarr! server
:param query: Free-text search query
:returns: Requests object
"""
yarr_url = app.config.get('YARR_URL', False)
if not yarr_url:
        raise ValueError('No URL to Yarr! server specified in config.')
api_token = app.config.get('YARR_API_TOKEN', False)
headers = {'X-API-KEY': api_token} if api_token else {}
payload = {'q': query}
url = '%s/search' % yarr_url
return requests.get(url, params=payload, headers=headers) | Creates a GET request to Yarr! server
:param query: Free-text search query
:returns: Requests object |
def insert_lemmatisation_data(germanet_db):
'''
Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object
'''
# drop the database collection if it already exists
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode('iso-8859-1').strip().split('\t')
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))
num_lemmas += 1
input_file.close()
# index the collection on 'word'
germanet_db.lemmatiser.create_index('word')
print('Inserted {0} lemmatiser entries.'.format(num_lemmas)) | Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object |
def vtas2cas(tas, h):
""" tas2cas conversion both m/s """
p, rho, T = vatmos(h)
qdyn = p*((1.+rho*tas*tas/(7.*p))**3.5-1.)
cas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
# cope with negative speed
cas = np.where(tas<0, -1*cas, cas)
return cas | tas2cas conversion both m/s |
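The conversion uses the compressible impact-pressure relation rather than a simple density ratio. A standalone sketch with sea-level ISA constants substituted for `vatmos` (so it only covers h = 0, where CAS equals TAS):
import numpy as np

p0, rho0 = 101325.0, 1.225      # sea-level ISA pressure [Pa] and density [kg/m^3]

def tas2cas_sealevel(tas):
    p, rho = p0, rho0           # at h = 0 the local values equal the sea-level ones
    qdyn = p * ((1. + rho * tas * tas / (7. * p)) ** 3.5 - 1.)
    cas = np.sqrt(7. * p0 / rho0 * ((qdyn / p0 + 1.) ** (2. / 7.) - 1.))
    return np.where(tas < 0, -cas, cas)

print(tas2cas_sealevel(100.0))  # ~100.0, since CAS equals TAS at sea level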
def paginate(self, url, key, params=None):
"""
Fetch a sequence of paginated resources from the API endpoint. The
initial request to ``url`` and all subsequent requests must respond
with a JSON object; the field specified by ``key`` must be a list,
whose elements will be yielded, and the next request will be made to
the URL in the ``.links.pages.next`` field until the responses no
longer contain that field.
:param str url: the URL to make the initial request of. If ``url``
begins with a forward slash, :attr:`endpoint` is prepended to it;
otherwise, ``url`` is treated as an absolute URL.
:param str key: the field on each page containing a list of values to
yield
:param dict params: parameters to add to the initial URL's query
string. A ``"per_page"`` parameter may be included to override
the default :attr:`per_page` setting.
:rtype: generator of decoded JSON values
:raises ValueError: if a response body is not an object or ``key`` is
not one of its keys
:raises DOAPIError: if the API endpoint replies with an error
"""
if params is None:
params = {}
if self.per_page is not None and "per_page" not in params:
params = dict(params, per_page=self.per_page)
page = self.request(url, params=params)
while True:
try:
objects = page[key]
except (KeyError, TypeError):
raise ValueError('{0!r}: not a key of the response body'\
.format(key))
for obj in objects:
yield obj
try:
url = page["links"]["pages"]["next"]
except KeyError:
break
page = self.request(url) | Fetch a sequence of paginated resources from the API endpoint. The
initial request to ``url`` and all subsequent requests must respond
with a JSON object; the field specified by ``key`` must be a list,
whose elements will be yielded, and the next request will be made to
the URL in the ``.links.pages.next`` field until the responses no
longer contain that field.
:param str url: the URL to make the initial request of. If ``url``
begins with a forward slash, :attr:`endpoint` is prepended to it;
otherwise, ``url`` is treated as an absolute URL.
:param str key: the field on each page containing a list of values to
yield
:param dict params: parameters to add to the initial URL's query
string. A ``"per_page"`` parameter may be included to override
the default :attr:`per_page` setting.
:rtype: generator of decoded JSON values
:raises ValueError: if a response body is not an object or ``key`` is
not one of its keys
:raises DOAPIError: if the API endpoint replies with an error |
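A hedged consumption sketch (the client instance, endpoint path, and key are illustrative; because `paginate` is a generator, pages are fetched lazily as the loop advances):
# `client` is assumed to be an instance of the class that defines paginate().
for droplet in client.paginate('/v2/droplets', 'droplets', params={'per_page': 50}):
    print(droplet['id'])   # each yielded item is one decoded JSON object from the "droplets" list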
def get_ip(data):
'''
Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one.
'''
try:
ip = data.public_ips[0]
except Exception:
ip = data.private_ips[0]
return ip | Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one. |
def rescan_file(self, resource, date='', period='', repeat='', notify_url='', notify_changes_only='', timeout=None):
""" Rescan a previously submitted filed or schedule an scan to be performed in the future.
This API allows you to rescan files present in VirusTotal's file store without having to
resubmit them, thus saving bandwidth. You only need to know one of the hashes of the file
to rescan.
:param resource: An md5/sha1/sha256 hash. You can also specify a CSV list made up of a
combination of any of the three allowed hashes (up to 25 items), this allows you to perform
a batch request with just one single call. Note that the file must already be present in our
file store.
:param date: (optional) Date in %Y%m%d%H%M%S format (example: 20120725170000) in which the rescan should
be performed. If not specified the rescan will be performed immediately.
:param period: (optional) Periodicity (in days) with which the file should be rescanned. If this argument
is provided the file will be rescanned periodically every period days, if not, the rescan is
        performed once and not repeated again.
:param repeat: (optional) Used in conjunction with period to specify the number of times the file should be
rescanned. If this argument is provided the file will be rescanned the given amount of times in coherence
with the chosen periodicity, if not, the file will be rescanned indefinitely.
:param notify_url: (optional) A URL to which a POST notification should be sent when the rescan finishes.
:param notify_changes_only: (optional) Used in conjunction with notify_url. Indicates if POST notifications
should only be sent if the scan results differ from the previous one.
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: JSON response that contains scan_id and permalink.
"""
params = {'apikey': self.api_key, 'resource': resource}
try:
response = requests.post(self.base + 'file/rescan', params=params, proxies=self.proxies, timeout=timeout)
except requests.RequestException as e:
return dict(error=str(e))
        return _return_response_and_status_code(response) | Rescan a previously submitted file or schedule a scan to be performed in the future.
This API allows you to rescan files present in VirusTotal's file store without having to
resubmit them, thus saving bandwidth. You only need to know one of the hashes of the file
to rescan.
:param resource: An md5/sha1/sha256 hash. You can also specify a CSV list made up of a
combination of any of the three allowed hashes (up to 25 items), this allows you to perform
a batch request with just one single call. Note that the file must already be present in our
file store.
:param date: (optional) Date in %Y%m%d%H%M%S format (example: 20120725170000) in which the rescan should
be performed. If not specified the rescan will be performed immediately.
:param period: (optional) Periodicity (in days) with which the file should be rescanned. If this argument
is provided the file will be rescanned periodically every period days, if not, the rescan is
    performed once and not repeated again.
:param repeat: (optional) Used in conjunction with period to specify the number of times the file should be
rescanned. If this argument is provided the file will be rescanned the given amount of times in coherence
with the chosen periodicity, if not, the file will be rescanned indefinitely.
:param notify_url: (optional) A URL to which a POST notification should be sent when the rescan finishes.
:param notify_changes_only: (optional) Used in conjunction with notify_url. Indicates if POST notifications
should only be sent if the scan results differ from the previous one.
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: JSON response that contains scan_id and permalink. |
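A hedged usage sketch, assuming this method is exposed on a public-API client class with the API key passed at construction (the package and class name are assumptions; the hash shown is just the SHA-256 of an empty file, used as a placeholder):
from virus_total_apis import PublicApi   # assumed package providing the client class this method belongs to

vt = PublicApi('YOUR_API_KEY')
response = vt.rescan_file('e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
print(response)   # on success, the JSON contains scan_id and permalink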
def _mouseMoveDrag(moveOrDrag, x, y, xOffset, yOffset, duration, tween=linear, button=None):
"""Handles the actual move or drag event, since different platforms
implement them differently.
On Windows & Linux, a drag is a normal mouse move while a mouse button is
held down. On OS X, a distinct "drag" event must be used instead.
The code for moving and dragging the mouse is similar, so this function
handles both. Users should call the moveTo() or dragTo() functions instead
of calling _mouseMoveDrag().
Args:
moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
x (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
xOffset (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
"""
# The move and drag code is similar, but OS X requires a special drag event instead of just a move event when dragging.
# See https://stackoverflow.com/a/2696107/1893164
assert moveOrDrag in ('move', 'drag'), "moveOrDrag must be in ('move', 'drag'), not %s" % (moveOrDrag)
if sys.platform != 'darwin':
moveOrDrag = 'move' # Only OS X needs the drag event specifically.
xOffset = int(xOffset) if xOffset is not None else 0
yOffset = int(yOffset) if yOffset is not None else 0
if x is None and y is None and xOffset == 0 and yOffset == 0:
return # Special case for no mouse movement at all.
startx, starty = position()
x = int(x) if x is not None else startx
y = int(y) if y is not None else starty
# x, y, xOffset, yOffset are now int.
x += xOffset
y += yOffset
width, height = size()
# Make sure x and y are within the screen bounds.
x = max(0, min(x, width - 1))
y = max(0, min(y, height - 1))
# If the duration is small enough, just move the cursor there instantly.
steps = [(x, y)]
if duration > MINIMUM_DURATION:
# Non-instant moving/dragging involves tweening:
num_steps = max(width, height)
sleep_amount = duration / num_steps
if sleep_amount < MINIMUM_SLEEP:
num_steps = int(duration / MINIMUM_SLEEP)
sleep_amount = duration / num_steps
steps = [
getPointOnLine(startx, starty, x, y, tween(n / num_steps))
for n in range(num_steps)
]
# Making sure the last position is the actual destination.
steps.append((x, y))
for tweenX, tweenY in steps:
if len(steps) > 1:
# A single step does not require tweening.
time.sleep(sleep_amount)
_failSafeCheck()
tweenX = int(round(tweenX))
tweenY = int(round(tweenY))
if moveOrDrag == 'move':
platformModule._moveTo(tweenX, tweenY)
elif moveOrDrag == 'drag':
platformModule._dragTo(tweenX, tweenY, button)
else:
raise NotImplementedError('Unknown value of moveOrDrag: {0}'.format(moveOrDrag))
_failSafeCheck() | Handles the actual move or drag event, since different platforms
implement them differently.
On Windows & Linux, a drag is a normal mouse move while a mouse button is
held down. On OS X, a distinct "drag" event must be used instead.
The code for moving and dragging the mouse is similar, so this function
handles both. Users should call the moveTo() or dragTo() functions instead
of calling _mouseMoveDrag().
Args:
moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
x (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
xOffset (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None |
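Callers are expected to use the public wrappers rather than this helper; a hedged sketch of the typical pyautogui calls that route through it (coordinates and durations are arbitrary):
import pyautogui

pyautogui.moveTo(300, 200, duration=0.5)                  # tweened move to absolute (300, 200)
pyautogui.dragTo(500, 400, duration=1.0, button='left')   # same motion with the left button held down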
def configfile_from_path(path, strict=True):
"""Get a ConfigFile object based on a file path.
This method will inspect the file extension and return the appropriate
ConfigFile subclass initialized with the given path.
Args:
path (str): The file path which represents the configuration file.
strict (bool): Whether or not to parse the file in strict mode.
Returns:
confpy.loaders.base.ConfigurationFile: The subclass which is
specialized for the given file path.
Raises:
UnrecognizedFileExtension: If there is no loader for the path.
"""
extension = path.split('.')[-1]
conf_type = FILE_TYPES.get(extension)
if not conf_type:
raise exc.UnrecognizedFileExtension(
"Cannot parse file of type {0}. Choices are {1}.".format(
extension,
FILE_TYPES.keys(),
)
)
return conf_type(path=path, strict=strict) | Get a ConfigFile object based on a file path.
This method will inspect the file extension and return the appropriate
ConfigFile subclass initialized with the given path.
Args:
path (str): The file path which represents the configuration file.
strict (bool): Whether or not to parse the file in strict mode.
Returns:
confpy.loaders.base.ConfigurationFile: The subclass which is
specialized for the given file path.
Raises:
UnrecognizedFileExtension: If there is no loader for the path. |
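A minimal, self-contained sketch of the extension-based dispatch this function relies on; the loader classes and mapping below are illustrative stand-ins for the module-level `FILE_TYPES` registry, which is not shown here:
class JsonConfigFile:
    def __init__(self, path, strict=True):
        self.path, self.strict = path, strict

class IniConfigFile(JsonConfigFile):
    pass

# Hypothetical registry from file extension to loader class.
FILE_TYPES = {'json': JsonConfigFile, 'ini': IniConfigFile}

extension = 'settings.json'.split('.')[-1]
loader = FILE_TYPES[extension]('settings.json')
print(type(loader).__name__)   # JsonConfigFile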
def set_option(self, option, value):
"""
Set a plugin option in configuration file.
Note: Use sig_option_changed to call it from widgets of the
same or another plugin.
"""
CONF.set(self.CONF_SECTION, str(option), value) | Set a plugin option in configuration file.
Note: Use sig_option_changed to call it from widgets of the
same or another plugin. |
def setColumnMapper(self, columnName, callable):
"""
        Sets the mapper for the given column name to the callable. The inputted
callable should accept a single argument for a record from the tree and
return the text that should be displayed in the column.
:param columnName | <str>
callable | <function> || <method> || <lambda>
"""
columnName = nativestring(columnName)
if ( callable is None and columnName in self._columnMappers ):
self._columnMappers.pop(columnName)
return
        self._columnMappers[nativestring(columnName)] = callable | Sets the mapper for the given column name to the callable. The inputted
callable should accept a single argument for a record from the tree and
return the text that should be displayed in the column.
:param columnName | <str>
callable | <function> || <method> || <lambda> |
async def play_tone(self, pin, tone_command, frequency, duration):
"""
This method will call the Tone library for the selected pin.
It requires FirmataPlus to be loaded onto the arduino
If the tone command is set to TONE_TONE, then the specified
tone will be played.
Else, if the tone command is TONE_NO_TONE, then any currently
playing tone will be disabled.
:param pin: Pin number
:param tone_command: Either TONE_TONE, or TONE_NO_TONE
:param frequency: Frequency of tone
:param duration: Duration of tone in milliseconds
:returns: No return value
"""
# convert the integer values to bytes
if tone_command == Constants.TONE_TONE:
# duration is specified
if duration:
data = [tone_command, pin, frequency & 0x7f, (frequency >> 7) & 0x7f,
duration & 0x7f, (duration >> 7) & 0x7f]
else:
data = [tone_command, pin,
frequency & 0x7f, (frequency >> 7) & 0x7f, 0, 0]
# turn off tone
else:
data = [tone_command, pin]
await self._send_sysex(PrivateConstants.TONE_DATA, data) | This method will call the Tone library for the selected pin.
It requires FirmataPlus to be loaded onto the arduino
If the tone command is set to TONE_TONE, then the specified
tone will be played.
Else, if the tone command is TONE_NO_TONE, then any currently
playing tone will be disabled.
:param pin: Pin number
:param tone_command: Either TONE_TONE, or TONE_NO_TONE
:param frequency: Frequency of tone
:param duration: Duration of tone in milliseconds
:returns: No return value |
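The frequency and duration are split into two 7-bit bytes because Firmata sysex payloads cannot carry the high bit; a standalone sketch of the packing and of how the firmware side reassembles the value:
frequency = 1000                                   # Hz, arbitrary example value
lsb, msb = frequency & 0x7f, (frequency >> 7) & 0x7f
assert (msb << 7) | lsb == frequency               # receiver-side reconstruction
print(lsb, msb)                                    # 104 7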
def starts(self, layer):
"""Retrieve start positions of elements if given layer."""
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | Retrieve start positions of elements if given layer. |
def crypto_core_ed25519_is_valid_point(p):
"""
Check if ``p`` represents a point on the edwards25519 curve, in canonical
form, on the main subgroup, and that the point doesn't have a small order.
:param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
representing a point on the edwards25519 curve
:type p: bytes
:return: point validity
:rtype: bool
"""
ensure(isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES,
'Point must be a crypto_core_ed25519_BYTES long bytes sequence',
raising=exc.TypeError)
rc = lib.crypto_core_ed25519_is_valid_point(p)
return rc == 1 | Check if ``p`` represents a point on the edwards25519 curve, in canonical
form, on the main subgroup, and that the point doesn't have a small order.
:param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
representing a point on the edwards25519 curve
:type p: bytes
:return: point validity
:rtype: bool |
def githubtunnel(user1, server1, user2, server2, port, verbose, stanford=False):
"""
Opens a nested tunnel, first to *user1*@*server1*, then to *user2*@*server2*, for accessing on *port*.
If *verbose* is true, prints various ssh commands.
If *stanford* is true, shifts ports up by 1.
Attempts to get *user1*, *user2* from environment variable ``USER_NAME`` if called from the command line.
"""
if stanford:
port_shift = 1
else:
port_shift = 0
# command1 = 'ssh -nNf -L {}:quickpicmac3.slac.stanford.edu:22 {}@{}'.format(port, user, server)
command1 = 'ssh -nNf -L {}:{}:22 {}@{}'.format(port-1-port_shift, server2, user1, server1)
command2 = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -nNf -L {}:cardinal.stanford.edu:22 -p {} {}@localhost'.format(port-port_shift, port-port_shift-1, user2)
command3 = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -nNf -L {}:github.com:22 -p {} {}@localhost'.format(port, port-1, user2)
if verbose:
print(command1)
if stanford:
print(command2)
print(command3)
try:
call(shlex.split(command1))
if stanford:
call(shlex.split(command2))
call(shlex.split(command3))
    except Exception:
        print('Failure!')
If *verbose* is true, prints various ssh commands.
If *stanford* is true, shifts ports up by 1.
Attempts to get *user1*, *user2* from environment variable ``USER_NAME`` if called from the command line. |
def perimeter(self):
'''
Sum of the length of all sides, float.
'''
return sum([a.distance(b) for a, b in self.pairs()]) | Sum of the length of all sides, float. |
def transfer(self, name, local, remote, **kwargs):
"""
Transfers the file with the given name from the local to the remote
storage backend.
:param name: The name of the file to transfer
:param local: The local storage backend instance
:param remote: The remote storage backend instance
:returns: `True` when the transfer succeeded, `False` if not. Retries
the task when returning `False`
:rtype: bool
"""
try:
remote.save(name, local.open(name))
return True
except Exception as e:
logger.error("Unable to save '%s' to remote storage. "
"About to retry." % name)
logger.exception(e)
return False | Transfers the file with the given name from the local to the remote
storage backend.
:param name: The name of the file to transfer
:param local: The local storage backend instance
:param remote: The remote storage backend instance
:returns: `True` when the transfer succeeded, `False` if not. Retries
the task when returning `False`
:rtype: bool |
def resize_max(img, max_side):
"""
Resize the image to threshold the maximum dimension within max_side
:param img:
:param max_side: Length of the maximum height or width
:return:
"""
h, w = img.shape[:2]
    if h > w:
        nh = max_side
        nw = int(round(w * nh / h))
    else:
        nw = max_side
        nh = int(round(h * nw / w))
return cv.resize(img, (nw, nh)) | Resize the image to threshold the maximum dimension within max_side
:param img:
:param max_side: Length of the maximum height or width
:return: |
def filter(self, s, method='chebyshev', order=30):
r"""Filter signals (analysis or synthesis).
A signal is defined as a rank-3 tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the graph,
``N_SIGNALS`` is the number of independent signals, and ``N_FEATURES``
is the number of features which compose a graph signal, or the
dimensionality of a graph signal. For example if you filter a signal
with a filter bank of 8 filters, you're extracting 8 features and
        decomposing your signal into 8 parts. That is called analysis. You are
thus transforming your signal tensor from ``(G.N, 1, 1)`` to ``(G.N, 1,
        8)``. Now you may want to combine back the features to form a unique
signal. For this you apply again 8 filters, one filter per feature, and
sum the result up. As such you're transforming your ``(G.N, 1, 8)``
tensor signal back to ``(G.N, 1, 1)``. That is known as synthesis. More
generally, you may want to map a set of features to another, though
that is not implemented yet.
The method computes the transform coefficients of a signal :math:`s`,
where the atoms of the transform dictionary are generalized
translations of each graph spectral filter to each vertex on the graph:
.. math:: c = D^* s,
where the columns of :math:`D` are :math:`g_{i,m} = T_i g_m` and
:math:`T_i` is a generalized translation operator applied to each
filter :math:`\hat{g}_m(\cdot)`. Each column of :math:`c` is the
response of the signal to one filter.
In other words, this function is applying the analysis operator
:math:`D^*`, respectively the synthesis operator :math:`D`, associated
with the frame defined by the filter bank to the signals.
Parameters
----------
s : array_like
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the
graph, ``N_SIGNALS`` the number of independent signals you want to
filter, and ``N_FEATURES`` is either 1 (analysis) or the number of
filters in the filter bank (synthesis).
method : {'exact', 'chebyshev'}
Whether to use the exact method (via the graph Fourier transform)
or the Chebyshev polynomial approximation. A Lanczos
approximation is coming.
order : int
Degree of the Chebyshev polynomials.
Returns
-------
s : ndarray
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` and ``N_SIGNALS`` are the number
            of nodes and signals of the signal tensor that was passed in, and
``N_FEATURES`` is either 1 (synthesis) or the number of filters in
the filter bank (analysis).
References
----------
See :cite:`hammond2011wavelets` for details on filtering graph signals.
Examples
--------
Create a bunch of smooth signals by low-pass filtering white noise:
>>> import matplotlib.pyplot as plt
>>> G = graphs.Ring(N=60)
>>> G.estimate_lmax()
>>> s = np.random.RandomState(42).uniform(size=(G.N, 10))
>>> taus = [1, 10, 100]
>>> s = filters.Heat(G, taus).filter(s)
>>> s.shape
(60, 10, 3)
Plot the 3 smoothed versions of the 10th signal:
>>> fig, ax = plt.subplots()
>>> G.set_coordinates('line1D') # To visualize multiple signals in 1D.
>>> _ = G.plot(s[:, 9, :], ax=ax)
>>> legend = [r'$\tau={}$'.format(t) for t in taus]
>>> ax.legend(legend) # doctest: +ELLIPSIS
<matplotlib.legend.Legend object at ...>
Low-pass filter a delta to create a localized smooth signal:
>>> G = graphs.Sensor(30, seed=42)
>>> G.compute_fourier_basis() # Reproducible computation of lmax.
>>> s1 = np.zeros(G.N)
>>> s1[13] = 1
>>> s1 = filters.Heat(G, 3).filter(s1)
>>> s1.shape
(30,)
Filter and reconstruct our signal:
>>> g = filters.MexicanHat(G, Nf=4)
>>> s2 = g.analyze(s1)
>>> s2.shape
(30, 4)
>>> s2 = g.synthesize(s2)
>>> s2.shape
(30,)
Look how well we were able to reconstruct:
>>> fig, axes = plt.subplots(1, 2)
>>> _ = G.plot(s1, ax=axes[0])
>>> _ = G.plot(s2, ax=axes[1])
>>> print('{:.5f}'.format(np.linalg.norm(s1 - s2)))
0.26808
Perfect reconstruction with Itersine, a tight frame:
>>> g = filters.Itersine(G)
>>> s2 = g.analyze(s1, method='exact')
>>> s2 = g.synthesize(s2, method='exact')
>>> np.linalg.norm(s1 - s2) < 1e-10
True
"""
s = self.G._check_signal(s)
# TODO: not in self.Nin (Nf = Nin x Nout).
if s.ndim == 1 or s.shape[-1] not in [1, self.Nf]:
if s.ndim == 3:
raise ValueError('Third dimension (#features) should be '
'either 1 or the number of filters Nf = {}, '
'got {}.'.format(self.Nf, s.shape))
s = np.expand_dims(s, -1)
n_features_in = s.shape[-1]
if s.ndim < 3:
s = np.expand_dims(s, 1)
n_signals = s.shape[1]
if s.ndim > 3:
raise ValueError('At most 3 dimensions: '
'#nodes x #signals x #features.')
assert s.ndim == 3
# TODO: generalize to 2D (m --> n) filter banks.
# Only 1 --> Nf (analysis) and Nf --> 1 (synthesis) for now.
n_features_out = self.Nf if n_features_in == 1 else 1
if method == 'exact':
# TODO: will be handled by g.adjoint().
axis = 1 if n_features_in == 1 else 2
f = self.evaluate(self.G.e)
f = np.expand_dims(f.T, axis)
assert f.shape == (self.G.N, n_features_in, n_features_out)
s = self.G.gft(s)
s = np.matmul(s, f)
s = self.G.igft(s)
elif method == 'chebyshev':
# TODO: update Chebyshev implementation (after 2D filter banks).
c = approximations.compute_cheby_coeff(self, m=order)
if n_features_in == 1: # Analysis.
s = s.squeeze(axis=2)
s = approximations.cheby_op(self.G, c, s)
s = s.reshape((self.G.N, n_features_out, n_signals), order='F')
s = s.swapaxes(1, 2)
elif n_features_in == self.Nf: # Synthesis.
s = s.swapaxes(1, 2)
s_in = s.reshape(
(self.G.N * n_features_in, n_signals), order='F')
s = np.zeros((self.G.N, n_signals))
tmpN = np.arange(self.G.N, dtype=int)
for i in range(n_features_in):
s += approximations.cheby_op(self.G,
c[i],
s_in[i * self.G.N + tmpN])
s = np.expand_dims(s, 2)
else:
raise ValueError('Unknown method {}.'.format(method))
# Return a 1D signal if e.g. a 1D signal was filtered by one filter.
return s.squeeze() | r"""Filter signals (analysis or synthesis).
A signal is defined as a rank-3 tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the graph,
``N_SIGNALS`` is the number of independent signals, and ``N_FEATURES``
is the number of features which compose a graph signal, or the
dimensionality of a graph signal. For example if you filter a signal
with a filter bank of 8 filters, you're extracting 8 features and
    decomposing your signal into 8 parts. That is called analysis. You are
thus transforming your signal tensor from ``(G.N, 1, 1)`` to ``(G.N, 1,
    8)``. Now you may want to combine back the features to form a unique
signal. For this you apply again 8 filters, one filter per feature, and
sum the result up. As such you're transforming your ``(G.N, 1, 8)``
tensor signal back to ``(G.N, 1, 1)``. That is known as synthesis. More
generally, you may want to map a set of features to another, though
that is not implemented yet.
The method computes the transform coefficients of a signal :math:`s`,
where the atoms of the transform dictionary are generalized
translations of each graph spectral filter to each vertex on the graph:
.. math:: c = D^* s,
where the columns of :math:`D` are :math:`g_{i,m} = T_i g_m` and
:math:`T_i` is a generalized translation operator applied to each
filter :math:`\hat{g}_m(\cdot)`. Each column of :math:`c` is the
response of the signal to one filter.
In other words, this function is applying the analysis operator
:math:`D^*`, respectively the synthesis operator :math:`D`, associated
with the frame defined by the filter bank to the signals.
Parameters
----------
s : array_like
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the
graph, ``N_SIGNALS`` the number of independent signals you want to
filter, and ``N_FEATURES`` is either 1 (analysis) or the number of
filters in the filter bank (synthesis).
method : {'exact', 'chebyshev'}
Whether to use the exact method (via the graph Fourier transform)
or the Chebyshev polynomial approximation. A Lanczos
approximation is coming.
order : int
Degree of the Chebyshev polynomials.
Returns
-------
s : ndarray
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` and ``N_SIGNALS`` are the number
        of nodes and signals of the signal tensor that was passed in, and
``N_FEATURES`` is either 1 (synthesis) or the number of filters in
the filter bank (analysis).
References
----------
See :cite:`hammond2011wavelets` for details on filtering graph signals.
Examples
--------
Create a bunch of smooth signals by low-pass filtering white noise:
>>> import matplotlib.pyplot as plt
>>> G = graphs.Ring(N=60)
>>> G.estimate_lmax()
>>> s = np.random.RandomState(42).uniform(size=(G.N, 10))
>>> taus = [1, 10, 100]
>>> s = filters.Heat(G, taus).filter(s)
>>> s.shape
(60, 10, 3)
Plot the 3 smoothed versions of the 10th signal:
>>> fig, ax = plt.subplots()
>>> G.set_coordinates('line1D') # To visualize multiple signals in 1D.
>>> _ = G.plot(s[:, 9, :], ax=ax)
>>> legend = [r'$\tau={}$'.format(t) for t in taus]
>>> ax.legend(legend) # doctest: +ELLIPSIS
<matplotlib.legend.Legend object at ...>
Low-pass filter a delta to create a localized smooth signal:
>>> G = graphs.Sensor(30, seed=42)
>>> G.compute_fourier_basis() # Reproducible computation of lmax.
>>> s1 = np.zeros(G.N)
>>> s1[13] = 1
>>> s1 = filters.Heat(G, 3).filter(s1)
>>> s1.shape
(30,)
Filter and reconstruct our signal:
>>> g = filters.MexicanHat(G, Nf=4)
>>> s2 = g.analyze(s1)
>>> s2.shape
(30, 4)
>>> s2 = g.synthesize(s2)
>>> s2.shape
(30,)
Look how well we were able to reconstruct:
>>> fig, axes = plt.subplots(1, 2)
>>> _ = G.plot(s1, ax=axes[0])
>>> _ = G.plot(s2, ax=axes[1])
>>> print('{:.5f}'.format(np.linalg.norm(s1 - s2)))
0.26808
Perfect reconstruction with Itersine, a tight frame:
>>> g = filters.Itersine(G)
>>> s2 = g.analyze(s1, method='exact')
>>> s2 = g.synthesize(s2, method='exact')
>>> np.linalg.norm(s1 - s2) < 1e-10
True |
def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
"""Plot a 3D visualization of the dynamics of the Lorenz system"""
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function: called sequentially
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | Plot a 3D visualization of the dynamics of the Lorenz system |
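A hedged usage sketch (saving to a file assumes a movie writer such as ffmpeg is available; the notebook variant is shown commented out):
anim = lorenz_animation(N_trajectories=10, frames=100)
anim.save('lorenz.mp4', fps=30)        # requires a movie writer such as ffmpeg
# In a Jupyter notebook one could instead display it inline:
# from IPython.display import HTML
# HTML(anim.to_html5_video())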
def analyze_internal_angles(self, return_plot=False):
"""Analyze the internal angles of the grid. Angles shouldn't be too
small because this can cause problems/uncertainties in the
Finite-Element solution of the forward problem. This function prints
the min/max values, as well as quantiles, to the command line, and can
also produce a histogram plot of the angles.
Parameters
----------
return_plot: bool
if true, return (fig, ax) objects of the histogram plot
Returns
-------
fig: matplotlib.figure
figure object
ax: matplotlib.axes
axes object
Examples
--------
>>> import crtomo.grid as CRGrid
grid = CRGrid.crt_grid()
grid.load_elem_file('elem.dat')
        fig, ax = grid.analyze_internal_angles(return_plot=True)
This grid was sorted using CutMcK. The nodes were resorted!
Triangular grid found
Minimal angle: 22.156368696965796 degrees
Maximal angle: 134.99337326279496 degrees
Angle percentile 10%: 51.22 degrees
Angle percentile 20%: 55.59 degrees
Angle percentile 30%: 58.26 degrees
Angle percentile 40%: 59.49 degrees
Angle percentile 50%: 59.95 degrees
Angle percentile 60%: 60.25 degrees
Angle percentile 70%: 61.16 degrees
Angle percentile 80%: 63.44 degrees
Angle percentile 90%: 68.72 degrees
generating plot...
>>> # save to file with
fig.savefig('element_angles.png', dpi=300)
"""
angles = self.get_internal_angles().flatten()
print('Minimal angle: {0} degrees'.format(np.min(angles)))
print('Maximal angle: {0} degrees'.format(np.max(angles)))
# print out quantiles
for i in range(10, 100, 10):
print('Angle percentile {0}%: {1:0.2f} degrees'.format(
i,
np.percentile(angles, i),
))
if return_plot:
print('generating plot...')
fig, ax = plt.subplots(1, 1, figsize=(12 / 2.54, 8 / 2.54))
ax.hist(angles, int(angles.size / 10))
ax.set_xlabel('angle [deg]')
ax.set_ylabel('count')
fig.tight_layout()
# fig.savefig('plot_element_angles.jpg', dpi=300)
return fig, ax | Analyze the internal angles of the grid. Angles shouldn't be too
small because this can cause problems/uncertainties in the
Finite-Element solution of the forward problem. This function prints
the min/max values, as well as quantiles, to the command line, and can
also produce a histogram plot of the angles.
Parameters
----------
return_plot: bool
if true, return (fig, ax) objects of the histogram plot
Returns
-------
fig: matplotlib.figure
figure object
ax: matplotlib.axes
axes object
Examples
--------
>>> import crtomo.grid as CRGrid
grid = CRGrid.crt_grid()
grid.load_elem_file('elem.dat')
    fig, ax = grid.analyze_internal_angles(return_plot=True)
This grid was sorted using CutMcK. The nodes were resorted!
Triangular grid found
Minimal angle: 22.156368696965796 degrees
Maximal angle: 134.99337326279496 degrees
Angle percentile 10%: 51.22 degrees
Angle percentile 20%: 55.59 degrees
Angle percentile 30%: 58.26 degrees
Angle percentile 40%: 59.49 degrees
Angle percentile 50%: 59.95 degrees
Angle percentile 60%: 60.25 degrees
Angle percentile 70%: 61.16 degrees
Angle percentile 80%: 63.44 degrees
Angle percentile 90%: 68.72 degrees
generating plot...
>>> # save to file with
fig.savefig('element_angles.png', dpi=300) |
def write(self, data):
"""Write ``data`` into the wire.
Returns an empty tuple or a :class:`~asyncio.Future` if this
protocol has paused writing.
"""
if self.closed:
raise ConnectionResetError(
'Transport closed - cannot write on %s' % self
)
else:
t = self.transport
if self._paused or self._buffer:
self._buffer.appendleft(data)
self._buffer_size += len(data)
self._write_from_buffer()
if self._buffer_size > 2 * self._b_limit:
if self._waiter and not self._waiter.cancelled():
self.logger.warning(
                            '%s buffer size is %d: limit is %d',
                            self, self._buffer_size, self._b_limit
)
else:
t.pause_reading()
self._waiter = self._loop.create_future()
else:
t.write(data)
self.changed()
return self._waiter | Write ``data`` into the wire.
Returns an empty tuple or a :class:`~asyncio.Future` if this
protocol has paused writing. |
def authenticate(session, username, password):
"""
Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser
"""
if not username or not password:
raise AuthenticationError()
user = session.query(PasswordUser).filter(
PasswordUser.username == username).first()
if not user:
raise AuthenticationError()
if not user.authenticate(password):
raise AuthenticationError()
log.info("User %s successfully authenticated", username)
return user | Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser |
def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.workerpool.run(self.process_request_thread,
**{'request': request,
'client_address': client_address}) | simply collect requests and put them on the queue for the workers. |
def removeAssociation(self, server_url, handle):
"""Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool
"""
assoc = self.getAssociation(server_url, handle)
if assoc is None:
return 0
else:
filename = self.getAssociationFilename(server_url, handle)
return _removeIfPresent(filename) | Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool |
def get(self, model_class, strict=True, returnDict=False, fetchOne=False, **where):
'''params:
model_class: The queried model class
strict: bool -> If True, queries are run with EQUAL(=) operator.
If False: Queries are run with RLIKE keyword
        returnDict: bool -> Return a list of dictionaries (field_name: value)
fetchOne: bool -> cursor.fetchone() else: cursor.fetchall()
        where: **kwargs for the query's WHERE condition.
if where in {}: Returns all results in the table
Usage:
print(Session().get(Employee, id=1, returnDict=True))
'''
self.typeassert(model_class, strict, returnDict, where)
table = model_class.__name__.lower()
with Session(self.settings) as conn:
if not where:
query = f'SELECT * FROM {table}'
else:
query = f'SELECT * FROM {table} WHERE'
index= 1
operator = '=' if strict else 'RLIKE'
for key, value in where.items():
if index == 1:
query+= " %s %s '%s' "%(key, operator, value)
else:
query+= " AND %s %s '%s' "%(key, operator, value)
index += 1
try:
cursor=conn.cursor()
cursor.execute(query)
except mysql.Error as e:
if e.errno == 1146:
print(f"The table {table} does not exist")
return []
else:
raise e
else:
if fetchOne:
colnames = [d[0] for d in cursor.description]
results = cursor.fetchone()
if returnDict:
return {col: val for col, val in zip(colnames, results)}\
if results else {}
return results
return self.handleResult(cursor, returnDict) | params:
model_class: The queried model class
strict: bool -> If True, queries are run with EQUAL(=) operator.
If False: Queries are run with RLIKE keyword
returnDict: bool -> Return a list of dictionaries (field_names: values)
fetchOne: bool -> cursor.fetchone() else: cursor.fetchall()
where: **kwargs for the query WHERE condition.
if where is empty: returns all results in the table
Usage:
print(Session().get(Employee, id=1, returnDict=True)) |
def add_text(self, tag, text, global_step=None):
"""Add text data to the event file.
Parameters
----------
tag : str
Name for the `text`.
text : str
Text to be saved to the event file.
global_step : int
Global step value to record.
"""
self._file_writer.add_summary(text_summary(tag, text), global_step)
if tag not in self._text_tags:
self._text_tags.append(tag)
extension_dir = self.get_logdir() + '/plugins/tensorboard_text/'
if not os.path.exists(extension_dir):
os.makedirs(extension_dir)
with open(extension_dir + 'tensors.json', 'w') as fp:
json.dump(self._text_tags, fp) | Add text data to the event file.
Parameters
----------
tag : str
Name for the `text`.
text : str
Text to be saved to the event file.
global_step : int
Global step value to record. |
def extract_secs(self, tx, tx_in_idx):
"""
        For a given script solution, iterate over and yield its sec blobs
"""
sc = tx.SolutionChecker(tx)
tx_context = sc.tx_context_for_idx(tx_in_idx)
# set solution_stack in case there are no results from puzzle_and_solution_iterator
solution_stack = []
for puzzle_script, solution_stack, flags, sighash_f in sc.puzzle_and_solution_iterator(tx_context):
for opcode, data, pc, new_pc in self._script_tools.get_opcodes(puzzle_script):
if data and is_sec(data):
yield data
for data in solution_stack:
if is_sec(data):
                    yield data | For a given script solution, iterate over and yield its sec blobs
def find_config_section(self, object_type, name=None):
"""
Return the section name with the given name prefix (following the
same pattern as ``protocol_desc`` in ``config``. It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored.
"""
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0] | Return the section name with the given name prefix (following the
same pattern as ``protocol_desc`` in ``config``. It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored. |
def flow_meter_discharge(D, Do, P1, P2, rho, C, expansibility=1.0):
r'''Calculates the flow rate of an orifice plate based on the geometry
of the plate, measured pressures of the orifice, and the density of the
fluid.
.. math::
m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
{\sqrt{1 - \beta^4}}\cdot \epsilon
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
P1 : float
Static pressure of fluid upstream of orifice at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid downstream of orifice at the cross-section of
the pressure tap, [Pa]
rho : float
Density of fluid at `P1`, [kg/m^3]
C : float
Coefficient of discharge of the orifice, [-]
expansibility : float, optional
Expansibility factor (1 for incompressible fluids, less than 1 for
real fluids), [-]
Returns
-------
m : float
Mass flow rate of fluid, [kg/s]
Notes
-----
This is formula 1-12 in [1]_ and also [2]_.
Examples
--------
>>> flow_meter_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
... C=0.5988, expansibility=0.9975)
0.01120390943807026
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates.
'''
beta = Do/D
beta2 = beta*beta
return (0.25*pi*Do*Do)*C*expansibility*(
(2.0*rho*(P1 - P2))/(1.0 - beta2*beta2))**0.5 | r'''Calculates the flow rate of an orifice plate based on the geometry
of the plate, measured pressures of the orifice, and the density of the
fluid.
.. math::
m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
{\sqrt{1 - \beta^4}}\cdot \epsilon
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
P1 : float
Static pressure of fluid upstream of orifice at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid downstream of orifice at the cross-section of
the pressure tap, [Pa]
rho : float
Density of fluid at `P1`, [kg/m^3]
C : float
Coefficient of discharge of the orifice, [-]
expansibility : float, optional
Expansibility factor (1 for incompressible fluids, less than 1 for
real fluids), [-]
Returns
-------
m : float
Mass flow rate of fluid, [kg/s]
Notes
-----
This is formula 1-12 in [1]_ and also [2]_.
Examples
--------
>>> flow_meter_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
... C=0.5988, expansibility=0.9975)
0.01120390943807026
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates. |
def temporal_from_resource(resource):
'''
    Parse a temporal coverage from an RDF class/resource, i.e. either:
- a `dct:PeriodOfTime` with schema.org `startDate` and `endDate` properties
- an inline gov.uk Time Interval value
    - a URI reference to a gov.uk Time Interval ontology
http://reference.data.gov.uk/
'''
if isinstance(resource.identifier, URIRef):
# Fetch remote ontology if necessary
g = Graph().parse(str(resource.identifier))
resource = g.resource(resource.identifier)
if resource.value(SCHEMA.startDate):
return db.DateRange(
start=resource.value(SCHEMA.startDate).toPython(),
end=resource.value(SCHEMA.endDate).toPython()
)
elif resource.value(SCV.min):
return db.DateRange(
start=resource.value(SCV.min).toPython(),
end=resource.value(SCV.max).toPython()
    ) | Parse a temporal coverage from an RDF class/resource, i.e. either:
- a `dct:PeriodOfTime` with schema.org `startDate` and `endDate` properties
- an inline gov.uk Time Interval value
- a URI reference to a gov.uk Time Interval ontology
http://reference.data.gov.uk/ |
def get_parent_vault_ids(self, vault_id):
"""Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id) | Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def forward(self, input_ids, target=None, mems=None):
""" Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
                    shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids
softmax_output: output of the (adaptive) softmax:
if target is None:
                        log probabilities of tokens, shape :: [bsz, len, n_tokens]
                    else:
                        Negative log likelihood of shape :: [bsz, len]
"""
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
# We transpose back
return (softmax_output, new_mems) | Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids
softmax_output: output of the (adaptive) softmax:
if target is None:
log probabilities of tokens, shape :: [bsz, len, n_tokens]
else:
Negative log likelihood of shape :: [bsz, len]
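A hedged usage sketch matching the forward contract above; the pretrained model name and the from_pretrained loading call follow pytorch-pretrained-BERT conventions and are assumptions here.
# import torch
# model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
# input_ids = torch.tensor([[14, 2, 7, 90]])
# log_probs, mems = model(input_ids)                  # target=None -> [bsz, len, n_tokens]
# nll, mems = model(input_ids, target=input_ids)      # with target -> [bsz, len]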
def sh_report(self, full=True, latest=False):
"""
Show shell command necessary to clone this repository
If there is no primary remote url, prefix-comment the command
Keyword Arguments:
full (bool): also include commands to recreate branches and remotes
latest (bool): checkout repo.branch instead of repo.current_id
Yields:
str: shell command necessary to clone this repository
"""
def pathvar_repr(var):
            _var = var.replace('"', '\\"')
return '"%s"' % _var
output = []
if not self.remote_url:
output.append('#')
output = output + (
[self.label]
+ self.clone_cmd
+ [pathvar_repr(self.remote_url)] # TODO: shell quote?
+ [pathvar_repr(self.relpath)]
)
yield ''
yield "## %s" % pathvar_repr(self.relpath)
yield ' '.join(output)
if full:
checkout_rev = self.current_id
# if latest: checkout_rev = self.branch
relpath = pathvar_repr(self.relpath) if self.relpath else None
relpath = relpath if relpath else ''
checkout_branch_cmd = (
[self.label]
+ self.checkout_branch_cmd + [self.branch]
+ self.repo_abspath_cmd
+ [relpath])
checkout_rev_cmd = (
[self.label]
+ self.checkout_rev_cmd + [checkout_rev]
+ self.repo_abspath_cmd
+ [relpath])
if latest:
checkout_cmd = checkout_branch_cmd
comment = checkout_rev_cmd
else:
checkout_cmd = checkout_rev_cmd
comment = checkout_branch_cmd
yield ' '.join(c for c in checkout_cmd if c is not None)
yield '### %s' % ' '.join(c for c in comment if c is not None)
# output.extend([checkout_cmd, ';', ' ###', comment])
for x in self.recreate_remotes_shellcmd():
yield x | Show shell command necessary to clone this repository
If there is no primary remote url, prefix-comment the command
Keyword Arguments:
full (bool): also include commands to recreate branches and remotes
latest (bool): checkout repo.branch instead of repo.current_id
Yields:
str: shell command necessary to clone this repository |
def AddXrefTo(self, ref_kind, classobj, methodobj, offset):
"""
        Creates a cross-reference to another class.
        XrefTo means that the current class calls another class.
        The current class should also be contained in the other class' XrefFrom list.
:param REF_TYPE ref_kind: type of call
:param classobj: :class:`ClassAnalysis` object to link
:param methodobj:
:param offset: Offset in the Methods Bytecode, where the call happens
:return:
"""
        self.xrefto[classobj].add((ref_kind, methodobj, offset)) | Creates a cross-reference to another class.
XrefTo means that the current class calls another class.
The current class should also be contained in the other class' XrefFrom list.
:param REF_TYPE ref_kind: type of call
:param classobj: :class:`ClassAnalysis` object to link
:param methodobj:
:param offset: Offset in the Methods Bytecode, where the call happens
:return: |
def clean_linebreaks(self, tag):
"""
        Get a unicode string without any other content transformation
        and clean extra spaces.
"""
stripped = tag.decode(formatter=None)
        stripped = re.sub(r'\s+', ' ', stripped)
stripped = re.sub('\n', '', stripped)
        return stripped | Get a unicode string without any other content transformation
and clean extra spaces.
def columns_used(self):
"""
Columns from any table used in the model. May come from either
the choosers or alternatives tables.
"""
return list(tz.unique(tz.concatv(
self.choosers_columns_used(),
self.alts_columns_used(),
self.interaction_columns_used()))) | Columns from any table used in the model. May come from either
the choosers or alternatives tables. |
def iterbyscore(self, min='-inf', max='+inf', start=None, num=None,
withscores=False, reverse=None):
""" Return a range of values from the sorted set name with scores
between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
        @max: #int maximum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@withscores: #bool indicates to return the scores along with the
members, as a list of |(member, score)| pairs
        @reverse: #bool indicating whether to sort the results in descending order
-> yields members or |(member, score)| #tuple pairs
"""
reverse = reverse if reverse is not None else self.reversed
zfunc = self._client.zrangebyscore if not reverse \
else self._client.zrevrangebyscore
_loads = self._loads
for member in zfunc(
self.key_prefix, min=min, max=max, start=start, num=num,
withscores=withscores, score_cast_func=self.cast):
if withscores:
yield (_loads(member[0]), self.cast(member[1]))
else:
yield _loads(member) | Return a range of values from the sorted set name with scores
between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int maximum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@withscores: #bool indicates to return the scores along with the
members, as a list of |(member, score)| pairs
@reverse: #bool indicating whether to sort the results in descending order
-> yields members or |(member, score)| #tuple pairs |
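A hypothetical usage sketch, assuming scores is an instance of the sorted-set wrapper above backed by a live Redis connection.
# for member, score in scores.iterbyscore(min=100, max=500, withscores=True):
#     print(member, score)
# first_ten = list(scores.iterbyscore(start=0, num=10))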
def lookup_by_partial_name(self, partial_name):
"""
Similar to lookup_by_name(name), this method uses loose matching rule UAX44-LM2 to attempt to find the
UnicodeCharacter associated with a name. However, it attempts to permit even looser matching by doing a
substring search instead of a simple match. This method will return a generator that yields instances of
UnicodeCharacter where the partial_name passed in is a substring of the full name.
For example:
>>> ucd = UnicodeData()
>>> for data in ucd.lookup_by_partial_name("SHARP S"):
>>> print(data.code + " " + data.name)
>>>
>>> U+00DF LATIN SMALL LETTER SHARP S
>>> U+1E9E LATIN CAPITAL LETTER SHARP S
>>> U+266F MUSIC SHARP SIGN
:param partial_name: Partial name of the character to look up.
:return: Generator that yields instances of UnicodeCharacter.
"""
for k, v in self._name_database.items():
if _uax44lm2transform(partial_name) in k:
yield v | Similar to lookup_by_name(name), this method uses loose matching rule UAX44-LM2 to attempt to find the
UnicodeCharacter associated with a name. However, it attempts to permit even looser matching by doing a
substring search instead of a simple match. This method will return a generator that yields instances of
UnicodeCharacter where the partial_name passed in is a substring of the full name.
For example:
>>> ucd = UnicodeData()
>>> for data in ucd.lookup_by_partial_name("SHARP S"):
>>> print(data.code + " " + data.name)
>>>
>>> U+00DF LATIN SMALL LETTER SHARP S
>>> U+1E9E LATIN CAPITAL LETTER SHARP S
>>> U+266F MUSIC SHARP SIGN
:param partial_name: Partial name of the character to look up.
:return: Generator that yields instances of UnicodeCharacter. |
def national_significant_number(numobj):
"""Gets the national significant number of a phone number.
Note that a national significant number doesn't contain a national prefix
or any formatting.
Arguments:
numobj -- The PhoneNumber object for which the national significant number
is needed.
Returns the national significant number of the PhoneNumber object passed
in.
"""
# If leading zero(s) have been set, we prefix this now. Note this is not a
# national prefix.
national_number = U_EMPTY_STRING
if numobj.italian_leading_zero:
num_zeros = numobj.number_of_leading_zeros
if num_zeros is None:
num_zeros = 1
if num_zeros > 0:
national_number = U_ZERO * num_zeros
national_number += str(numobj.national_number)
return national_number | Gets the national significant number of a phone number.
Note that a national significant number doesn't contain a national prefix
or any formatting.
Arguments:
numobj -- The PhoneNumber object for which the national significant number
is needed.
Returns the national significant number of the PhoneNumber object passed
in. |
def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):
"""js has to be a javascript source code.
returns equivalent python code.
compile plans only work with the following restrictions:
- only enabled for oneliner expressions
- when there are comments in the js code string substitution is disabled
    - when there are nested escaped quotes string substitution is disabled, so
cacheable:
Q1 == 1 && name == 'harry'
not cacheable:
Q1 == 1 && name == 'harry' // some comment
not cacheable:
Q1 == 1 && name == 'o\'Reilly'
not cacheable:
Q1 == 1 && name /* some comment */ == 'o\'Reilly'
"""
match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(
js)
cp_hash = hashlib.md5(compilation_plan.encode('utf-8')).digest()
try:
python_code = cache[cp_hash]['proto_python_code']
    except KeyError:
parser = pyjsparser.PyJsParser()
parsed = parser.parse(compilation_plan) # js to esprima syntax tree
# Another way of doing that would be with my auto esprima translation but its much slower and causes import problems:
# parsed = esprima.parse(js).to_dict()
translating_nodes.clean_stacks()
python_code = translating_nodes.trans(
parsed) # syntax tree to python code
cache[cp_hash] = {
'compilation_plan': compilation_plan,
'proto_python_code': python_code,
}
python_code = match_increaser_str.wrap_up(python_code)
python_code = match_increaser_num.wrap_up(python_code)
return HEADER + python_code | js has to be a javascript source code.
returns equivalent python code.
compile plans only work with the following restrictions:
- only enabled for oneliner expressions
- when there are comments in the js code string substitution is disabled
- when there are nested escaped quotes string substitution is disabled, so
cacheable:
Q1 == 1 && name == 'harry'
not cacheable:
Q1 == 1 && name == 'harry' // some comment
not cacheable:
Q1 == 1 && name == 'o\'Reilly'
not cacheable:
Q1 == 1 && name /* some comment */ == 'o\'Reilly' |
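A hedged sketch of how the translator above is typically driven; the JavaScript snippet is arbitrary and the generated source still needs the accompanying runtime header that DEFAULT_HEADER refers to.
# js_src = "var total = 1 + 2;"
# py_src = translate_js_with_compilation_plan(js_src)
# print(py_src)   # Python source ready to exec() against the js2py-style runtime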
def from_las3(cls, string, lexicon=None,
source="LAS",
dlm=',',
abbreviations=False):
"""
Turn LAS3 'lithology' section into a Striplog.
Args:
string (str): A section from an LAS3 file.
lexicon (Lexicon): The language for conversion to components.
source (str): A source for the data.
dlm (str): The delimiter.
abbreviations (bool): Whether to expand abbreviations.
Returns:
Striplog: The ``striplog`` object.
Note:
Handles multiple 'Data' sections. It would be smarter for it
to handle one at a time, and to deal with parsing the multiple
sections in the Well object.
Does not read an actual LAS file. Use the Well object for that.
"""
f = re.DOTALL | re.IGNORECASE
regex = r'\~\w+?_Data.+?\n(.+?)(?:\n\n+|\n*\~|\n*$)'
pattern = re.compile(regex, flags=f)
text = pattern.search(string).group(1)
s = re.search(r'\.(.+?)\: ?.+?source', string)
if s:
source = s.group(1).strip()
return cls.from_descriptions(text, lexicon,
source=source,
dlm=dlm,
abbreviations=abbreviations) | Turn LAS3 'lithology' section into a Striplog.
Args:
string (str): A section from an LAS3 file.
lexicon (Lexicon): The language for conversion to components.
source (str): A source for the data.
dlm (str): The delimiter.
abbreviations (bool): Whether to expand abbreviations.
Returns:
Striplog: The ``striplog`` object.
Note:
Handles multiple 'Data' sections. It would be smarter for it
to handle one at a time, and to deal with parsing the multiple
sections in the Well object.
Does not read an actual LAS file. Use the Well object for that. |
def _insert_stack(stack, sample_count, call_tree):
"""Inserts stack into the call tree.
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree.
"""
curr_level = call_tree
for func in stack:
next_level_index = {
node['stack']: node for node in curr_level['children']}
if func not in next_level_index:
new_node = {'stack': func, 'children': [], 'sampleCount': 0}
curr_level['children'].append(new_node)
curr_level = new_node
else:
curr_level = next_level_index[func]
curr_level['sampleCount'] = sample_count | Inserts stack into the call tree.
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree. |
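A minimal sketch exercising _insert_stack as defined above: two sampled call stacks that share a root are merged into one tree, and only the leaf of each inserted stack records its sample count.
call_tree = {'stack': 'root', 'children': [], 'sampleCount': 0}
_insert_stack(('main', 'parse'), 3, call_tree)
_insert_stack(('main', 'render'), 5, call_tree)
# 'main' now has two children: 'parse' (sampleCount 3) and 'render' (sampleCount 5).
print(call_tree)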
def cache_page(**kwargs):
"""
This decorator is similar to `django.views.decorators.cache.cache_page`
"""
cache_timeout = kwargs.pop('cache_timeout', None)
key_prefix = kwargs.pop('key_prefix', None)
cache_min_age = kwargs.pop('cache_min_age', None)
decorator = decorators.decorator_from_middleware_with_args(CacheMiddleware)(
cache_timeout=cache_timeout,
key_prefix=key_prefix,
cache_min_age=cache_min_age,
**kwargs
)
return decorator | This decorator is similar to `django.views.decorators.cache.cache_page` |
def on_exception(func):
"""
    Run a function when a handler throws an exception. Its return value is
    returned to AWS.
Usage::
>>> # to create a reusable decorator
>>> @on_exception
... def handle_errors(exception):
... print(exception)
... return {'statusCode': 500, 'body': 'uh oh'}
>>> @handle_errors
... def handler(event, context):
... raise Exception('it broke!')
>>> handler({}, object())
it broke!
{'statusCode': 500, 'body': 'uh oh'}
>>> # or a one off
>>> @on_exception(lambda e: {'statusCode': 500})
... def handler(body, context):
... raise Exception
>>> handler({}, object())
{'statusCode': 500}
"""
class OnExceptionDecorator(LambdaDecorator):
def on_exception(self, exception):
return func(exception)
    return OnExceptionDecorator | Run a function when a handler throws an exception. Its return value is
returned to AWS.
Usage::
>>> # to create a reusable decorator
>>> @on_exception
... def handle_errors(exception):
... print(exception)
... return {'statusCode': 500, 'body': 'uh oh'}
>>> @handle_errors
... def handler(event, context):
... raise Exception('it broke!')
>>> handler({}, object())
it broke!
{'statusCode': 500, 'body': 'uh oh'}
>>> # or a one off
>>> @on_exception(lambda e: {'statusCode': 500})
... def handler(body, context):
... raise Exception
>>> handler({}, object())
{'statusCode': 500} |
def switch_focus(self, layout, column, widget):
"""
Switch focus to the specified widget.
:param layout: The layout that owns the widget.
:param column: The column the widget is in.
:param widget: The index of the widget to take the focus.
"""
# Find the layout to own the focus.
for i, l in enumerate(self._layouts):
if l is layout:
break
else:
# No matching layout - give up now
return
self._layouts[self._focus].blur()
self._focus = i
self._layouts[self._focus].focus(force_column=column,
force_widget=widget) | Switch focus to the specified widget.
:param layout: The layout that owns the widget.
:param column: The column the widget is in.
:param widget: The index of the widget to take the focus. |
def fetch_github_activity(gen, metadata):
"""
    Registered handler for the github activity plugin.
    It puts into generator.context the HTML that needs to be displayed on a
    template.
"""
if 'GITHUB_ACTIVITY_FEED' in gen.settings.keys():
        gen.context['github_activity'] = gen.plugin_instance.fetch() | Registered handler for the github activity plugin.
It puts into generator.context the HTML that needs to be displayed on a
template.
def load_all_distributions(self):
"""Replace the :attr:`distributions` attribute with all scipy distributions"""
distributions = []
for this in dir(scipy.stats):
if "fit" in eval("dir(scipy.stats." + this +")"):
distributions.append(this)
self.distributions = distributions[:] | Replace the :attr:`distributions` attribute with all scipy distributions |
def format_text_as_docstr(text):
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result)
"""
import utool as ut
import re
min_indent = ut.get_minimum_indentation(text)
indent_ = ' ' * min_indent
formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text,
flags=re.MULTILINE)
formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text,
flags=re.MULTILINE)
return formated_text | r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result) |
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_')) | converts a Python name into a serializable name |
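A few illustrative conversions for the helper above, assuming it is in scope and none of these names appear in the _KNOWN_SERIALIZATION_XFORMS override table.
assert _get_serialization_name('x_ms_blob_type') == 'x-ms-blob-type'
assert _get_serialization_name('content_language') == 'Content-Language'
assert _get_serialization_name('max_results') == 'MaxResults'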
def parser(self):
"""
Instantiates the argparse parser
"""
if self._parser is None:
apkw = {
'description': self.description,
'epilog': self.epilog,
}
self._parser = argparse.ArgumentParser(**apkw)
# For Python 3 add version as a default command
if self.version:
self._parser.add_argument(
'-v', '--version', action='version',
version="%(prog)s {}".format(self.version),
)
return self._parser | Instantiates the argparse parser |
def matches(self, filter_props):
"""Check if the filter matches the supplied properties."""
if filter_props is None:
return False
found_one = False
for key, value in filter_props.items():
if key in self.properties and value != self.properties[key]:
return False
elif key in self.properties and value == self.properties[key]:
found_one = True
return found_one | Check if the filter matches the supplied properties. |
def parse(template, delimiters=None):
"""
Parse a unicode template string and return a ParsedTemplate instance.
Arguments:
template: a unicode template string.
delimiters: a 2-tuple of delimiters. Defaults to the package default.
Examples:
>>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
>>> print str(parsed).replace('u', '') # This is a hack to get the test to pass both in Python 2 and 3.
['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])]
"""
if type(template) is not unicode:
raise Exception("Template is not unicode: %s" % type(template))
parser = _Parser(delimiters)
return parser.parse(template) | Parse a unicode template string and return a ParsedTemplate instance.
Arguments:
template: a unicode template string.
delimiters: a 2-tuple of delimiters. Defaults to the package default.
Examples:
>>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
>>> print str(parsed).replace('u', '') # This is a hack to get the test to pass both in Python 2 and 3.
['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])] |
def get_template_loader(app, subdir='templates'):
'''
Convenience method that calls get_template_loader() on the DMP
template engine instance.
'''
dmp = apps.get_app_config('django_mako_plus')
return dmp.engine.get_template_loader(app, subdir, create=True) | Convenience method that calls get_template_loader() on the DMP
template engine instance. |
def _make_actor_method_executor(self, method_name, method, actor_imported):
"""Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
method_name (str): The name of the actor method.
method (instancemethod): The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
actor_imported (bool): Whether the actor has been imported.
Checkpointing operations will not be run if this is set to
False.
Returns:
A function that executes the given actor method on the worker's
stored instance of the actor. The function also updates the
worker's internal state to record the executed method.
"""
def actor_method_executor(dummy_return_id, actor, *args):
# Update the actor's task counter to reflect the task we're about
# to execute.
self._worker.actor_task_counter += 1
# Execute the assigned method and save a checkpoint if necessary.
try:
if is_class_method(method):
method_returns = method(*args)
else:
method_returns = method(actor, *args)
except Exception as e:
# Save the checkpoint before allowing the method exception
# to be thrown, but don't save the checkpoint for actor
# creation task.
if (isinstance(actor, ray.actor.Checkpointable)
and self._worker.actor_task_counter != 1):
self._save_and_log_checkpoint(actor)
raise e
else:
# Handle any checkpointing operations before storing the
# method's return values.
                # NOTE(swang): If method_returns is a pointer to the actor's
                # state, the checkpointing operations can modify the return
                # values if they mutate the actor's state. Is this okay?
if isinstance(actor, ray.actor.Checkpointable):
# If this is the first task to execute on the actor, try to
# resume from a checkpoint.
if self._worker.actor_task_counter == 1:
if actor_imported:
self._restore_and_log_checkpoint(actor)
else:
# Save the checkpoint before returning the method's
# return values.
self._save_and_log_checkpoint(actor)
return method_returns
return actor_method_executor | Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
method_name (str): The name of the actor method.
method (instancemethod): The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
actor_imported (bool): Whether the actor has been imported.
Checkpointing operations will not be run if this is set to
False.
Returns:
A function that executes the given actor method on the worker's
stored instance of the actor. The function also updates the
worker's internal state to record the executed method. |
def spacing(text):
"""
Perform paranoid text spacing on text.
"""
if len(text) <= 1 or not ANY_CJK.search(text):
return text
new_text = text
# TODO: refactoring
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS_CJK.search(new_text)
while matched:
start, end = matched.span()
new_text = ''.join((new_text[:start + 1], convert_to_fullwidth(new_text[start + 1:end - 1]), new_text[end - 1:]))
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS_CJK.search(new_text)
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS.search(new_text)
while matched:
start, end = matched.span()
new_text = ''.join((new_text[:start + 1].strip(), convert_to_fullwidth(new_text[start + 1:end]), new_text[end:].strip()))
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS.search(new_text)
new_text = DOTS_CJK.sub(r'\1 \2', new_text)
new_text = FIX_CJK_COLON_ANS.sub(r'\1:\2', new_text)
new_text = CJK_QUOTE.sub(r'\1 \2', new_text)
new_text = QUOTE_CJK.sub(r'\1 \2', new_text)
new_text = FIX_QUOTE_ANY_QUOTE.sub(r'\1\3\5', new_text)
new_text = CJK_SINGLE_QUOTE_BUT_POSSESSIVE.sub(r'\1 \2', new_text)
new_text = SINGLE_QUOTE_CJK.sub(r'\1 \2', new_text)
new_text = FIX_POSSESSIVE_SINGLE_QUOTE.sub(r"\1's", new_text)
new_text = HASH_ANS_CJK_HASH.sub(r'\1 \2\3\4 \5', new_text)
new_text = CJK_HASH.sub(r'\1 \2', new_text)
new_text = HASH_CJK.sub(r'\1 \3', new_text)
new_text = CJK_OPERATOR_ANS.sub(r'\1 \2 \3', new_text)
new_text = ANS_OPERATOR_CJK.sub(r'\1 \2 \3', new_text)
new_text = FIX_SLASH_AS.sub(r'\1\2', new_text)
new_text = FIX_SLASH_AS_SLASH.sub(r'\1\2\3', new_text)
new_text = CJK_LEFT_BRACKET.sub(r'\1 \2', new_text)
new_text = RIGHT_BRACKET_CJK.sub(r'\1 \2', new_text)
new_text = FIX_LEFT_BRACKET_ANY_RIGHT_BRACKET.sub(r'\1\3\5', new_text)
new_text = ANS_CJK_LEFT_BRACKET_ANY_RIGHT_BRACKET.sub(r'\1 \2\3\4', new_text)
new_text = LEFT_BRACKET_ANY_RIGHT_BRACKET_ANS_CJK.sub(r'\1\2\3 \4', new_text)
new_text = AN_LEFT_BRACKET.sub(r'\1 \2', new_text)
new_text = RIGHT_BRACKET_AN.sub(r'\1 \2', new_text)
new_text = CJK_ANS.sub(r'\1 \2', new_text)
new_text = ANS_CJK.sub(r'\1 \2', new_text)
new_text = S_A.sub(r'\1 \2', new_text)
new_text = MIDDLE_DOT.sub('・', new_text)
return new_text.strip() | Perform paranoid text spacing on text. |
def save_new_channel(self):
"""
It saves new channel according to specified channel features.
"""
form_info = self.input['form']
channel = Channel(typ=15, name=form_info['name'],
description=form_info['description'],
owner_id=form_info['owner_id'])
channel.blocking_save()
self.current.task_data['target_channel_key'] = channel.key | It saves new channel according to specified channel features. |
def eof_received(self) -> bool:
"""
Close the transport after receiving EOF.
Since Python 3.5, `:meth:~StreamReaderProtocol.eof_received` returns
``True`` on non-TLS connections.
See http://bugs.python.org/issue24539 for more information.
This is inappropriate for websockets for at least three reasons:
1. The use case is to read data until EOF with self.reader.read(-1).
Since websockets is a TLV protocol, this never happens.
2. It doesn't work on TLS connections. A falsy value must be
returned to have the same behavior on TLS and plain connections.
3. The websockets protocol has its own closing handshake. Endpoints
close the TCP connection after sending a close frame.
As a consequence we revert to the previous, more useful behavior.
"""
logger.debug("%s - event = eof_received()", self.side)
super().eof_received()
return False | Close the transport after receiving EOF.
Since Python 3.5, `:meth:~StreamReaderProtocol.eof_received` returns
``True`` on non-TLS connections.
See http://bugs.python.org/issue24539 for more information.
This is inappropriate for websockets for at least three reasons:
1. The use case is to read data until EOF with self.reader.read(-1).
Since websockets is a TLV protocol, this never happens.
2. It doesn't work on TLS connections. A falsy value must be
returned to have the same behavior on TLS and plain connections.
3. The websockets protocol has its own closing handshake. Endpoints
close the TCP connection after sending a close frame.
As a consequence we revert to the previous, more useful behavior. |
def saltbridge(poscenter, negcenter, protispos):
"""Detect all salt bridges (pliprofiler between centers of positive and negative charge)"""
data = namedtuple(
'saltbridge', 'positive negative distance protispos resnr restype reschain resnr_l restype_l reschain_l')
pairings = []
for pc, nc in itertools.product(poscenter, negcenter):
if not config.MIN_DIST < euclidean3d(pc.center, nc.center) < config.SALTBRIDGE_DIST_MAX:
continue
resnr = pc.resnr if protispos else nc.resnr
resnr_l = whichresnumber(nc.orig_atoms[0]) if protispos else whichresnumber(pc.orig_atoms[0])
restype = pc.restype if protispos else nc.restype
restype_l = whichrestype(nc.orig_atoms[0]) if protispos else whichrestype(pc.orig_atoms[0])
reschain = pc.reschain if protispos else nc.reschain
reschain_l = whichchain(nc.orig_atoms[0]) if protispos else whichchain(pc.orig_atoms[0])
contact = data(positive=pc, negative=nc, distance=euclidean3d(pc.center, nc.center), protispos=protispos,
resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l,
reschain_l=reschain_l)
pairings.append(contact)
return filter_contacts(pairings) | Detect all salt bridges (pliprofiler between centers of positive and negative charge) |
def setup_button_connectors(self):
"""Setup signal/slot mechanisms for dock buttons."""
self.help_button.clicked.connect(self.show_help)
self.run_button.clicked.connect(self.accept)
self.about_button.clicked.connect(self.about)
self.print_button.clicked.connect(self.show_print_dialog)
self.hazard_layer_combo.currentIndexChanged.connect(
self.index_changed_hazard_layer_combo)
self.exposure_layer_combo.currentIndexChanged.connect(
self.index_changed_exposure_layer_combo)
self.aggregation_layer_combo.currentIndexChanged.connect(
self.index_changed_aggregation_layer_combo) | Setup signal/slot mechanisms for dock buttons. |
def photo_url(self):
"""获取用户头像图片地址.
:return: 用户头像url
:rtype: str
"""
if self.url is not None:
if self.soup is not None:
img = self.soup.find('img', class_='Avatar Avatar--l')['src']
return img.replace('_l', '_r')
else:
assert (self.card is not None)
return PROTOCOL + self.card.img['src'].replace('_xs', '_r')
else:
            return 'http://pic1.zhimg.com/da8e974dc_r.jpg' | Get the URL of the user's avatar image.
:return: user avatar URL
:rtype: str |
def Popen(self, cmd, **kwargs):
"""
Remote Popen.
"""
prefixed_cmd = self._prepare_cmd(cmd)
return subprocess.Popen(prefixed_cmd, **kwargs) | Remote Popen. |
def build_matlab(static=False):
"""build the messenger mex for MATLAB
static : bool
Determines if the zmq library has been statically linked.
If so, it will append the command line option -DZMQ_STATIC
when compiling the mex so it matches libzmq.
"""
cfg = get_config()
# To deal with spaces, remove quotes now, and add
# to the full commands themselves.
if 'matlab_bin' in cfg and cfg['matlab_bin'] != '.':
matlab_bin = cfg['matlab_bin'].strip('"')
else: # attempt to autodetect MATLAB filepath
matlab_bin = which_matlab()
if matlab_bin is None:
raise ValueError("specify 'matlab_bin' in cfg file")
# Get the extension
extcmd = esc(os.path.join(matlab_bin, "mexext"))
extension = subprocess.check_output(extcmd, shell=use_shell)
extension = extension.decode('utf-8').rstrip('\r\n')
# Build the mex file
mex = esc(os.path.join(matlab_bin, "mex"))
paths = "-L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = '%s -O %s -lzmq ./src/messenger.c' % (mex, paths)
if static:
make_cmd += ' -DZMQ_STATIC'
do_build(make_cmd, 'messenger.%s' % extension) | build the messenger mex for MATLAB
static : bool
Determines if the zmq library has been statically linked.
If so, it will append the command line option -DZMQ_STATIC
when compiling the mex so it matches libzmq. |
def load(self, path=None):
'''
Load configuration (from configuration files).
Parameters
----------
path : ~pathlib.Path or None
Path to configuration file, which must exist; or path to directory
containing a configuration file; or None.
Returns
-------
~typing.Dict[str, ~typing.Dict[str, str]]
The configuration as a dict of sections mapping section name to
options. Each options dict maps from option name to option value. The
``default`` section is not included. However, all options from the
``default`` section are included in each returned section.
Raises
------
ValueError
If ``path`` is a missing file; or if it is a directory which does not
contain the configuration file.
Examples
--------
>>> loader.load()
{
'section1': {
'option1': 'value',
'option2': 'value2',
}
}
'''
# Add path
paths = self._paths.copy()
if path:
if path.is_dir():
path /= '{}.conf'.format(self._configuration_name)
paths.append(path)
# Prepend file sys root to abs paths
paths = [(path_._root / str(x)[1:] if x.is_absolute() else x) for x in paths]
if path:
path = paths[-1]
# Passed path must exist
if not path.exists():
raise ValueError('Expected configuration file at {}'.format(path))
# Configure parser
config_parser = ConfigParser(
inline_comment_prefixes=('#', ';'),
empty_lines_in_values=False,
default_section='default',
interpolation=ExtendedInterpolation()
)
def option_transform(name):
return name.replace('-', '_').replace(' ', '_').lower()
config_parser.optionxform = option_transform
# Parse defaults and configs
with suppress(FileNotFoundError):
defaults_contents = resource_string(self._package_name, 'data/{}.defaults.conf'.format(self._configuration_name))
config_parser.read_string(defaults_contents.decode('UTF-8'))
config_parser.read([str(x) for x in paths]) # reads in given order
config = {k : dict(v) for k,v in config_parser.items()}
del config['default']
return config | Load configuration (from configuration files).
Parameters
----------
path : ~pathlib.Path or None
Path to configuration file, which must exist; or path to directory
containing a configuration file; or None.
Returns
-------
~typing.Dict[str, ~typing.Dict[str, str]]
The configuration as a dict of sections mapping section name to
options. Each options dict maps from option name to option value. The
``default`` section is not included. However, all options from the
``default`` section are included in each returned section.
Raises
------
ValueError
If ``path`` is a missing file; or if it is a directory which does not
contain the configuration file.
Examples
--------
>>> loader.load()
{
'section1': {
'option1': 'value',
'option2': 'value2',
}
} |
def from_pb(cls, operation_pb, client, **caller_metadata):
"""Factory: construct an instance from a protobuf.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
:type client: object: must provide ``_operations_stub`` accessor.
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
"""
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = True
return result | Factory: construct an instance from a protobuf.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
:type client: object: must provide ``_operations_stub`` accessor.
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf. |
def get(self):
"""
Get a JSON-ready representation of this CustomArg.
:returns: This CustomArg, ready for use in a request body.
:rtype: dict
"""
custom_arg = {}
if self.key is not None and self.value is not None:
custom_arg[self.key] = self.value
return custom_arg | Get a JSON-ready representation of this CustomArg.
:returns: This CustomArg, ready for use in a request body.
:rtype: dict |
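A minimal sketch, assuming the CustomArg class above is constructed with key and value set (the constructor signature is an assumption).
# arg = CustomArg(key='campaign', value='welcome')
# arg.get()   # -> {'campaign': 'welcome'}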
def infer_child_relations(graph, node: BaseEntity) -> List[str]:
"""Propagate causal relations to children."""
return list(_infer_child_relations_iter(graph, node)) | Propagate causal relations to children. |
def get_meta_attributes(self, **kwargs):
"""Determine the form attributes for the meta field."""
superuser = kwargs.get('superuser', False)
if (self.untl_object.qualifier == 'recordStatus'
or self.untl_object.qualifier == 'system'):
if superuser:
self.editable = True
self.repeatable = True
else:
self.editable = False
self.view_type = 'qualified-input'
elif self.untl_object.qualifier == 'hidden':
self.label = 'Object Hidden'
self.view_type = 'radio'
else:
self.editable = False
self.view_type = 'qualified-input' | Determine the form attributes for the meta field. |
def parseSOAPMessage(data, ipAddr):
"parse raw XML data string, return a (minidom) xml document"
try:
dom = minidom.parseString(data)
except Exception:
#print('Failed to parse message from %s\n"%s": %s' % (ipAddr, data, ex), file=sys.stderr)
return None
if dom.getElementsByTagNameNS(NS_S, "Fault"):
#print('Fault received from %s:' % (ipAddr, data), file=sys.stderr)
return None
soapAction = dom.getElementsByTagNameNS(NS_A, "Action")[0].firstChild.data.strip()
if soapAction == ACTION_PROBE:
return parseProbeMessage(dom)
elif soapAction == ACTION_PROBE_MATCH:
return parseProbeMatchMessage(dom)
elif soapAction == ACTION_RESOLVE:
return parseResolveMessage(dom)
elif soapAction == ACTION_RESOLVE_MATCH:
return parseResolveMatchMessage(dom)
elif soapAction == ACTION_BYE:
return parseByeMessage(dom)
elif soapAction == ACTION_HELLO:
return parseHelloMessage(dom) | parse raw XML data string, return a (minidom) xml document |
def get_acf(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2 ** np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x - np.mean(x, axis=axis), n=2 * n, axis=axis)
m[axis] = slice(0, n)
acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
return acf / acf[tuple(m)] | Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False) |
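A small self-contained check of get_acf as defined above (it reuses the numpy import from that snippet); the test series is arbitrary.
rng = np.random.RandomState(42)
series = np.sin(np.linspace(0.0, 20.0 * np.pi, 2048)) + 0.3 * rng.randn(2048)
acf = get_acf(series, fast=True)
print(acf[0])    # normalised autocorrelation, so the zero-lag value is 1.0
print(acf[:5])   # values near 1 at small lags for this smooth, noisy sine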
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO.StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
        self.readfp(c_data) | Load a credential file set up like the one used by the Java utilities
def _get_clause_words( sentence_text, clause_id ):
''' Collects clause with index *clause_id* from given *sentence_text*.
Returns a pair (clause, isEmbedded), where:
*clause* is a list of word tokens in the clause;
*isEmbedded* is a bool indicating whether the clause is embedded;
'''
clause = []
isEmbedded = False
indices = sentence_text.clause_indices
clause_anno = sentence_text.clause_annotations
for wid, token in enumerate(sentence_text[WORDS]):
if indices[wid] == clause_id:
if not clause and clause_anno[wid] == EMBEDDED_CLAUSE_START:
isEmbedded = True
clause.append((wid, token))
return clause, isEmbedded | Collects clause with index *clause_id* from given *sentence_text*.
Returns a pair (clause, isEmbedded), where:
*clause* is a list of word tokens in the clause;
*isEmbedded* is a bool indicating whether the clause is embedded; |
def make_heading_authors(self, authors):
"""
Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter
"""
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
if first:
first = False
else:
append_new_text(author_element, ',', join_str='')
collab = author.find('collab')
anon = author.find('anon')
if collab is not None:
append_all_below(author_element, collab)
elif anon is not None: # If anonymous, just add "Anonymous"
append_new_text(author_element, 'Anonymous')
else: # Author is neither Anonymous or a Collaboration
author_name, _ = self.get_contrib_names(author)
append_new_text(author_element, author_name)
#TODO: Handle author footnote references, also put footnotes in the ArticleInfo
#Example: journal.pbio.0040370.xml
first = True
for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
_sup = xref.find('sup')
sup_text = all_text(_sup) if _sup is not None else ''
auth_sup = etree.SubElement(author_element, 'sup')
sup_link = etree.SubElement(auth_sup,
'a',
{'href': self.main_fragment.format(xref.attrib['rid'])})
sup_link.text = sup_text
if first:
first = False
else:
append_new_text(auth_sup, ', ', join_str='')
#for xref in author.findall('xref'):
#if xref.attrs['ref-type'] in ['corresp', 'aff']:
#try:
#sup_element = xref.sup[0].node
#except IndexError:
#sup_text = ''
#else:
#sup_text = all_text(sup_element)
#new_sup = etree.SubElement(author_element, 'sup')
#sup_link = etree.SubElement(new_sup, 'a')
#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
#sup_link.text = sup_text
#if first:
#first = False
#else:
#new_sup.text = ','
return author_element | Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter |
def add_aacgm_coordinates(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time.
"""
import aacgmv2
aalat = []; aalon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
# aacgmv2 latitude and longitude from geodetic coords
tlat, tlon, tmlt = aacgmv2.get_aacgm_coord(lat, lon, alt, time)
aalat.append(tlat)
aalon.append(tlon)
mlt.append(tmlt)
inst['aacgm_lat'] = aalat
inst['aacgm_long'] = aalon
inst['aacgm_mlt'] = mlt
inst.meta['aacgm_lat'] = {'units':'degrees','long_name':'AACGM latitude'}
inst.meta['aacgm_long'] = {'units':'degrees','long_name':'AACGM longitude'}
inst.meta['aacgm_mlt'] = {'units':'hrs','long_name':'AACGM Magnetic local time'}
return | Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time. |
def target_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/targets#show-target"
api_path = "/api/v2/targets/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/targets#show-target |
def avail_locations(call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn()
for item in conn.list_locations()['items']:
reg, loc = item['id'].split('/')
location = {'id': item['id']}
if reg not in ret:
ret[reg] = {}
ret[reg][loc] = location
return ret | Return a dict of all available VM locations on the cloud provider with
relevant data |
def _select_position(self, w, h):
"""
Select the position where the y coordinate of the top of the rectangle
        is lower; if there are several, pick the one with the smallest x
        coordinate.
"""
fitn = ((m.y+h, m.x, w, h, m) for m in self._max_rects
if self._rect_fitness(m, w, h) is not None)
fitr = ((m.y+w, m.x, h, w, m) for m in self._max_rects
if self._rect_fitness(m, h, w) is not None)
if not self.rot:
fitr = []
fit = itertools.chain(fitn, fitr)
try:
_, _, w, h, m = min(fit, key=first_item)
except ValueError:
return None, None
return Rectangle(m.x, m.y, w, h), m | Select the position where the y coordinate of the top of the rectangle
is lower; if there are several, pick the one with the smallest x
coordinate.
def run(self, lines):
"""Filter method"""
# Nothing to do in this case
if (not self.adjust_path) and (not self.image_ext):
return lines
ret = []
for line in lines:
processed = {}
while True:
alt = ''
img_name = ''
match = re.search(r'!\[(.*?)\]\((.*?)\)', line)
# Make sure there is in fact an image file name
if match:
# Skip images we already processed
if match.group(0) in processed:
break
# Skip URLs
                    if re.match(r'\w+://', match.group(2)):
break
alt = match.group(1)
img_name = match.group(2)
else:
break
if self.image_ext:
img_name = re.sub(r'\.\w+$', '.' + self.image_ext, img_name)
if self.adjust_path and (self.image_path or self.filename):
# explicitely specified image path takes precedence over
# path relative to chapter
if self.image_path and self.filename:
img_name = os.path.join(
os.path.abspath(self.image_path),
os.path.dirname(self.filename),
img_name)
# generate image path relative to file name
if self.filename and (not self.image_path):
img_name = os.path.join(
os.path.abspath(
os.path.dirname(self.filename)),
img_name)
# handle Windows '\', although this adds a small amount of unnecessary work on Unix systems
img_name = img_name.replace(os.path.sep, '/')
line = re.sub(r'!\[(.*?)\]\((.*?)\)',
                              '![%s](%s)' % (alt, img_name), line)
# Mark this image as processed
processed[match.group(0)] = True
ret.append(line)
return ret | Filter method |
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield map(factor.__mul__, row)
return x,y,iterfloat(),info | Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*. |
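A hypothetical usage sketch with the pypng-style Reader this method belongs to; the file name is an assumption.
# import png
# reader = png.Reader(filename='photo.png')
# width, height, rows, info = reader.asFloat(maxval=1.0)
# first_row = list(next(iter(rows)))   # pixel values scaled into [0.0, 1.0]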
def list_attributes(self):
"""
Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list
"""
return [attribute for attribute, value in self.iteritems() if issubclass(value.__class__, Attribute)] | Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list |
def _construct_from_permutation(self, significant_pathways):
"""Build the network from a dictionary of (side -> tuple lists),
where the side is specified as "pos" and/or "neg" (from the feature
gene signature(s)) and mapped to a tuple list of [(pathway, feature)].
Used during the PathCORE-T permutation test by applying the method
`permute_pathways_across_features` to an existing CoNetwork.
"""
for side, pathway_feature_tuples in significant_pathways.items():
feature_pathway_dict = self._collect_pathways_by_feature(
pathway_feature_tuples)
self._edges_from_permutation(feature_pathway_dict) | Build the network from a dictionary of (side -> tuple lists),
where the side is specified as "pos" and/or "neg" (from the feature
gene signature(s)) and mapped to a tuple list of [(pathway, feature)].
Used during the PathCORE-T permutation test by applying the method
`permute_pathways_across_features` to an existing CoNetwork. |
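For orientation, a hedged sketch of the input shape this method expects; the pathway names and feature indices below are made up:
# Hypothetical example: each side maps to a list of (pathway, feature) tuples.
significant_pathways = {
    'pos': [('TCA cycle', 0), ('Flagellar assembly', 0), ('TCA cycle', 3)],
    'neg': [('ABC transporters', 1)],
}
# network._construct_from_permutation(significant_pathways)  # on a CoNetwork instance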
def __alterDocstring(self, tail='', writer=None):
"""
Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing.
"""
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
# Don't bother doing extra work if it's a sentinel.
if line is not None:
# Also limit work if we're not parsing the docstring.
if self.options.autobrief:
for doxyTag, tagRE in AstWalker.__singleLineREs.items():
match = tagRE.search(line)
if match:
# We've got a simple one-line Doxygen command
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum - 1, lines))
lines = []
firstLineNum = lineNum
line = line.replace(match.group(1), doxyTag)
timeToSend = True
if inSection:
# The last line belonged to a section.
# Does this one too? (Ignoring empty lines.)
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
# If the last line was empty, but we're still in a section
# then we need to start a new paragraph.
lines[-1] = '# @par'
match = AstWalker.__returnsStartRE.match(line)
if match:
# We've got a "returns" section
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
# We've got an "arguments" section
line = line.replace(match.group(0), '').rstrip()
if 'attr' in match.group(0).lower():
prefix = '@property\t'
else:
prefix = '@param\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__argsRE.match(line)
if match and not inCodeBlock:
# We've got something that looks like an item /
# description pair.
if 'property' in prefix:
line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
prefix, match.groupdict(), linesep)
else:
line = ' {0}\t{1[name]}\t{1[desc]}'.format(
prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
# We've got a "see also" section
prefix = '@sa\t'
else:
# We've got an "exceptions" section
prefix = '@exception\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
# We've got a list of something or another
itemList = []
for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds(
match.group(0))):
itemList.append('# {0}\t{1}{2}'.format(
prefix, itemMatch, linesep))
line = ''.join(itemList)[1:]
else:
match = AstWalker.__examplesStartRE.match(line)
if match and lines[-1].strip() == '#' \
and self.options.autocode:
# We've got an "example" section
inCodeBlock = True
inCodeBlockObj[0] = True
line = line.replace(match.group(0),
' @b Examples{0}# @code'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
# We've got an arbitrary section
prefix = ''
inSection = True
# What's the indentation of the section heading?
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
line = line.replace(
match.group(0),
' @par {0}'.format(match.group(1))
)
if lines[-1] == '# @par':
lines[-1] = '#'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
# Probably a single list item
line = ' {0}\t{1}'.format(
prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
# If we were passed a tail, append it to the docstring.
# Note that this means that we need a docstring for this
# item to get documented.
if tail and lineNum == len(self.docLines) - 1:
line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add comment marker for every line.
line = '#{0}'.format(line.rstrip())
# Ensure the first line has the Doxygen double comment.
if lineNum == 0:
line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
# If we get our sentinel value, send out what we've got.
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False | Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing. |
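The method above is a send-driven coroutine; a minimal standalone sketch of that pattern (stripped of doxypypy's actual tag handling) is:
from types import GeneratorType

def consumer(writer):
    # Collect numbered lines; flush the batch to `writer` on a None sentinel.
    assert isinstance(writer, GeneratorType)
    lines = []
    while True:
        num, line = (yield)
        if line is None:
            writer.send(lines)
            lines = []
        else:
            lines.append('# %d: %s' % (num, line))

def printer():
    while True:
        print((yield))

w = printer(); next(w)   # prime both coroutines before sending
c = consumer(w); next(c)
c.send((1, 'alpha')); c.send((2, 'beta')); c.send((3, None))
# prints ['# 1: alpha', '# 2: beta']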
def update_long(self, **kwargs):
"""
Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf"""
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val) | Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf |
def e(message, exit_code=None):
"""Print an error log message."""
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code) | Print an error log message. |
def AAAA(host, nameserver=None):
'''
Return the AAAA record for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dig.AAAA www.google.com
'''
dig = ['dig', '+short', six.text_type(host), 'AAAA']
if nameserver is not None:
dig.append('@{0}'.format(nameserver))
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
# In this case, 0 is not the same as False
if cmd['retcode'] != 0:
log.warning(
'dig returned exit code \'%s\'. Returning empty list as fallback.',
cmd['retcode']
)
return []
# make sure all entries are IPs
return [x for x in cmd['stdout'].split('\n') if check_ip(x)] | Return the AAAA record for ``host``.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dig.AAAA www.google.com |
def disconnect(receiver, signal=Any, sender=Any, weak=True):
"""Disconnect receiver from sender for signal
receiver -- the registered receiver to disconnect
signal -- the registered signal to disconnect
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
disconnect reverses the process of connect,
the semantics for the individual elements are
logically equivalent to a tuple of
(receiver, signal, sender, weak) used as a key
to be deleted from the internal routing tables.
(The actual process is slightly more complex
but the semantics are basically the same).
Note:
Using disconnect is not required to cleanup
routing when an object is deleted, the framework
will remove routes for deleted objects
automatically. It's only necessary to disconnect
if you want to stop routing to a live object.
returns None, may raise DispatcherTypeError or
DispatcherKeyError
"""
if signal is None:
raise errors.DispatcherTypeError(
'Signal cannot be None (receiver=%r sender=%r)'%( receiver,sender)
)
if weak: receiver = saferef.safeRef(receiver)
senderkey = id(sender)
try:
signals = connections[senderkey]
receivers = signals[signal]
except KeyError:
raise errors.DispatcherKeyError(
"""No receivers found for signal %r from sender %r""" %(
signal,
sender
)
)
try:
# also removes from receivers
_removeOldBackRefs(senderkey, signal, receiver, receivers)
except ValueError:
raise errors.DispatcherKeyError(
"""No connection to receiver %s for signal %s from sender %s""" %(
receiver,
signal,
sender
)
)
_cleanupConnections(senderkey, signal) | Disconnect receiver from sender for signal
receiver -- the registered receiver to disconnect
signal -- the registered signal to disconnect
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
disconnect reverses the process of connect,
the semantics for the individual elements are
logically equivalent to a tuple of
(receiver, signal, sender, weak) used as a key
to be deleted from the internal routing tables.
(The actual process is slightly more complex
but the semantics are basically the same).
Note:
Using disconnect is not required to cleanup
routing when an object is deleted, the framework
will remove routes for deleted objects
automatically. It's only necessary to disconnect
if you want to stop routing to a live object.
returns None, may raise DispatcherTypeError or
DispatcherKeyError |
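Assuming the surrounding module is PyDispatcher, a typical connect/send/disconnect round trip might look like this sketch:
from pydispatch import dispatcher

def on_update(sender, **kwargs):
    print('update from', sender)

dispatcher.connect(on_update, signal='update', sender=dispatcher.Any)
dispatcher.send(signal='update', sender='job-42')   # -> update from job-42
dispatcher.disconnect(on_update, signal='update', sender=dispatcher.Any)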
def filter(self, **kwargs):
"""
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
"""
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self | Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self} |
def sendReset(self, sequenceId=0):
"""
Sends a reset signal to the network.
"""
for col in xrange(self.numColumns):
self.sensorInputs[col].addResetToQueue(sequenceId)
self.network.run(1) | Sends a reset signal to the network. |