code | docstring
---|---
def pull(self):
"""This action does some state checking (adds a object in the session
that will identify this chat participant and adds a coroutine to manage
it's state) and gets new messages or bail out in 10 seconds if there are
no messages."""
if 'client' not in session or session['client'].dead:
client = Client(str(request.environ['pylons.routes_dict']['id']))
print('Adding new client:', client)
session['client'] = client
session.save()
yield request.environ['cogen.core'].events.AddCoro(client.watch, prio=priority.CORO)
return
else:
client = session['client']
yield request.environ['cogen.call'](client.messages.get)(timeout=10)
if isinstance(request.environ['cogen.wsgi'].result, events.OperationTimeout):
pass
elif isinstance(request.environ['cogen.wsgi'].result, Exception):
import traceback
traceback.print_exception(*request.environ['cogen.wsgi'].exception)
else:
yield "%s\r\n"% '\r\n'.join(request.environ['cogen.wsgi'].result) | This action does some state checking (adds a object in the session
that will identify this chat participant and adds a coroutine to manage
it's state) and gets new messages or bail out in 10 seconds if there are
no messages. |
def GetKeyByPath(self, key_path):
"""Retrieves the key for a specific path.
Args:
key_path (str): Windows Registry key path.
Returns:
WinRegistryKey: Registry key or None if not available.
"""
key_path_upper = key_path.upper()
if key_path_upper.startswith(self._key_path_prefix_upper):
relative_key_path = key_path[self._key_path_prefix_length:]
elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):
relative_key_path = key_path
key_path = ''.join([self._key_path_prefix, key_path])
else:
return None
try:
regf_key = self._regf_file.get_key_by_path(relative_key_path)
except IOError:
regf_key = None
if not regf_key:
return None
return REGFWinRegistryKey(regf_key, key_path=key_path) | Retrieves the key for a specific path.
Args:
key_path (str): Windows Registry key path.
Returns:
WinRegistryKey: Registry key or None if not available. |
def add_class(self, cssclass):
"""Adds a css class to this element."""
if self.has_class(cssclass):
return self
return self.toggle_class(cssclass) | Adds a css class to this element. |
def fetchmany(self, size=None):
"""Returns the next set of rows of a query result, returning a
list of tuples. When no more rows are available, it returns an
empty list.
The number of rows returned can be specified using the size argument,
which defaults to one
:param size: ``int`` number of rows to return
:returns: ``list`` of fetched rows
"""
self._check_executed()
fut = self._loop.create_future()
if self._rows is None:
fut.set_result([])
return fut
end = self._rownumber + (size or self._arraysize)
result = self._rows[self._rownumber:end]
self._rownumber = min(end, len(self._rows))
fut.set_result(result)
return fut | Returns the next set of rows of a query result, returning a
list of tuples. When no more rows are available, it returns an
empty list.
The number of rows returned can be specified using the size argument,
which defaults to one
:param size: ``int`` number of rows to return
:returns: ``list`` of fetched rows |
def _immediate_dominators(self, node, target_graph=None, reverse_graph=False):
"""
Get all immediate dominators of sub graph from given node upwards.
:param str node: id of the node to navigate forwards from.
:param networkx.classes.digraph.DiGraph target_graph: graph to analyse, default is self.graph.
:param bool reverse_graph: Whether the target graph should be reversed before analysis.
:return: dict mapping each node of the graph to its immediate dominator.
:rtype: dict
"""
if target_graph is None:
target_graph = self.graph
if node not in target_graph:
raise AngrCFGError('Target node %s is not in graph.' % node)
graph = networkx.DiGraph(target_graph)
if reverse_graph:
# Reverse the graph without deepcopy
for n in target_graph.nodes():
graph.add_node(n)
for src, dst in target_graph.edges():
graph.add_edge(dst, src)
idom = {node: node}
order = list(networkx.dfs_postorder_nodes(graph, node))
dfn = {u: i for i, u in enumerate(order)}
order.pop()
order.reverse()
def intersect(u_, v_):
"""
Finds the highest (in postorder valuing) point of intersection above two node arguments.
:param str u_: nx node id.
:param str v_: nx node id.
:return: intersection of paths.
:rtype: str
"""
while u_ != v_:
while dfn[u_] < dfn[v_]:
u_ = idom[u_]
while dfn[u_] > dfn[v_]:
v_ = idom[v_]
return u_
changed = True
while changed:
changed = False
for u in order:
new_idom = reduce(intersect, (v for v in graph.pred[u] if v in idom))
if u not in idom or idom[u] != new_idom:
idom[u] = new_idom
changed = True
return idom | Get all immediate dominators of sub graph from given node upwards.
:param str node: id of the node to navigate forwards from.
:param networkx.classes.digraph.DiGraph target_graph: graph to analyse, default is self.graph.
:param bool reverse_graph: Whether the target graph should be reversed before analysis.
:return: dict mapping each node of the graph to its immediate dominator.
:rtype: dict |
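For intuition, a minimal standalone check of the same dominator computation using networkx's built-in immediate_dominators (an illustrative sketch, not part of the original code; assumes networkx is installed):
import networkx
# Diamond-shaped graph: 1 -> 2 -> 4 and 1 -> 3 -> 4.
g = networkx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4)])
# Node 4 is immediately dominated by 1, since neither branch alone dominates it.
print(networkx.immediate_dominators(g, 1))  # {1: 1, 2: 1, 3: 1, 4: 1}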
def _nested_unary_mul(nested_a, p):
"""Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`."""
def mul_with_broadcast(tensor):
ndims = tensor.shape.ndims
if ndims != 2:
p_reshaped = tf.reshape(p, [-1] + [1] * (ndims - 1))
return p_reshaped * tensor
else:
return p * tensor
return nest.map(mul_with_broadcast, nested_a) | Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`. |
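To see what the broadcast reshape does, here is a NumPy sketch with hypothetical shapes (illustration only, not the original TensorFlow code; assumes p carries one multiplier per batch element):
import numpy as np
p = np.array([0.5, 2.0])                                 # shape (2,)
tensor = np.ones((2, 3, 4))                              # ndims == 3, so p gets reshaped
p_reshaped = p.reshape([-1] + [1] * (tensor.ndim - 1))   # shape (2, 1, 1)
print((p_reshaped * tensor).shape)                       # (2, 3, 4), scaled per batch element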
def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}):
"""
Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere.
"""
global blockstack_db, blockstack_db_lastblock, blockstack_db_lock
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
blockstack_db_lock.acquire()
try:
assert blockstack_db is None, "Borrowing violation"
except Exception as e:
log.exception(e)
log.error("FATAL: Borrowing violation")
os.abort()
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block(), expected_snapshots=expected_snapshots)
rc = db.db_setup()
if not rc:
db.close()
blockstack_db_lock.release()
log.error("Failed to set up virtualchain state engine")
return None
blockstack_db = db
blockstack_db_lastblock = block_number
blockstack_db_lock.release()
return blockstack_db | Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere. |
def update(self, app_id, data):
"""Update app identified by app_id with data
:params:
* app_id (int) id in the marketplace received with :method:`create`
* data (dict) some keys are required:
* *name*: the title of the app. Maximum length 127
characters.
* *summary*: the summary of the app. Maximum length
255 characters.
* *categories*: a list of the categories, at least
two of the category ids provided from the category api
(see below).
* *support_email*: the email address for support.
* *device_types*: a list of the device types at least
one of: 'desktop', 'phone', 'tablet'.
* *payment_type*: only choice at this time is 'free'.
* *privacy_policy*: the privacy policy for the app.
:returns: HttpResponse:
* status_code (int) 202 if successful
* content (dict) or empty if successful
"""
assert ('name' in data
and data['name']
and 'summary' in data
and 'categories' in data
and data['categories']
and 'support_email' in data
and data['support_email']
and 'device_types' in data
and data['device_types']
and 'payment_type' in data
and data['payment_type']
and 'privacy_policy' in data
and data['privacy_policy'])
return self.conn.fetch('PUT', self.url('app') % app_id, data) | Update app identified by app_id with data
:params:
* app_id (int) id in the marketplace received with :method:`create`
* data (dict) some keys are required:
* *name*: the title of the app. Maximum length 127
characters.
* *summary*: the summary of the app. Maximum length
255 characters.
* *categories*: a list of the categories, at least
two of the category ids provided from the category api
(see below).
* *support_email*: the email address for support.
* *device_types*: a list of the device types at least
one of: 'desktop', 'phone', 'tablet'.
* *payment_type*: only choice at this time is 'free'.
* *privacy_policy*: the privacy policy for the app.
:returns: HttpResponse:
* status_code (int) 202 if successful
* content (dict) or empty if successful |
def where(cls, **kwargs):
"""
where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
)
"""
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs)) | where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
) |
def login(self, username=None, password=None, token=None):
# type: (Optional[str], Optional[str], Optional[str]) -> None
"""Login into KE-chain with either username/password or token.
:param basestring username: username for your user from KE-chain
:param basestring password: password for your user from KE-chain
:param basestring token: user authentication token retrieved from KE-chain
Examples
--------
Using Token Authentication (retrieve user Token from the KE-chain instance)
>>> client = Client()
>>> client.login(token='<some-super-long-secret-token>')
Using Basic authentication (Username/Password)
>>> client = Client()
>>> client.login(username='user', password='pw')
>>> client = Client()
>>> client.login('username','password')
"""
if token:
self.headers['Authorization'] = 'Token {}'.format(token)
self.auth = None
elif username and password:
self.headers.pop('Authorization', None)
self.auth = (username, password) | Login into KE-chain with either username/password or token.
:param basestring username: username for your user from KE-chain
:param basestring password: password for your user from KE-chain
:param basestring token: user authentication token retrieved from KE-chain
Examples
--------
Using Token Authentication (retrieve user Token from the KE-chain instance)
>>> client = Client()
>>> client.login(token='<some-super-long-secret-token>')
Using Basic authentication (Username/Password)
>>> client = Client()
>>> client.login(username='user', password='pw')
>>> client = Client()
>>> client.login('username','password') |
def p_statement_randomize_expr(p):
""" statement : RANDOMIZE expr
"""
p[0] = make_sentence('RANDOMIZE', make_typecast(TYPE.ulong, p[2], p.lineno(1))) | statement : RANDOMIZE expr |
def _make_tagdict(self, sentences):
'''Make a tag dictionary for single-tag words.'''
counts = defaultdict(lambda: defaultdict(int))
for words, tags in sentences:
for word, tag in zip(words, tags):
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
self.tagdict[word] = tag | Make a tag dictionary for single-tag words. |
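A standalone illustration of the frequency and ambiguity thresholds on toy counts (hypothetical numbers, not taken from the source):
from collections import defaultdict
counts = defaultdict(lambda: defaultdict(int))
for word, tag in [('the', 'DT')] * 25 + [('can', 'MD')] * 12 + [('can', 'NN')] * 8:
    counts[word][tag] += 1
tagdict = {}
freq_thresh, ambiguity_thresh = 20, 0.97
for word, tag_freqs in counts.items():
    tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
    n = sum(tag_freqs.values())
    if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
        tagdict[word] = tag
print(tagdict)  # {'the': 'DT'} -- 'can' fails the ambiguity check (12/20 = 0.6)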
def create_feature_vectorizer(input_features, output_feature_name,
known_size_map = {}):
"""
Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially
from 0 through the total number of features.
output_feature_name: str
The name of the output feature. Its type is an Array of length equal
to the total number of output dimensions.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value.
"""
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
input_features = process_or_validate_features(input_features)
feature_vectorizer = spec.featureVectorizer
num_output_dimensions = 0
for n, ft in input_features:
if n in known_size_map:
dim = known_size_map[n]
if ft.num_elements is not None:
if dim != ft.num_elements:
raise ValueError(("In feature %s, override size (%d) not "
"compatible with inherent value size (%d).")
% (n, dim, ft.num_elements))
else:
if ft.num_elements is None:
raise ValueError("In feature %s, inherent size unknown so must be manually supplied.")
dim = ft.num_elements
num_output_dimensions += dim
new_feature = feature_vectorizer.inputList.add()
new_feature.inputColumn = n
new_feature.inputDimensions = dim
if not isinstance(output_feature_name, _string_types):
if (is_valid_feature_list(output_feature_name)
and len(output_feature_name) == 1
and output_feature_name[0][1] == datatypes.Array(num_output_dimensions)):
output_feature_name = output_feature_name[0][0]
else:
raise TypeError("Output feature must be specified as a "
"feature name or correct output feature list.")
output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))]
set_transform_interface_params(spec, input_features, output_features)
return spec, num_output_dimensions | Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially
from 0 through the total number of features.
output_feature_name: str
The name of the output feature. Its type is an Array of length equal
to the total number of output dimensions.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value. |
def get(self, name):
""" Return component by category name """
for c in self.comps:
if c.category == name:
return c
return None | Return component by category name |
def eclean_pkg(destructive=False, package_names=False, time_limit=0,
exclude_file='/etc/eclean/packages.exclude'):
'''
Clean obsolete binary packages
destructive
Only keep minimum for reinstallation
package_names
Protect all versions of installed packages. Only meaningful if used
with destructive=True
time_limit <time>
Don't delete binary packages modified since <time>
<time> is an amount of time: "1y" is "one year", "2w" is
"two weeks", etc. Units are: y (years), m (months), w (weeks),
d (days) and h (hours).
exclude_file
Path to exclusion file. Default is /etc/eclean/packages.exclude
This is the same default eclean-pkg uses. Use None if this file
exists and you want to ignore it.
Returns a dict containing the cleaned binary packages:
.. code-block:: python
{'cleaned': {<dist file>: <size>},
'total_cleaned': <size>}
CLI Example:
.. code-block:: bash
salt '*' gentoolkit.eclean_pkg destructive=True
'''
if exclude_file is None:
exclude = None
else:
try:
exclude = _parse_exclude(exclude_file)
except excludemod.ParseExcludeFileException as e:
ret = {e: 'Invalid exclusion file: {0}'.format(exclude_file)}
return ret
if time_limit != 0:
time_limit = cli.parseTime(time_limit)
clean_size = 0
# findPackages requires one arg, but does nothing with it.
# So we will just pass None in for the required arg
clean_me = search.findPackages(None, destructive=destructive,
package_names=package_names,
time_limit=time_limit, exclude=exclude,
pkgdir=search.pkgdir)
cleaned = dict()
def _eclean_progress_controller(size, key, *args):
cleaned[key] = _pretty_size(size)
return True
if clean_me:
cleaner = clean.CleanUp(_eclean_progress_controller)
clean_size = cleaner.clean_pkgs(clean_me, search.pkgdir)
ret = {'cleaned': cleaned,
'total_cleaned': _pretty_size(clean_size)}
return ret | Clean obsolete binary packages
destructive
Only keep minimum for reinstallation
package_names
Protect all versions of installed packages. Only meaningful if used
with destructive=True
time_limit <time>
Don't delete binary packages modified since <time>
<time> is an amount of time: "1y" is "one year", "2w" is
"two weeks", etc. Units are: y (years), m (months), w (weeks),
d (days) and h (hours).
exclude_file
Path to exclusion file. Default is /etc/eclean/packages.exclude
This is the same default eclean-pkg uses. Use None if this file
exists and you want to ignore it.
Returns a dict containing the cleaned binary packages:
.. code-block:: python
{'cleaned': {<dist file>: <size>},
'total_cleaned': <size>}
CLI Example:
.. code-block:: bash
salt '*' gentoolkit.eclean_pkg destructive=True |
def __find_star_in_col(self, col):
"""
Find the first starred element in the specified column. Returns
the row index, or -1 if no starred element was found.
"""
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row | Find the first starred element in the specified column. Returns
the row index, or -1 if no starred element was found. |
def __parse_fc_data(fc_data):
"""Parse the forecast data from the xml section."""
from buienradar.buienradar import condition_from_code
fc = []
for daycnt in range(1, 6):
daysection = __BRDAYFC % daycnt
if daysection in fc_data:
tmpsect = fc_data[daysection]
fcdatetime = datetime.now(pytz.timezone(__TIMEZONE))
fcdatetime = fcdatetime.replace(hour=12,
minute=0,
second=0,
microsecond=0)
# add daycnt days
fcdatetime = fcdatetime + timedelta(days=daycnt)
code = tmpsect.get(__BRICOON, {}).get(__BRID)
fcdata = {
CONDITION: condition_from_code(code),
TEMPERATURE: __get_float(tmpsect, __BRMAXTEMP),
MIN_TEMP: __get_float(tmpsect, __BRMINTEMP),
MAX_TEMP: __get_float(tmpsect, __BRMAXTEMP),
SUN_CHANCE: __get_int(tmpsect, __BRKANSZON),
RAIN_CHANCE: __get_int(tmpsect, __BRKANSREGEN),
RAIN: __get_float(tmpsect, __BRMAXMMREGEN),
SNOW: __get_float(tmpsect, __BRSNEEUWCMS),
WINDFORCE: __get_int(tmpsect, __BRWINDKRACHT),
DATETIME: fcdatetime,
}
fcdata[CONDITION][IMAGE] = tmpsect.get(__BRICOON, {}).get(__BRTEXT)
fc.append(fcdata)
return fc | Parse the forecast data from the xml section. |
def start_aikif():
"""
starts the web interface and possibly other processes
"""
if sys.platform[0:3] == 'win':
os.system("start go_web_aikif.bat")
else:
os.system("../aikif/web_app/web_aikif.py")
import webbrowser
import time
time.sleep(1)
webbrowser.open('http://127.0.0.1:5000') | starts the web interface and possibly other processes |
def filefind(self, names):
"""Return first found file matching name (case-insensitive).
Some packages have docs/HISTORY.txt and
package/name/HISTORY.txt. We make sure we only return the one
in the docs directory if no other can be found.
'names' can be a string or a list of strings; if you have both
a CHANGES.txt and a docs/HISTORY.txt, you want the top level
CHANGES.txt to be found first.
"""
if type(names) is str:
names = [names]
lower_names = []
for name in names:
lower_names.append(name.lower())
names = lower_names
files = self.list_files()
found = []
for fullpath in files:
filename = os.path.basename(fullpath)
if filename.lower() in names:
logger.debug("Found %s", fullpath)
if not os.path.exists(fullpath):
# Strange. It at least happens in the tests when
# we deliberately remove a CHANGES.txt file.
logger.warn("Found file %s in version control but not on "
"file system.", fullpath)
continue
found.append(fullpath)
if not found:
return
if len(found) > 1:
found.sort(key=len)
logger.warn("Found more than one file, picked the shortest one to "
"change: %s", ', '.join(found))
return found[0] | Return first found file matching name (case-insensitive).
Some packages have docs/HISTORY.txt and
package/name/HISTORY.txt. We make sure we only return the one
in the docs directory if no other can be found.
'names' can be a string or a list of strings; if you have both
a CHANGES.txt and a docs/HISTORY.txt, you want the top level
CHANGES.txt to be found first. |
def similarity(ctx, app_id, json_flag, query_pair, request_id):
# type: (Context, unicode, bool, List[unicode], unicode) -> None
""" Scoring the similarity of two words. """
app_id = clean_app_id(app_id)
api = GoolabsAPI(app_id)
ret = api.similarity(
query_pair=query_pair,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo('{0:.16f}'.format(ret['score'])) | Scoring the similarity of two words. |
def split_path(path_):
"""
Split the requested path into (locale, path).
locale will be empty if it isn't found.
"""
path = path_.lstrip('/')
# Use partition instead of split since it always returns 3 parts
first, _, rest = path.partition('/')
lang = first.lower()
if lang in settings.LANGUAGE_URL_MAP:
return settings.LANGUAGE_URL_MAP[lang], rest
else:
supported = find_supported(first)
if len(supported):
return supported[0], rest
else:
return '', path | Split the requested path into (locale, path).
locale will be empty if it isn't found. |
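The partition trick itself can be checked without Django (toy path, illustration only):
path = '/fr/docs/home'.lstrip('/')
first, _, rest = path.partition('/')   # always returns exactly three parts
print(first, rest)  # fr docs/home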
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) | Reshapes a tensor between dimensions i and j. |
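The target-shape arithmetic can be previewed with plain lists (hypothetical shapes, illustration only):
t_shape = [2, 3, 4, 5]
i, j, shape = 1, 3, [12]
# Dimensions i..j-1 (here 3 and 4) are collapsed into the new shape [12].
print(t_shape[:i] + shape + t_shape[j:])  # [2, 12, 5]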
def cmd_fence_move(self, args):
'''handle fencepoint move'''
if len(args) < 1:
print("Usage: fence move FENCEPOINTNUM")
return
if not self.have_list:
print("Please list fence points first")
return
idx = int(args[0])
if idx <= 0 or idx > self.fenceloader.count():
print("Invalid fence point number %u" % idx)
return
try:
latlon = self.module('map').click_position
except Exception:
print("No map available")
return
if latlon is None:
print("No map click position available")
return
# note we don't subtract 1, as first fence point is the return point
self.fenceloader.move(idx, latlon[0], latlon[1])
if self.send_fence():
print("Moved fence point %u" % idx) | handle fencepoint move |
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
"""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []] | Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]] |
def run_step(self):
"""Write in to out, replacing strings per the replace_pairs."""
formatted_replacements = self.context.get_formatted_iterable(
self.replace_pairs)
iter = StreamReplacePairsRewriterStep.iter_replace_strings(
formatted_replacements)
rewriter = StreamRewriter(iter)
super().run_step(rewriter) | Write in to out, replacing strings per the replace_pairs. |
def publocus(args):
"""
%prog publocus idsfile > idsfiles.publocus
Given a list of model identifiers, convert each into a GenBank approved
pub_locus.
Example output:
Medtr1g007020.1 MTR_1g007020
Medtr1g007030.1 MTR_1g007030
Medtr1g007060.1 MTR_1g007060A
Medtr1g007060.2 MTR_1g007060B
"""
p = OptionParser(publocus.__doc__)
p.add_option("--locus_tag", default="MTR_",
help="GenBank locus tag [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
locus_tag = opts.locus_tag
index = AutoVivification()
idsfile, = args
fp = must_open(idsfile)
for row in fp:
locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso")
if None in (locus, chrom, sep, rank, iso):
logging.warning("{0} is not a valid gene model identifier".format(row))
continue
if locus not in index.keys():
pub_locus = gene_name(chrom, rank, prefix=locus_tag, sep=sep)
index[locus]['pub_locus'] = pub_locus
index[locus]['isos'] = set()
index[locus]['isos'].add(int(iso))
for locus in index:
pub_locus = index[locus]['pub_locus']
index[locus]['isos'] = sorted(index[locus]['isos'])
if len(index[locus]['isos']) > 1:
new = [chr(n+64) for n in index[locus]['isos'] if n < 27]
for i, ni in zip(index[locus]['isos'], new):
print("\t".join(x for x in ("{0}.{1}".format(locus, i), \
"{0}{1}".format(pub_locus, ni))))
else:
print("\t".join(x for x in ("{0}.{1}".format(locus, index[locus]['isos'][0]), \
pub_locus))) | %prog publocus idsfile > idsfiles.publocus
Given a list of model identifiers, convert each into a GenBank approved
pub_locus.
Example output:
Medtr1g007020.1 MTR_1g007020
Medtr1g007030.1 MTR_1g007030
Medtr1g007060.1 MTR_1g007060A
Medtr1g007060.2 MTR_1g007060B |
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes) | return an empty BlockManager with the items axis of len 0 |
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None | Ionic radius of specie. Returns None if data is not present. |
def get_language():
"""
Wrapper around Django's `get_language` utility.
For Django >= 1.8, `get_language` returns None in case no translation is active.
Here we patch this behavior e.g. for back-end functionality requiring access to translated fields
"""
from parler import appsettings
language = dj_get_language()
if language is None and appsettings.PARLER_DEFAULT_ACTIVATE:
return appsettings.PARLER_DEFAULT_LANGUAGE_CODE
else:
return language | Wrapper around Django's `get_language` utility.
For Django >= 1.8, `get_language` returns None in case no translation is active.
Here we patch this behavior e.g. for back-end functionality requiring access to translated fields |
def _determine_profiles(self):
"""
Determine the WBEM management profiles advertised by the WBEM server,
by communicating with it and enumerating the instances of
`CIM_RegisteredProfile`.
If the profiles could be determined, this method sets the
:attr:`profiles` property of this object to the list of
`CIM_RegisteredProfile` instances (as :class:`~pywbem.CIMInstance`
objects), and returns.
Otherwise, it raises an exception.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
determined.
"""
mp_insts = self._conn.EnumerateInstances("CIM_RegisteredProfile",
namespace=self.interop_ns)
self._profiles = mp_insts | Determine the WBEM management profiles advertised by the WBEM server,
by communicating with it and enumerating the instances of
`CIM_RegisteredProfile`.
If the profiles could be determined, this method sets the
:attr:`profiles` property of this object to the list of
`CIM_RegisteredProfile` instances (as :class:`~pywbem.CIMInstance`
objects), and returns.
Otherwise, it raises an exception.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
determined. |
def dependencies_satisfied(self, plugin):
"""
Checks whether a plugin's dependencies are satisfied.
Logs an error if there is an unsatisfied dependency.
Returns: Bool
"""
for depends in plugin.dependencies:
if depends not in self.config['plugins']:
log.error("{0} depends on {1}, but {1} wasn't in the "
"config file. To use {0}, install {1} and add "
"it to the config.".format(plugin.name, depends))
return False
return True | Checks whether a plugin's dependencies are satisfied.
Logs an error if there is an unsatisfied dependency.
Returns: Bool |
def setRGB(self, pixel, r, g, b):
"""Set single pixel using individual RGB values instead of tuple"""
self.set(pixel, (r, g, b)) | Set single pixel using individual RGB values instead of tuple |
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else '' | Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command |
def israw(self):
"""
Returns True if the PTY should operate in raw mode.
If the container was not started with tty=True, this will return False.
"""
if self.raw is None:
info = self.container_info()
self.raw = self.stdout.isatty() and info['Config']['Tty']
return self.raw | Returns True if the PTY should operate in raw mode.
If the container was not started with tty=True, this will return False. |
def create_search_url(self):
""" Generates (urlencoded) query string from stored key-values tuples
:returns: A string containing all arguments in a url-encoded format
"""
url = '?'
for key, value in self.arguments.items():
url += '%s=%s&' % (quote_plus(key), quote_plus(value))
self.url = url[:-1]
return self.url | Generates a (url-encoded) query string from the stored key-value pairs
:returns: A string containing all arguments in a url-encoded format |
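A self-contained sketch of the same encoding loop (hypothetical arguments, illustration only):
from urllib.parse import quote_plus
arguments = {'q': 'foo bar', 'lang': 'en'}
url = '?'
for key, value in arguments.items():
    url += '%s=%s&' % (quote_plus(key), quote_plus(value))
print(url[:-1])  # ?q=foo+bar&lang=en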
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print(cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan'))
return rs | Get an Entity |
def summarize_notices(self, notices_json):
"""
The function for summarizing RDAP notices into a unique list.
https://tools.ietf.org/html/rfc7483#section-4.3
Args:
notices_json (:obj:`dict`): A json mapping of notices from RDAP
results.
Returns:
list of dict: Unique RDAP notices information:
::
[{
'title' (str) - The title/header of the notice.
'description' (str) - The description/body of the notice.
'links' (list) - Unique links returned by
:obj:`ipwhois.rdap._RDAPCommon.summarize_links()`.
}]
"""
ret = []
for notices_dict in notices_json:
tmp = {
'title': None,
'description': None,
'links': None
}
try:
tmp['title'] = notices_dict['title']
except (KeyError, ValueError, TypeError):
pass
try:
tmp['description'] = '\n'.join(notices_dict['description'])
except (KeyError, ValueError, TypeError):
pass
try:
tmp['links'] = self.summarize_links(notices_dict['links'])
except (KeyError, ValueError, TypeError):
pass
if any(tmp.values()):
ret.append(tmp)
return ret | The function for summarizing RDAP notices into a unique list.
https://tools.ietf.org/html/rfc7483#section-4.3
Args:
notices_json (:obj:`dict`): A json mapping of notices from RDAP
results.
Returns:
list of dict: Unique RDAP notices information:
::
[{
'title' (str) - The title/header of the notice.
'description' (str) - The description/body of the notice.
'links' (list) - Unique links returned by
:obj:`ipwhois.rdap._RDAPCommon.summarize_links()`.
}] |
def __update_state(self):
"""Fetches most up to date state from db."""
# Only if the job was not in a terminal state.
if self._state.active:
self._state = self.__get_state_by_id(self.job_config.job_id) | Fetches most up to date state from db. |
def cardinal_groupby(self):
"""
Group this object on its cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal)
"""
g, t = self._cardinal
self[g] = self[g].astype(t)
grpby = self.groupby(g)
self[g] = self[g].astype('category')
return grpby | Group this object on its cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal) |
def serialisable(cls, key, obj):
'''Determines what can be serialised and what shouldn't
'''
# ignore class method names
if key.startswith('_Serialisable'.format(cls.__name__)):
return False
if key in obj.__whitelist:
return True
# class variables will be prefixed with '_<cls.__name__>__variable'
# so let's remove these too
#if key.startswith('__'):
if '__' in key:
return False
# ignore our own class variables
#if key in ['_Serialisable__whitelist', '_Serialisable__blacklist']:
# return False
if key in obj.__blacklist:
return False
if callable(getattr(obj, key)):
return False
# check for properties
if hasattr(obj.__class__, key):
if isinstance(getattr(obj.__class__, key), property):
return False
return True | Determines what can be serialised and what shouldn't |
def send(self, **kwargs):
"""Create and send a specific request, and return the response.
For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing
Args:
**kwargs: A single kwarg with the name and value to fill in to Request.
Returns:
The Response corresponding to your request.
"""
assert len(kwargs) == 1, "Must make a single request."
res = self.send_req(sc_pb.Request(**kwargs))
return getattr(res, list(kwargs.keys())[0]) | Create and send a specific request, and return the response.
For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing
Args:
**kwargs: A single kwarg with the name and value to fill in to Request.
Returns:
The Response corresponding to your request. |
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({'status': 'SUCCESS'})) | Save a dashboard's metadata |
def exists(self):
"""Check if a target exists
This function is called by :mod:`luigi` to check if a task output exists. By default,
:mod:`luigi` considers a task as complete if all its targets (outputs) exist.
Returns:
bool: ``True`` if target exists, ``False`` otherwise
"""
# get DB connection
session = client.get_client().create_session()
# query for target existence
ret = self._base_query(session).count() > 0
session.close()
return ret | Check if a target exists
This function is called by :mod:`luigi` to check if a task output exists. By default,
:mod:`luigi` considers a task as complete if all its targets (outputs) exist.
Returns:
bool: ``True`` if target exists, ``False`` otherwise |
def threshold_monitor_hidden_threshold_monitor_security_policy_area_timebase(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
security = ET.SubElement(threshold_monitor, "security")
policy = ET.SubElement(security, "policy")
sec_policy_name_key = ET.SubElement(policy, "sec_policy_name")
sec_policy_name_key.text = kwargs.pop('sec_policy_name')
area = ET.SubElement(policy, "area")
sec_area_value_key = ET.SubElement(area, "sec_area_value")
sec_area_value_key.text = kwargs.pop('sec_area_value')
timebase = ET.SubElement(area, "timebase")
timebase.text = kwargs.pop('timebase')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
async def createWorkerType(self, *args, **kwargs):
"""
Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
to shut itself down. The provisioner has a limit (currently 96 hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs) | Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
to shut itself down. The provisioner has a limit (currently 96 hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable`` |
def update(self, batch_webhook_id, data):
"""
Update a webhook that will fire whenever any batch request completes
processing.
:param batch_webhook_id: The unique id for the batch webhook.
:type batch_webhook_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"url": string*
}
"""
self.batch_webhook_id = batch_webhook_id
if 'url' not in data:
raise KeyError('The batch webhook must have a valid url')
return self._mc_client._patch(url=self._build_path(batch_webhook_id), data=data) | Update a webhook that will fire whenever any batch request completes
processing.
:param batch_webhook_id: The unique id for the batch webhook.
:type batch_webhook_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"url": string*
} |
def fresh_working_set():
"""return a pkg_resources "working set", representing the *currently* installed packages"""
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
"""Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
logger.debug('working-set entry: %r', entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
# eggs override anything that's installed normally
# fun fact: pkg_resources.working_set's results depend on the
# ordering of os.listdir since the order of os.listdir is
# entirely arbitrary (an implementation detail of the file system),
# without calling site.main(), an .egg-link file may or may not
# be honored, depending on the filesystem
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls() | return a pkg_resources "working set", representing the *currently* installed packages |
def display_matrix(self, matrix, interval=2.0, brightness=1.0, fading=False, ignore_duplicates=False):
"""
Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: led brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed
"""
self._matrix_writer.write(
matrix=matrix,
interval=interval,
brightness=brightness,
fading=fading,
ignore_duplicates=ignore_duplicates
) | Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: led brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed |
def GetTZInfo(tzname='UTC', utcOffset=None, dst=None):
""" Get / Add timezone info """
key = (tzname, utcOffset, dst)
tzInfo = TZManager._tzInfos.get(key)
if not tzInfo:
tzInfo = TZInfo(tzname, utcOffset, dst)
TZManager._tzInfos[key] = tzInfo
return tzInfo | Get / Add timezone info |
async def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
ms = int(time_ms.microseconds / 1000)
time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
return await self.execute_command('PSETEX', name, time_ms, value) | Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object |
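The timedelta-to-milliseconds conversion can be checked standalone (illustration only):
import datetime
time_ms = datetime.timedelta(seconds=1, microseconds=500000)
ms = int(time_ms.microseconds / 1000)
print((time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms)  # 1500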
def create(self, size):
"""
Creates and return a thumbnail of a given size.
"""
thumbnail = images.create(self.source_image.name, size,
self.metadata_backend, self.storage)
return thumbnail | Creates and return a thumbnail of a given size. |
def remove_entry_listener(self, registration_id):
"""
Removes the specified entry listener. Returns silently if there is no such listener added before.
:param registration_id: (str), id of registered listener.
:return: (bool), ``true`` if registration is removed, ``false`` otherwise.
"""
return self._stop_listening(registration_id,
lambda i: multi_map_remove_entry_listener_codec.encode_request(self.name, i)) | Removes the specified entry listener. Returns silently if there is no such listener added before.
:param registration_id: (str), id of registered listener.
:return: (bool), ``true`` if registration is removed, ``false`` otherwise. |
def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs):
"""
Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone`
"""
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs) | Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone` |
def dimensionNames(self):
""" Returns a list with the dimension names of the underlying NCDF variable
"""
nSubDims = len(self._subArrayShape)
subArrayDims = ['SubDim{}'.format(dimNr) for dimNr in range(nSubDims)]
return list(self._ncVar.dimensions + tuple(subArrayDims)) | Returns a list with the dimension names of the underlying NCDF variable |
def random_str(Nchars=6, randstrbase='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Return a random string of <Nchars> characters. Characters are sampled
uniformly from <randstrbase>.
"""
return ''.join([randstrbase[random.randint(0, len(randstrbase) - 1)] for i in range(Nchars)]) | Return a random string of <Nchars> characters. Characters are sampled
uniformly from <randstrbase>. |
def create_lazy_user(self):
""" Create a lazy user. Returns a 2-tuple of the underlying User
object (which may be of a custom class), and the username.
"""
user_class = self.model.get_user_class()
username = self.generate_username(user_class)
user = user_class.objects.create_user(username, '')
self.create(user=user)
return user, username | Create a lazy user. Returns a 2-tuple of the underlying User
object (which may be of a custom class), and the username. |
def listify(*args):
"""
Convert args to a list, unless there's one arg and it's a
function, in which case it acts as a decorator.
"""
if (len(args) == 1) and callable(args[0]):
func = args[0]
@wraps(func)
def _inner(*args, **kwargs):
return list(func(*args, **kwargs))
return _inner
else:
return list(args) | Convert args to a list, unless there's one arg and it's a
function, in which case it acts as a decorator. |
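A small usage sketch of both behaviors (assumes the listify above is importable; its module also needs functools.wraps):
@listify
def first_two(seq):
    for item in seq[:2]:
        yield item

print(first_two('abcd'))  # ['a', 'b'] -- generator output collected into a list
print(listify(1, 2, 3))   # [1, 2, 3]  -- multiple plain args become a list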
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _ | Create a window function by name |
def timeseries(self):
"""
Time series of storage operation
Parameters
----------
ts : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with (on the grid side) in kW in column
'p' and reactive power in kvar in column 'q'. When 'q' is positive,
reactive power is supplied (behaving as a capacitor) and when 'q'
is negative reactive power is consumed (behaving as an inductor).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
See parameter `timeseries`.
"""
# check if time series for reactive power is given, otherwise
# calculate it
if 'q' in self._timeseries.columns:
return self._timeseries
else:
self._timeseries['q'] = abs(self._timeseries.p) * self.q_sign * \
tan(acos(self.power_factor))
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :] | Time series of storage operation
Parameters
----------
ts : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with (on the grid side) in kW in column
'p' and reactive power in kvar in column 'q'. When 'q' is positive,
reactive power is supplied (behaving as a capacitor) and when 'q'
is negative reactive power is consumed (behaving as an inductor).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
See parameter `timeseries`. |
def publish_scene_name(self, scene_id, name):
"""publish a changed scene name"""
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_name(self.sequence_number, scene_id, name))
return self.sequence_number | publish a changed scene name |
def camelify(self):
"""turn a string to CamelCase, omitting non-word characters"""
outstring = self.titleify(allwords=True)
outstring = re.sub(r"&[^;]+;", " ", outstring)
outstring = re.sub(r"\W+", "", outstring)
return String(outstring) | turn a string to CamelCase, omitting non-word characters |
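The two regex passes can be tried standalone on a plain str (toy input; the titleify step is skipped here, illustration only):
import re
s = 'Fish &amp; Chips: The Sequel'
s = re.sub(r"&[^;]+;", " ", s)  # drop HTML entities such as &amp;
s = re.sub(r"\W+", "", s)       # strip remaining non-word characters
print(s)  # FishChipsTheSequel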
def open(filename, frame='unspecified'):
"""Create a Direction from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created Direction.
Returns
-------
:obj:`Direction`
A Direction created from the data in the file.
"""
data = BagOfPoints.load_data(filename)
return Direction(data, frame) | Create a Direction from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created Direction.
Returns
-------
:obj:`Direction`
A Direction created from the data in the file. |
def update_process_behavior(self, behavior_data, process_id, behavior_ref_name):
"""UpdateProcessBehavior.
[Preview API] Replaces a behavior in the process.
:param :class:`<ProcessBehaviorUpdateRequest> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehaviorUpdateRequest>` behavior_data:
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
:rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if behavior_ref_name is not None:
route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str')
content = self._serialize.body(behavior_data, 'ProcessBehaviorUpdateRequest')
response = self._send(http_method='PUT',
location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('ProcessBehavior', response) | UpdateProcessBehavior.
[Preview API] Replaces a behavior in the process.
:param :class:`<ProcessBehaviorUpdateRequest> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehaviorUpdateRequest>` behavior_data:
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
:rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>` |
def batch_delete_intents(self,
parent,
intents,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes intents in the specified agent.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``intents``:
>>> intents = []
>>>
>>> response = client.batch_delete_intents(parent, intents)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the agent to delete all intents for. Format:
``projects/<Project ID>/agent``.
intents (list[Union[dict, ~google.cloud.dialogflow_v2.types.Intent]]): Required. The collection of intents to delete. Only intent ``name`` must be
filled in.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'batch_delete_intents' not in self._inner_api_calls:
self._inner_api_calls[
'batch_delete_intents'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_delete_intents,
default_retry=self._method_configs[
'BatchDeleteIntents'].retry,
default_timeout=self._method_configs['BatchDeleteIntents']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.BatchDeleteIntentsRequest(
parent=parent,
intents=intents,
)
operation = self._inner_api_calls['batch_delete_intents'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
) | Deletes intents in the specified agent.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``intents``:
>>> intents = []
>>>
>>> response = client.batch_delete_intents(parent, intents)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the agent to delete all entity types for. Format:
``projects/<Project ID>/agent``.
intents (list[Union[dict, ~google.cloud.dialogflow_v2.types.Intent]]): Required. The collection of intents to delete. Only intent ``name`` must be
filled in.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def list_local():
'''
List the locally installed overlays.
Return a list of installed overlays:
CLI Example:
.. code-block:: bash
salt '*' layman.list_local
'''
cmd = 'layman --quietness=1 --list-local --nocolor'
out = __salt__['cmd.run'](cmd, python_shell=False).split('\n')
ret = [line.split()[1] for line in out if len(line.split()) > 2]
return ret | List the locally installed overlays.
Return a list of installed overlays:
CLI Example:
.. code-block:: bash
salt '*' layman.list_local |
def volumes(self):
"""This property prepares the list of volumes
:return a list of volumes.
"""
return sys_volumes.VolumeCollection(
self._conn, utils.get_subresource_path_by(self, 'Volumes'),
redfish_version=self.redfish_version) | This property prepares the list of volumes
:return a list of volumes. |
def where(self, **kwargs):
"""Return a new Dataset refined using the given condition
:param kwargs: a map of `dimension` => `condition` to filter the elements
of the dataset. `condition` can either be an exact value or a
callable returning a boolean value. If `condition` is a value, it is
converted to a string, then sanitized. If `condition` is a callable, note that it will
be passed sanitized values -- i.e., characters outside [a-zA-Z0-9_.] are converted
to `_`.
"""
clauses = copy(self.clauses)
for dimension, condition in kwargs.items():
if dimension in self.clauses:
raise Exception('There should be only one clause for {}'.format(dimension))
if dimension not in self.schema:
raise Exception('The dimension {} doesn\'t exist'.format(dimension))
if isfunction(condition) or isinstance(condition, functools.partial):
clauses[dimension] = condition
else:
clauses[dimension] = functools.partial((lambda x, y: x == y), self._sanitize_dimension(str(condition)))
return self._copy(clauses=clauses) | Return a new Dataset refined using the given condition
:param kwargs: a map of `dimension` => `condition` to filter the elements
of the dataset. `condition` can either be an exact value or a
callable returning a boolean value. If `condition` is a value, it is
converted to a string, then sanitized. If `condition` is a callable, note that it will
be passed sanitized values -- i.e., characters outside [a-zA-Z0-9_.] are converted
to `_`. |
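A minimal standalone sketch of the storage trick used above: the exact-value branch wraps the comparison in functools.partial, so every stored clause is uniformly a one-argument callable applied to the candidate value. The values here are illustrative only.

import functools

# An exact-value clause becomes a one-argument predicate over the candidate.
clause = functools.partial((lambda x, y: x == y), "FR")
print(clause("FR"))   # True
print(clause("DE"))   # False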
def createEditor(self, parent, option, index):
"""Create editor widget"""
model = index.model()
value = model.get_value(index)
if model._data.dtype.name == "bool":
value = not value
model.setData(index, to_qvariant(value))
return
elif value is not np.ma.masked:
editor = QLineEdit(parent)
editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
editor.setAlignment(Qt.AlignCenter)
if is_number(self.dtype):
validator = QDoubleValidator(editor)
validator.setLocale(QLocale('C'))
editor.setValidator(validator)
editor.returnPressed.connect(self.commitAndCloseEditor)
return editor | Create editor widget |
def from_pycbc(cls, pycbcseries, copy=True):
"""Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries
"""
return cls(pycbcseries.data, t0=pycbcseries.start_time,
dt=pycbcseries.delta_t, copy=copy) | Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries |
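A hedged round-trip sketch, assuming both pycbc and gwpy are installed; the epoch and array size are placeholder values.

import numpy
from pycbc.types import TimeSeries as PyCBCTimeSeries
from gwpy.timeseries import TimeSeries

# Build a short PyCBC series, then convert it with the classmethod above.
pycbc_ts = PyCBCTimeSeries(numpy.random.normal(size=4096),
                           delta_t=1.0 / 4096, epoch=1126259462)
gwpy_ts = TimeSeries.from_pycbc(pycbc_ts)
print(gwpy_ts.t0, gwpy_ts.dt)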
def proc_check_guard(self, instance, sql):
"""
check to see if the guard SQL returns a single column containing 0 or 1
We return True if 1, else False
"""
self.open_db_connections(instance, self.PROC_GUARD_DB_KEY)
cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY)
should_run = False
try:
cursor.execute(sql, ())
result = cursor.fetchone()
should_run = result[0] == 1
except Exception as e:
self.log.error("Failed to run proc_only_if sql {} : {}".format(sql, e))
self.close_cursor(cursor)
self.close_db_connections(instance, self.PROC_GUARD_DB_KEY)
return should_run | check to see if the guard SQL returns a single column containing 0 or 1
We return True if 1, else False
def fire_event(self, event_name, service_name, default=None):
"""
Fire a data_ready, data_lost, start, or stop event on a given service.
"""
service = self.get_service(service_name)
callbacks = service.get(event_name, default)
if not callbacks:
return
if not isinstance(callbacks, Iterable):
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
callback(self, service_name, event_name)
else:
callback(service_name) | Fire a data_ready, data_lost, start, or stop event on a given service. |
def parent(self):
""" Parent of current object
:rtype: Collection
"""
parent = list(self.graph.objects(self.asNode(), RDF_NAMESPACES.DTS.parent))
if parent:
return self.parent_class(parent[0])
return None | Parent of current object
:rtype: Collection |
def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None):
'''
Updates an existing usage plan with throttling and quotas
.. versionadded:: 2017.7.0
plan_id
Id of the created usage plan
throttle
A dictionary consisting of the following keys:
rateLimit
requests per second at steady rate, float
burstLimit
maximum number of requests per second, integer
quota
A dictionary consisting of the following keys:
limit
number of allowed requests per specified quota period [required if quota parameter is present]
offset
number of requests to be subtracted from limit at the beginning of the period [optional]
period
quota period, must be one of DAY, WEEK, or MONTH. [required if quota parameter is present]
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_usage_plan plan_id='usage plan id' throttle='{"rateLimit": 10.0, "burstLimit": 10}'
'''
try:
_validate_throttle(throttle)
_validate_quota(quota)
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
patchOperations = []
if throttle is None:
patchOperations.append({'op': 'remove', 'path': '/throttle'})
else:
if 'rateLimit' in throttle:
patchOperations.append({'op': 'replace', 'path': '/throttle/rateLimit', 'value': str(throttle['rateLimit'])}) # future lint: disable=blacklisted-function
if 'burstLimit' in throttle:
patchOperations.append({'op': 'replace', 'path': '/throttle/burstLimit', 'value': str(throttle['burstLimit'])}) # future lint: disable=blacklisted-function
if quota is None:
patchOperations.append({'op': 'remove', 'path': '/quota'})
else:
patchOperations.append({'op': 'replace', 'path': '/quota/period', 'value': str(quota['period'])}) # future lint: disable=blacklisted-function
patchOperations.append({'op': 'replace', 'path': '/quota/limit', 'value': str(quota['limit'])}) # future lint: disable=blacklisted-function
if 'offset' in quota:
patchOperations.append({'op': 'replace', 'path': '/quota/offset', 'value': str(quota['offset'])}) # future lint: disable=blacklisted-function
if patchOperations:
res = conn.update_usage_plan(usagePlanId=plan_id,
patchOperations=patchOperations)
return {'updated': True, 'result': res}
return {'updated': False}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
except (TypeError, ValueError) as e:
return {'error': six.text_type(e)} | Updates an existing usage plan with throttling and quotas
.. versionadded:: 2017.7.0
plan_id
Id of the created usage plan
throttle
A dictionary consisting of the following keys:
rateLimit
requests per second at steady rate, float
burstLimit
maximum number of requests per second, integer
quota
A dictionary consisting of the following keys:
limit
number of allowed requests per specified quota period [required if quota parameter is present]
offset
number of requests to be subtracted from limit at the beginning of the period [optional]
period
quota period, must be one of DAY, WEEK, or MONTH. [required if quota parameter is present]
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_usage_plan plan_id='usage plan id' throttle='{"rateLimit": 10.0, "burstLimit": 10}' |
def update(self, obj, size):
'''Update this profile.
'''
self.number += 1
self.total += size
if self.high < size: # largest
self.high = size
try: # prefer using weak ref
self.objref, self.weak = Weakref.ref(obj), True
except TypeError:
self.objref, self.weak = obj, False | Update this profile. |
def interfaces(root):
'''
Generate a dictionary with all available interfaces relative to root.
Symlinks are not followed.
CLI example:
.. code-block:: bash
salt '*' sysfs.interfaces block/bcache0/bcache
Output example:
.. code-block:: json
{
"r": [
"state",
"partial_stripes_expensive",
"writeback_rate_debug",
"stripe_size",
"dirty_data",
"stats_total/cache_hits",
"stats_total/cache_bypass_misses",
"stats_total/bypassed",
"stats_total/cache_readaheads",
"stats_total/cache_hit_ratio",
"stats_total/cache_miss_collisions",
"stats_total/cache_misses",
"stats_total/cache_bypass_hits",
],
"rw": [
"writeback_rate",
"writeback_rate_update_seconds",
"cache_mode",
"writeback_delay",
"label",
"writeback_running",
"writeback_metadata",
"running",
"writeback_rate_p_term_inverse",
"sequential_cutoff",
"writeback_percent",
"writeback_rate_d_term",
"readahead"
],
"w": [
"stop",
"clear_stats",
"attach",
"detach"
]
}
.. note::
* 'r' interfaces are read-only
* 'w' interfaces are write-only (e.g. actions)
* 'rw' are interfaces that can both be read or written
'''
root = target(root)
if root is False or not os.path.isdir(root):
log.error('SysFS %s not a dir', root)
return False
readwrites = []
reads = []
writes = []
for path, _, files in salt.utils.path.os_walk(root, followlinks=False):
for afile in files:
canpath = os.path.join(path, afile)
if not os.path.isfile(canpath):
continue
stat_mode = os.stat(canpath).st_mode
is_r = bool(stat.S_IRUSR & stat_mode)
is_w = bool(stat.S_IWUSR & stat_mode)
relpath = os.path.relpath(canpath, root)
if is_w:
if is_r:
readwrites.append(relpath)
else:
writes.append(relpath)
elif is_r:
reads.append(relpath)
else:
log.warning('Unable to find any interfaces in %s', canpath)
return {
'r': reads,
'w': writes,
'rw': readwrites
} | Generate a dictionary with all available interfaces relative to root.
Symlinks are not followed.
CLI example:
.. code-block:: bash
salt '*' sysfs.interfaces block/bcache0/bcache
Output example:
.. code-block:: json
{
"r": [
"state",
"partial_stripes_expensive",
"writeback_rate_debug",
"stripe_size",
"dirty_data",
"stats_total/cache_hits",
"stats_total/cache_bypass_misses",
"stats_total/bypassed",
"stats_total/cache_readaheads",
"stats_total/cache_hit_ratio",
"stats_total/cache_miss_collisions",
"stats_total/cache_misses",
"stats_total/cache_bypass_hits",
],
"rw": [
"writeback_rate",
"writeback_rate_update_seconds",
"cache_mode",
"writeback_delay",
"label",
"writeback_running",
"writeback_metadata",
"running",
"writeback_rate_p_term_inverse",
"sequential_cutoff",
"writeback_percent",
"writeback_rate_d_term",
"readahead"
],
"w": [
"stop",
"clear_stats",
"attach",
"detach"
]
}
.. note::
* 'r' interfaces are read-only
* 'w' interfaces are write-only (e.g. actions)
* 'rw' are interfaces that can both be read or written |
def smoothMLS1D(actor, f=0.2, showNLines=0):
"""
Smooth actor or points with a `Moving Least Squares` variant.
The list ``actor.info['variances']`` contains the residue calculated for each point.
Input actor's polydata is modified.
:param float f: smoothing factor - typical range is [0,2].
:param int showNLines: build an actor showing the fitting line for N random points.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|skeletonize| |skeletonize.py|_
"""
coords = actor.coordinates()
ncoords = len(coords)
Ncp = int(ncoords * f / 10)
nshow = int(ncoords)
if showNLines:
ndiv = int(nshow / showNLines)
if Ncp < 3:
vc.printc("~target Please choose a fraction higher than " + str(f), c=1)
Ncp = 3
poly = actor.GetMapper().GetInput()
vpts = poly.GetPoints()
locator = vtk.vtkPointLocator()
locator.SetDataSet(poly)
locator.BuildLocator()
vtklist = vtk.vtkIdList()
variances, newline, acts = [], [], []
for i, p in enumerate(coords):
locator.FindClosestNPoints(Ncp, p, vtklist)
points = []
for j in range(vtklist.GetNumberOfIds()):
trgp = [0, 0, 0]
vpts.GetPoint(vtklist.GetId(j), trgp)
points.append(trgp)
if len(points) < 2:
continue
points = np.array(points)
pointsmean = points.mean(axis=0) # plane center
uu, dd, vv = np.linalg.svd(points - pointsmean)
newp = np.dot(p - pointsmean, vv[0]) * vv[0] + pointsmean
variances.append(dd[1] + dd[2])
newline.append(newp)
if showNLines and not i % ndiv:
fline = fitLine(points, lw=4) # fitting plane
iapts = vs.Points(points) # blue points
acts += [fline, iapts]
for i in range(ncoords):
vpts.SetPoint(i, newline[i])
if showNLines:
apts = vs.Points(newline, c="r 0.6", r=2)
ass = Assembly([apts] + acts)
return ass # NB: a demo actor is returned
actor.info["variances"] = np.array(variances)
return actor | Smooth actor or points with a `Moving Least Squares` variant.
The list ``actor.info['variances']`` contains the residue calculated for each point.
Input actor's polydata is modified.
:param float f: smoothing factor - typical range is [0,2].
:param int showNLines: build an actor showing the fitting line for N random points.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|skeletonize| |skeletonize.py|_ |
def create_database():
""" creates database if necessary """
db = get_db()
response = db.query('SHOW DATABASES')
items = list(response.get_points('databases'))
databases = [database['name'] for database in items]
# if database does not exist, create it
if settings.INFLUXDB_DATABASE not in databases:
db.create_database(settings.INFLUXDB_DATABASE)
print('Created influxdb database {0}'.format(settings.INFLUXDB_DATABASE))
def evaluate_model(filepath,
train_start=0, train_end=60000, test_start=0,
test_end=10000, batch_size=128,
testing=False, num_threads=None):
"""
Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
if num_threads:
config_args = dict(intra_op_parallelism_threads=1)
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Use Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
eval_params = {'batch_size': batch_size}
fgsm_params = {
'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.
}
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
if is_adv is None:
report_text = None
elif is_adv:
report_text = 'adversarial'
else:
report_text = 'legitimate'
if report_text:
print('Test accuracy on %s examples: %0.4f' % (report_text, acc))
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
preds = model.get_logits(x)
# Evaluate the accuracy of the MNIST model on adversarial examples
do_eval(preds, x_test, y_test, 'train_clean_train_clean_eval', False)
do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True) | Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches |
def epubcheck_help():
"""Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar
"""
# tc = locale.getdefaultlocale()[1]
with open(os.devnull, "w") as devnull:
p = subprocess.Popen(
[c.JAVA, '-Duser.language=en', '-jar', c.EPUBCHECK, '-h'],
stdout=subprocess.PIPE,
stderr=devnull,
)
result = p.communicate()[0]
return result.decode() | Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar |
def restore(name, run_path=None, replace=False, root="."):
""" Downloads the specified file from cloud storage into the current run directory
if it doesn't exist locally.
name: the name of the file
run_path: optional path to a different run to pull files from
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
returns None if it can't find the file, otherwise a file object open for reading
raises wandb.CommError if it can't find the run
"""
if run_path is None and run is None:
raise ValueError(
"You must call `wandb.init` before calling restore or specify a run_path")
api = Api()
api_run = api.run(run_path or run.path)
root = run.dir if run else root
path = os.path.join(root, name)
if os.path.exists(path) and not replace:
return open(path, "r")
files = api_run.files([name])
if len(files) == 0:
return None
return files[0].download(root=root, replace=True) | Downloads the specified file from cloud storage into the current run directory
if it doesn't exist locally.
name: the name of the file
run_path: optional path to a different run to pull files from
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
returns None if it can't find the file, otherwise a file object open for reading
raises wandb.CommError if it can't find the run |
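A hypothetical usage sketch: pull a checkpoint saved by another run into the current run directory before resuming training. The run_path and file name are placeholders.

import wandb

wandb.init()
weights = wandb.restore("model.h5", run_path="my-entity/my-project/a1b2c3d4")
if weights is not None:
    print("restored checkpoint to", weights.name)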
def random_split(self, weights):
"""
Random split imageframes according to weights
:param weights: weights for each ImageFrame
:return:
"""
jvalues = self.image_frame.random_split(weights)
return [ImageFrame(jvalue) for jvalue in jvalues] | Random split imageframes according to weights
:param weights: weights for each ImageFrame
:return: |
def _match_errors_queues(self, query):
"""Tries to match in error queues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if query in self._error_queues:
queue = self._error_queues[query]
response = queue.value
logger.debug('Found response in error queue: %s',
repr(response))
return response | Tries to match in error queues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None |
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '') | Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs. |
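A quick check of the lookup order, assuming the env() helper above is in scope: the first variable that is set wins, otherwise the keyword default is returned. The variable names are illustrative.

import os

os.environ['DEMO_FALLBACK'] = 'http://keystone:5000/v3'
print(env('DEMO_PRIMARY', 'DEMO_FALLBACK', default='unset'))     # http://keystone:5000/v3
print(env('DEMO_MISSING_A', 'DEMO_MISSING_B', default='unset'))  # unset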
def set_blocks(self, list=None, dict=None, fill_air=False):
"""
Sets all blocks in this chunk, using either a list or dictionary.
Blocks not explicitly set can be filled to air by setting fill_air to True.
"""
if list:
# Inputting a list like self.blocksList
self.blocksList = list
elif dict:
# Inputting a dictionary like result of self.get_blocks_struct()
list = []
for x in range(16):
for z in range(16):
for y in range(128):
coord = x,y,z
offset = y + z*128 + x*128*16
if (coord in dict):
list.append(dict[coord])
else:
if (self.blocksList[offset] and not fill_air):
list.append(self.blocksList[offset])
else:
list.append(0) # Air
self.blocksList = list
else:
# None of the above...
return False
return True | Sets all blocks in this chunk, using either a list or dictionary.
Blocks not explicitly set can be filled to air by setting fill_air to True. |
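A standalone check of the index layout this method relies on: block coordinate (x, y, z) maps to offset y + z*128 + x*128*16 in the flat blocksList.

def block_offset(x, y, z):
    # Same formula as used in the dictionary branch above.
    return y + z * 128 + x * 128 * 16

print(block_offset(0, 64, 0))   # 64
print(block_offset(1, 64, 0))   # 2112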
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
""" prefixDecl: KW_PREFIX PNAME_NS IRIREF """
iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
prefix = ctx.PNAME_NS().getText()
if iri not in self.context.ld_prefixes:
self.context.prefixes.setdefault(prefix, iri.val) | prefixDecl: KW_PREFIX PNAME_NS IRIREF |
def get_methods(self):
"""
Returns a list of `MethodClassAnalysis` objects
"""
for c in self.classes.values():
for m in c.get_methods():
yield m | Returns a list of `MethodClassAnalysis` objects |
def convert(self, input_path=None, output_path=None, markup=None,
break_lines=False, divide_works=False, latin=False,
extra_args=None):
"""
:param input_path: TLG filepath to convert.
:param output_path: filepath of new converted text.
:param markup: Specificity of inline markup. Default None removes all
numerical markup; 'full' gives most detailed, with reference numbers
included before each text line.
:param break_lines: No spaces; removes line ends and hyphens before an
ID code; hyphens and spaces before page and column ends are retained.
:param divide_works: Each work (book) is output as a separate file in
the form output_file-xxx.txt; if an output file is not specified, this
option has no effect.
:param latin: Primarily Latin text (PHI). Some TLG texts, notably
doccan1.txt and doccan2.txt are mostly roman texts lacking explicit
language change codes. Setting this option will force a change to
Latin text after each citation block is encountered.
:param extra_args: Any other tlgu args to be passed, in list form and
without dashes, e.g.: ['p', 'b', 'B'].
"""
# setup file paths
input_path = os.path.expanduser(input_path)
output_path = os.path.expanduser(output_path)
# check input path exists
assert os.path.isfile(input_path), 'File {0} does not exist.'.format(input_path)
# setup tlgu flags
tlgu_options = []
if markup == 'full':
full_args = ['v', 'w', 'x', 'y', 'z']
[tlgu_options.append(x) for x in full_args] # pylint: disable=W0106
if break_lines:
tlgu_options.append('N')
if divide_works:
tlgu_options.append('W')
if latin:
tlgu_options.append('r')
# setup extra args
if extra_args is None:
extra_args = []
else:
try:
extra_args = list(extra_args)
except Exception as exc:
logger.error("Argument 'extra_args' must be a list: %s.", exc)
raise
tlgu_options = tlgu_options + extra_args
# assemble all tlgu flags
tlgu_options = list(set(tlgu_options))
if tlgu_options:
tlgu_flags = '-' + ' -'.join(tlgu_options)
else:
tlgu_flags = ''
# make tlgu call
tlgu_call = 'tlgu {0} {1} {2}'.format(tlgu_flags,
input_path,
output_path)
logger.info(tlgu_call)
try:
p_out = subprocess.call(tlgu_call, shell=True)
if p_out == 1:
logger.error('Failed to convert %s to %s.',
input_path,
output_path)
except Exception as exc:
logger.error('Failed to convert %s to %s: %s',
input_path,
output_path,
exc)
raise | :param input_path: TLG filepath to convert.
:param output_path: filepath of new converted text.
:param markup: Specificity of inline markup. Default None removes all
numerical markup; 'full' gives most detailed, with reference numbers
included before each text line.
:param break_lines: No spaces; removes line ends and hyphens before an
ID code; hyphens and spaces before page and column ends are retained.
:param divide_works: Each work (book) is output as a separate file in
the form output_file-xxx.txt; if an output file is not specified, this
option has no effect.
:param latin: Primarily Latin text (PHI). Some TLG texts, notably
doccan1.txt and doccan2.txt are mostly roman texts lacking explicit
language change codes. Setting this option will force a change to
Latin text after each citation block is encountered.
:param extra_args: Any other tlgu args to be passed, in list form and
without dashes, e.g.: ['p', 'b', 'B']. |
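A hypothetical usage sketch; TLGU is assumed to be the wrapper class exposing convert() above, and both paths are placeholders.

converter = TLGU()
converter.convert(input_path='~/corpora/TLG_E/TLG0012.TXT',
                  output_path='~/cltk_data/greek/text/tlg0012.txt',
                  markup='full',
                  divide_works=True)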
def _validate_rule(self, rule):
# type: (Type[Rule]) -> None
"""
Validate rule. Valid rule must inherit from Rule and have valid syntax.
:param rule: Rule to validate.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
"""
if not inspect.isclass(rule) or not issubclass(rule, Rule):
raise NotRuleException(rule)
rule.validate(self._grammar) | Validate rule. Valid rule must inherit from Rule and have valid syntax.
:param rule: Rule to validate.
:raise NotRuleException: If the parameter doesn't inherit from Rule. |
def read_stream(stream, output, prebuffer, chunk_size=8192):
"""Reads data from stream and then writes it to the output."""
is_player = isinstance(output, PlayerOutput)
is_http = isinstance(output, HTTPServer)
is_fifo = is_player and output.namedpipe
show_progress = isinstance(output, FileOutput) and output.fd is not stdout and sys.stdout.isatty()
show_record_progress = hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and sys.stdout.isatty()
stream_iterator = chain(
[prebuffer],
iter(partial(stream.read, chunk_size), b"")
)
if show_progress:
stream_iterator = progress(stream_iterator,
prefix=os.path.basename(args.output))
elif show_record_progress:
stream_iterator = progress(stream_iterator,
prefix=os.path.basename(args.record))
try:
for data in stream_iterator:
# We need to check if the player process still exists when
# using named pipes on Windows since the named pipe is not
# automatically closed by the player.
if is_win32 and is_fifo:
output.player.poll()
if output.player.returncode is not None:
log.info("Player closed")
break
try:
output.write(data)
except IOError as err:
if is_player and err.errno in ACCEPTABLE_ERRNO:
log.info("Player closed")
elif is_http and err.errno in ACCEPTABLE_ERRNO:
log.info("HTTP connection closed")
else:
console.exit("Error when writing to output: {0}, exiting", err)
break
except IOError as err:
console.exit("Error when reading from stream: {0}, exiting", err)
finally:
stream.close()
log.info("Stream ended") | Reads data from stream and then writes it to the output. |
def finalise_same_chip_constraints(substitutions, placements):
"""Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace).
"""
for merged_vertex in reversed(substitutions):
placement = placements.pop(merged_vertex)
for v in merged_vertex.vertices:
placements[v] = placement | Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace). |
def load_secrets(self, secret_path):
"""render secrets into config object"""
self._config = p_config.render_secrets(self.config_path, secret_path) | render secrets into config object |
def clear(self, *resource_types):
"""Clear cache for each provided APIResource class, or all resources if no classes are provided"""
resource_types = resource_types or tuple(self.__caches.keys())
for cls in resource_types:
# Clear and delete cache instances to guarantee no lingering references
self.__caches[cls].clear()
del self.__caches[cls] | Clear cache for each provided APIResource class, or all resources if no classes are provided |
def _get_fault_type_dummy_variables(self, rup):
"""
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rakes angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given.
"""
U, SS, NS, RS = 0, 0, 0, 0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1
else:
# normal
NS = 1
return U, SS, NS, RS | Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rakes angles within 30 of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given. |
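A standalone sanity check of the rake classification described above (angles in degrees).

import numpy as np

def classify(rake):
    # Mirrors the branching logic of the method above.
    if np.abs(rake) <= 30.0 or (180.0 - np.abs(rake)) <= 30.0:
        return 'strike-slip'
    elif 30.0 < rake < 150.0:
        return 'reverse'
    return 'normal'

for rake in (0.0, 90.0, -90.0, 170.0):
    print(rake, classify(rake))   # strike-slip, reverse, normal, strike-slip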
def parse_cidr (addr, infer=True, allow_host=False):
"""
Takes a CIDR address or plain dotted-quad, and returns a tuple of address
and count-of-network-bits.
Can infer the network bits based on network classes if infer=True.
Can also take a string in the form 'address/netmask', as long as the
netmask is representable in CIDR.
FIXME: This function is badly named.
"""
def check (r0, r1):
a = int(r0)
b = r1
if (not allow_host) and (a & ((1<<b)-1)):
raise RuntimeError("Host part of CIDR address is not zero (%s)"
% (addr,))
return (r0,32-r1)
addr = addr.split('/', 2)
if len(addr) == 1:
if infer is False:
return check(IPAddr(addr[0]), 0)
addr = IPAddr(addr[0])
b = 32-infer_netmask(addr)
m = (1<<b)-1
if (int(addr) & m) == 0:
# All bits in wildcarded part are 0, so we'll use the wildcard
return check(addr, b)
else:
# Some bits in the wildcarded part are set, so we'll assume it's a host
return check(addr, 0)
try:
wild = 32-int(addr[1])
except:
# Maybe they passed a netmask
m = int(IPAddr(addr[1]))
b = 0
while m & (1<<31):
b += 1
m <<= 1
if m & 0x7fffffff != 0:
raise RuntimeError("Netmask " + str(addr[1]) + " is not CIDR-compatible")
wild = 32-b
if not (wild >= 0 and wild <= 32):
raise RuntimeError("Invalid mask length")
return check(IPAddr(addr[0]), wild)
if not (wild >= 0 and wild <= 32):
raise RuntimeError("Invalid mask length")
return check(IPAddr(addr[0]), wild) | Takes a CIDR address or plain dotted-quad, and returns a tuple of address
and count-of-network-bits.
Can infer the network bits based on network classes if infer=True.
Can also take a string in the form 'address/netmask', as long as the
netmask is representable in CIDR.
FIXME: This function is badly named. |
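A standard-library illustration of the common case (using ipaddress rather than POX's IPAddr): split an address/prefix string into the network address and the count of network bits, rejecting a non-zero host part much like allow_host=False does.

import ipaddress

net = ipaddress.ip_network('10.0.0.0/8', strict=True)
print(net.network_address, net.prefixlen)   # 10.0.0.0 8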
def compute_loss_curves_maps(filename, builder, rlzi, monitor):
"""
:param filename: path to the datastore
:param builder: LossCurvesMapsBuilder instance
:param rlzi: realization index
:param monitor: Monitor instance
:returns: rlzi, (curves, maps)
"""
with datastore.read(filename) as dstore:
rlzs = dstore['losses_by_event']['rlzi']
losses = dstore['losses_by_event'][rlzs == rlzi]['loss']
return rlzi, builder.build_curves_maps(losses, rlzi) | :param filename: path to the datastore
:param builder: LossCurvesMapsBuilder instance
:param rlzi: realization index
:param monitor: Monitor instance
:returns: rlzi, (curves, maps) |
def Focus(self):
"""Brings the client window into focus.
"""
self._Skype._Api.allow_focus(self._Skype.Timeout)
self._Skype._DoCommand('FOCUS') | Brings the client window into focus. |
def get_client(self, request=None):
"""Return the client from the OAuth parameters."""
if not isinstance(request, oauth.Request):
request = self.get_oauth_request()
client_key = request.get_parameter('oauth_consumer_key')
if not client_key:
raise Exception('Missing "oauth_consumer_key" parameter in ' \
'OAuth "Authorization" header')
client = models.Client.get_by_key_name(client_key)
if not client:
raise Exception('Client "%s" not found.' % client_key)
return client | Return the client from the OAuth parameters. |
def compat_kwargs(kwargs):
"""To keep backwards compat change the kwargs to new names"""
warn_deprecations(kwargs)
for old, new in RENAMED_VARS.items():
if old in kwargs:
kwargs[new] = kwargs[old]
# update cross references
for c_old, c_new in RENAMED_VARS.items():
if c_new == new:
kwargs[c_old] = kwargs[new] | To keep backwards compat change the kwargs to new names |
def plot_predict(self, h=5, past_values=20, intervals=True,**kwargs):
""" Plots forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
- Error bars, forecasted_values, plot_values, plot_index
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, variance, lower, upper = self._construct_predict(self.latent_variables.get_z_values(),h)
full_predictions = np.append(self.data,predictions)
full_lower = np.append(self.data,lower)
full_upper = np.append(self.data,upper)
date_index = self.shift_dates(h)
# Plot values (how far to look back)
plot_values = full_predictions[-h-past_values:]*self._norm_std + self._norm_mean
plot_index = date_index[-h-past_values:]
# Lower and upper intervals
lower = np.append(full_predictions[-h-1],lower)
upper = np.append(full_predictions[-h-1],upper)
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:],
lower*self._norm_std + self._norm_mean,
upper*self._norm_std + self._norm_mean,
alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | Plots forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
- Error bars, forecasted_values, plot_values, plot_index |
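A hypothetical usage sketch (pyflux-style workflow); model is assumed to be an instance of the class defining plot_predict above, built on some univariate series.

model.fit()                                        # estimates the latent variables
model.plot_predict(h=10, past_values=30, intervals=True, figsize=(12, 8))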
def docx_process_table(table: DOCX_TABLE_TYPE,
config: TextProcessingConfig) -> str:
"""
Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+
"""
def get_cell_text(cell_) -> str:
cellparagraphs = [paragraph.text.strip()
for paragraph in cell_.paragraphs]
cellparagraphs = [x for x in cellparagraphs if x]
return '\n\n'.join(cellparagraphs)
ncols = 1
# noinspection PyTypeChecker
for row in table.rows:
ncols = max(ncols, len(row.cells))
pt = prettytable.PrettyTable(
field_names=list(range(ncols)),
encoding=ENCODING,
header=False,
border=True,
hrules=prettytable.ALL,
vrules=prettytable.NONE if config.plain else prettytable.ALL,
)
pt.align = 'l'
pt.valign = 't'
pt.max_width = max(config.width // ncols, config.min_col_width)
if config.plain:
# noinspection PyTypeChecker
for row in table.rows:
for i, cell in enumerate(row.cells):
n_before = i
n_after = ncols - i - 1
# ... use ncols, not len(row.cells), since "cells per row" is
# not constant, but prettytable wants a fixed number.
# (changed in v0.2.8)
ptrow = (
[''] * n_before +
[get_cell_text(cell)] +
[''] * n_after
)
assert(len(ptrow) == ncols)
pt.add_row(ptrow)
else:
# noinspection PyTypeChecker
for row in table.rows:
ptrow = [] # type: List[str]
# noinspection PyTypeChecker
for cell in row.cells:
ptrow.append(get_cell_text(cell))
ptrow += [''] * (ncols - len(ptrow)) # added in v0.2.8
assert (len(ptrow) == ncols)
pt.add_row(ptrow)
return pt.get_string() | Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+ |
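A hypothetical usage sketch with python-docx; the file path is a placeholder and the TextProcessingConfig keyword names are assumed to match the attributes read in the function body (width, min_col_width, plain).

import docx

doc = docx.Document('report.docx')
config = TextProcessingConfig(width=80, min_col_width=15, plain=True)
print(docx_process_table(doc.tables[0], config))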