code | docstring
---|---|
def _parse_title_url(html_chunk):
"""
Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings.
"""
url = None
title_tags = html_chunk.match(
["div", {"class": "polozka_nazev"}],
["a", None, has_param("href")]
)
if not title_tags:
return _parse_alt_title(html_chunk), _parse_alt_url(html_chunk)
title = title_tags[0]
url = normalize_url(BASE_URL, title.params["href"])
title = title.getContent()
if not title:
title = _parse_alt_title(html_chunk)
return title, url | Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings. |
def fetch_official_missions(data_dir, start_date, end_date):
"""
:param data_dir: (str) directory in which the output file will be saved
:param start_date: (datetime) first date of the range to be scraped
:param end_date: (datetime) last date of the range to be scraped
"""
official_missions = OfficialMissionsDataset()
df = official_missions.fetch(start_date, end_date)
save_to_csv(df, data_dir, "official-missions")
return df | :param data_dir: (str) directory in which the output file will be saved
:param start_date: (datetime) first date of the range to be scraped
:param end_date: (datetime) last date of the range to be scraped |
def logstats(self):
"""
Print the node's current statistics to log.
"""
lines = [
"node {} current stats".format(self),
"--------------------------------------------------------",
"node inbox size : {}".format(len(self.nodeInBox)),
"client inbox size : {}".format(len(self.clientInBox)),
"age (seconds) : {}".format(time.time() - self.created),
"next check for reconnect: {}".format(time.perf_counter() -
self.nodestack.nextCheck),
"node connections : {}".format(self.nodestack.conns),
"f : {}".format(self.f),
"master instance : {}".format(self.instances.masterId),
"replicas : {}".format(len(self.replicas)),
"view no : {}".format(self.viewNo),
"rank : {}".format(self.rank),
"msgs to replicas : {}".format(self.replicas.sum_inbox_len),
"msgs to view changer : {}".format(len(self.msgsToViewChanger)),
"action queue : {} {}".format(len(self.actionQueue),
id(self.actionQueue)),
"action queue stash : {} {}".format(len(self.aqStash),
id(self.aqStash)),
]
logger.info("\n".join(lines), extra={"cli": False}) | Print the node's current statistics to log. |
def get(self, collection_id):
"""
Retrieve a single collection.
To view a user’s private collections, the 'read_collections' scope is required.
:param collection_id [string]: The collection’s ID. Required.
:return: [Collection]: The Unsplash Collection.
"""
url = "/collections/%s" % collection_id
result = self._get(url)
return CollectionModel.parse(result) | Retrieve a single collection.
To view a user’s private collections, the 'read_collections' scope is required.
:param collection_id [string]: The collection’s ID. Required.
:return: [Collection]: The Unsplash Collection. |
def get_open_filenames(self):
"""Get the list of open files in the current stack"""
editorstack = self.editorstacks[0]
filenames = []
filenames += [finfo.filename for finfo in editorstack.data]
return filenames | Get the list of open files in the current stack |
def prompt(text, default=None, show_default=True, invisible=False,
confirm=False, skip=False, type=None, input_function=None):
'''Prompts for input from the user.
'''
t = determine_type(type, default)
input_function = get_input_fn(input_function, invisible)
if default is not None and show_default:
text = '{} [{}]: '.format(text, default)
while True:
val = prompt_fn(input_function, text, default, t, skip, repeat=True)
if not confirm or (skip and val is None):
return val
if val == prompt_fn(input_function, 'Confirm: ', default, t, repeat=True):
return val
echo('Error: The two values you entered do not match', True) | Prompts for input from the user. |
def compute_uncertainty_reward(logits, predictions):
"""Uncertainty reward based on logits."""
# TODO(rsepassi): Add support for L1/L2 loss models. Current code only
# works for softmax models.
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
predictions)
# Threshold
neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
# Sum across all but the batch dimension
reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
return summed / 10 | Uncertainty reward based on logits. |
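A minimal NumPy sketch of the same thresholded negative-log-probability reward, assuming a softmax model; the helper-free indexing is illustrative, not the tensor2tensor implementation:

```python
import numpy as np

def uncertainty_reward_np(logits, predictions):
    # log-softmax over the vocabulary (last) axis
    log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    # log-probability assigned to each predicted token
    picked = np.take_along_axis(log_probs, predictions[..., None], axis=-1)[..., 0]
    # threshold at -0.02, then sum over every axis except the batch axis
    neg_log_prob = np.maximum(-picked - 0.02, 0.0)
    return neg_log_prob.sum(axis=tuple(range(1, neg_log_prob.ndim))) / 10
```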
def shrink_indexes_in_place(self, triples):
"""Uses a union find to find segment."""
_ent_roots = self.UnionFind(self._ent_id)
_rel_roots = self.UnionFind(self._rel_id)
for t in triples:
_ent_roots.add(t.head)
_ent_roots.add(t.tail)
_rel_roots.add(t.relation)
for i, t in enumerate(triples):
h = _ent_roots.find(t.head)
r = _rel_roots.find(t.relation)
t = _ent_roots.find(t.tail)
triples[i] = kgedata.TripleIndex(h, r, t)
ents = bidict()
available_ent_idx = 0
for previous_idx, ent_exist in enumerate(_ent_roots.roots()):
if not ent_exist:
self._ents.inverse.pop(previous_idx)
else:
ents[self._ents.inverse[previous_idx]] = available_ent_idx
available_ent_idx += 1
rels = bidict()
available_rel_idx = 0
for previous_idx, rel_exist in enumerate(_rel_roots.roots()):
if not rel_exist:
self._rels.inverse.pop(previous_idx)
else:
rels[self._rels.inverse[previous_idx]] = available_rel_idx
available_rel_idx += 1
self._ents = ents
self._rels = rels
self._ent_id = available_ent_idx
self._rel_id = available_rel_idx | Uses a union-find to find segments. |
def bitstring_probs_to_z_moments(p):
"""
Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array
"""
zmat = np.array([[1, 1],
[1, -1]])
return _apply_local_transforms(p, (zmat for _ in range(p.ndim))) | Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array |
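For intuition, a single-qubit sketch of the transform above (assuming ``_apply_local_transforms`` contracts each qubit axis with the given matrix): with probabilities ``p = [p_0, p_1]``, multiplying by ``[[1, 1], [1, -1]]`` yields ``[p_0 + p_1, p_0 - p_1] = [<I>, <Z>]``.

```python
import numpy as np

zmat = np.array([[1, 1],
                 [1, -1]])
p = np.array([0.7, 0.3])   # P(bit=0) = 0.7, P(bit=1) = 0.3
z_moments = zmat @ p       # -> array([1.0, 0.4]) == [<I>, <Z>]
```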
def _CallEventHandler(self, Event, *Args, **KwArgs):
"""Calls all event handlers defined for given Event, additional parameters
will be passed unchanged to event handlers, all event handlers are fired on
separate threads.
:Parameters:
Event : str
Name of the event.
Args
Positional arguments for the event handlers.
KwArgs
Keyword arguments for the event handlers.
"""
if Event not in self._EventHandlers:
raise ValueError('%s is not a valid %s event name' % (Event, self.__class__.__name__))
args = list(map(repr, Args)) + ['%s=%s' % (key, repr(value)) for key, value in KwArgs.items()]  # list() so map()'s iterator can be concatenated
self.__Logger.debug('calling %s: %s', Event, ', '.join(args))
# Get a list of handlers for this event.
try:
handlers = [self._DefaultEventHandlers[Event]]
except KeyError:
handlers = []
try:
handlers.append(getattr(self._EventHandlerObject, Event))
except AttributeError:
pass
handlers.extend(self._EventHandlers[Event])
# Proceed only if there are handlers.
if handlers:
# Get the last thread for this event.
after = self._EventThreads.get(Event, None)
# Create a new thread, pass the last one to it so it can wait until it is finished.
thread = EventSchedulerThread(Event, after, handlers, Args, KwArgs)
# Store a weak reference to the new thread for this event.
self._EventThreads[Event] = thread
# Start the thread.
thread.start() | Calls all event handlers defined for the given Event. Additional parameters
are passed unchanged to the event handlers; all event handlers are fired on
separate threads.
:Parameters:
Event : str
Name of the event.
Args
Positional arguments for the event handlers.
KwArgs
Keyword arguments for the event handlers. |
def submit(self):
"""
Try to find an element with ID "[FORM_ID]_submit" and click on it. If no
such element exists, it falls back to the default behaviour - submitting
the form by pressing enter.
"""
elm_name = '%s_submit' % self.get_attribute('id')
try:
self.click(elm_name)
except NoSuchElementException:
super(Form, self).submit() | Try to find an element with ID "[FORM_ID]_submit" and click on it. If no
such element exists, it falls back to the default behaviour - submitting
the form by pressing enter. |
def main(argv=None):
''' Runs the program and handles command line options '''
parser = get_parser()
# Parse arguments and run the function
global args
args = parser.parse_args(argv)
args.func() | Runs the program and handles command line options |
def skip(self):
"""Skip this py-pdb command to avoid attaching within the same loop."""
line = self.line
self.line = ''
# 'line' is the statement line of the previous py-pdb command.
if line in self.lines:
if not self.skipping:
self.skipping = True
printflush('Skipping lines', end='')
printflush('.', end='')
return True
elif line:
self.lines.append(line)
if len(self.lines) > 30:
self.lines.popleft()
return False | Skip this py-pdb command to avoid attaching within the same loop. |
def symbol(name: str=None, symbol_type: Type[Symbol]=Symbol) -> 'SymbolWildcard':
"""Create a `SymbolWildcard` that matches a single `Symbol` argument.
Args:
name:
Optional variable name for the wildcard.
symbol_type:
An optional subclass of `Symbol` to further limit which kind of symbols are
matched by the wildcard.
Returns:
A `SymbolWildcard` that matches the *symbol_type*.
"""
if isinstance(name, type) and issubclass(name, Symbol) and symbol_type is Symbol:
return SymbolWildcard(name)
return SymbolWildcard(symbol_type, variable_name=name) | Create a `SymbolWildcard` that matches a single `Symbol` argument.
Args:
name:
Optional variable name for the wildcard.
symbol_type:
An optional subclass of `Symbol` to further limit which kind of symbols are
matched by the wildcard.
Returns:
A `SymbolWildcard` that matches the *symbol_type*. |
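A hypothetical usage sketch; the `Constant` subclass below is illustrative and not from the source:

```python
# match any single Symbol and bind it to the variable name 'x'
w_any = symbol('x')

class Constant(Symbol):
    """Hypothetical Symbol subclass used only for this example."""

w_const = symbol('c', Constant)   # match only Constant instances, bound to 'c'
w_anon = symbol(Constant)         # a Symbol subclass as the first argument is also accepted
```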
def clips_value(self, dvalue):
"""Convert a Python type into CLIPS."""
try:
return VALUES[type(dvalue)](self._env, dvalue)
except KeyError:
if isinstance(dvalue, (list, tuple)):
return self.list_to_multifield(dvalue)
if isinstance(dvalue, (clips.facts.Fact)):
return dvalue._fact
if isinstance(dvalue, (clips.classes.Instance)):
return dvalue._ist
return ffi.NULL | Convert a Python type into CLIPS. |
def parametrize(self):
r"""
Reads all data and discretizes it into discrete trajectories.
"""
for element in self._chain:
if not element.is_reader and not element._estimated:
element.estimate(element.data_producer, stride=self.param_stride, chunksize=self.chunksize)
self._estimated = True | Reads all data and discretizes it into discrete trajectories. |
def rename(self, old_file_path, new_file_path, force_replace=False):
"""Renames a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Args:
old_file_path: Path to filesystem object to rename.
new_file_path: Path to where the filesystem object will live
after this call.
force_replace: If set and destination is an existing file, it
will be replaced even under Windows if the user has
permissions, otherwise replacement happens under Unix only.
Raises:
OSError: if old_file_path does not exist.
OSError: if new_file_path is an existing directory
(Windows, or Posix if old_file_path points to a regular file)
OSError: if old_file_path is a directory and new_file_path a file
OSError: if new_file_path is an existing file and force_replace
not set (Windows only).
OSError: if new_file_path is an existing file and could not be
removed (Posix, or Windows with force_replace set).
OSError: if dirname(new_file_path) does not exist.
OSError: if the file would be moved to another filesystem
(e.g. mount point).
"""
ends_with_sep = self.ends_with_path_separator(old_file_path)
old_file_path = self.absnormpath(old_file_path)
new_file_path = self.absnormpath(new_file_path)
if not self.exists(old_file_path, check_link=True):
self.raise_os_error(errno.ENOENT, old_file_path, 2)
if ends_with_sep:
self._handle_broken_link_with_trailing_sep(old_file_path)
old_object = self.lresolve(old_file_path)
if not self.is_windows_fs:
self._handle_posix_dir_link_errors(
new_file_path, old_file_path, ends_with_sep)
if self.exists(new_file_path, check_link=True):
new_file_path = self._rename_to_existing_path(
force_replace, new_file_path, old_file_path,
old_object, ends_with_sep)
if not new_file_path:
return
old_dir, old_name = self.splitpath(old_file_path)
new_dir, new_name = self.splitpath(new_file_path)
if not self.exists(new_dir):
self.raise_os_error(errno.ENOENT, new_dir)
old_dir_object = self.resolve(old_dir)
new_dir_object = self.resolve(new_dir)
if old_dir_object.st_dev != new_dir_object.st_dev:
self.raise_os_error(errno.EXDEV, old_file_path)
if not S_ISDIR(new_dir_object.st_mode):
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
new_file_path)
if new_dir_object.has_parent_object(old_object):
self.raise_os_error(errno.EINVAL, new_file_path)
object_to_rename = old_dir_object.get_entry(old_name)
old_dir_object.remove_entry(old_name, recursive=False)
object_to_rename.name = new_name
new_name = new_dir_object._normalized_entryname(new_name)
if new_name in new_dir_object.contents:
# in case of overwriting remove the old entry first
new_dir_object.remove_entry(new_name)
new_dir_object.add_entry(object_to_rename) | Renames a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Args:
old_file_path: Path to filesystem object to rename.
new_file_path: Path to where the filesystem object will live
after this call.
force_replace: If set and destination is an existing file, it
will be replaced even under Windows if the user has
permissions, otherwise replacement happens under Unix only.
Raises:
OSError: if old_file_path does not exist.
OSError: if new_file_path is an existing directory
(Windows, or Posix if old_file_path points to a regular file)
OSError: if old_file_path is a directory and new_file_path a file
OSError: if new_file_path is an existing file and force_replace
not set (Windows only).
OSError: if new_file_path is an existing file and could not be
removed (Posix, or Windows with force_replace set).
OSError: if dirname(new_file_path) does not exist.
OSError: if the file would be moved to another filesystem
(e.g. mount point). |
def check(self, dsm, **kwargs):
"""
Check if matrix and its mediation matrix are compliant.
It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False
"""
# generate complete_mediation_matrix according to each category
med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
return CompleteMediation.matrices_compliance(dsm, med_matrix) | Check if matrix and its mediation matrix are compliant.
It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False |
def db_exists(name, user=None, password=None, host=None, port=None):
'''
Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
'''
dbs = db_list(user, password, host, port)
if not isinstance(dbs, list):
return False
return name in [db['name'] for db in dbs] | Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port> |
@contextlib.contextmanager  # assumed decorator (needs `import contextlib`) so the method works as the context manager described below
def step(self, compute=True):
"""Context manager to gradually build a history row, then commit it at the end.
To reduce the number of conditionals needed, code can check run.history.compute:
with run.history.step(batch_idx % log_interval == 0):
run.history.add({"nice": "ok"})
if run.history.compute:
# Something expensive here
"""
if self.batched: # we're already in a context manager
raise wandb.Error("Nested History step contexts aren't supported")
self.batched = True
self.compute = compute
yield self
if compute:
self._write()
self.compute = True | Context manager to gradually build a history row, then commit it at the end.
To reduce the number of conditionals needed, code can check run.history.compute:
with run.history.step(batch_idx % log_interval == 0):
run.history.add({"nice": "ok"})
if run.history.compute:
# Something expensive here |
def save_settings(file_path, record_details, overwrite=False, secret_key=''):
''' a method to save dictionary typed data to a local file
:param file_path: string with path to settings file
:param record_details: dictionary with record details
:param overwrite: [optional] boolean to overwrite existing file data
:param secret_key: [optional] string with key to encrypt drep file
:return: string with file path
'''
# validate inputs
title = 'save_settings'
try:
_path_arg = '%s(file_path=%s)' % (title, str(file_path))
except:
raise ValueError('%s(file_path=...) must be a string.' % title)
_details_arg = '%s(record_details={...})' % title
if not isinstance(record_details, dict):
raise ValueError('%s must be a dictionary.' % _details_arg)
if secret_key:
try:
_secret_arg = '%s(secret_key=%s)' % (title, str(secret_key))
except:
raise ValueError('%s(secret_key=...) must be a string.' % title)
# parse extension type
ext_map = {}
file_extensions = {
"json": ".+\\.json$",
"json.gz": ".+\\.json\\.gz$",
"yaml": ".+\\.ya?ml$",
"yaml.gz": ".+\\.ya?ml\\.gz$",
"drep": ".+\\.drep$"
}
import re
for key, value in file_extensions.items():
file_pattern = re.compile(value)
if file_pattern.findall(file_path):
ext_map[key] = True
else:
ext_map[key] = False
# construct file data
file_time = 0
file_data = ''.encode('utf-8')
if ext_map['json']:
import json
file_data = json.dumps(record_details, indent=2).encode('utf-8')
elif ext_map['yaml']:
import yaml
file_data = yaml.dump(record_details).encode('utf-8')
elif ext_map['json.gz']:
import json
import gzip
file_bytes = json.dumps(record_details).encode('utf-8')
file_data = gzip.compress(file_bytes)
elif ext_map['yaml.gz']:
import yaml
import gzip
file_bytes = yaml.dump(record_details).encode('utf-8')
file_data = gzip.compress(file_bytes)
elif ext_map['drep']:
from labpack.compilers import drep
file_data = drep.dump(record_details, secret_key)
file_time = 1
else:
raise ValueError('%s must be one of %s file types.' % (_path_arg, list(ext_map.keys())))
# check overwrite exception
import os
if not overwrite:
if os.path.exists(file_path):
raise Exception('%s already exists. To overwrite %s, set overwrite=True' % (_path_arg, _path_arg))
# create directories in path to file
dir_path = os.path.split(file_path)
if dir_path[0]:
if not os.path.exists(dir_path[0]):
os.makedirs(dir_path[0])
# write data to file
with open(file_path, 'wb') as f:
f.write(file_data)
f.close()
# eliminate update and access time metadata (for drep files)
if file_time:
os.utime(file_path, times=(file_time, file_time))
# TODO add windows creation time wiping
# http://stackoverflow.com/questions/4996405/how-do-i-change-the-file-creation-date-of-a-windows-file-from-python
return file_path | a method to save dictionary typed data to a local file
:param file_path: string with path to settings file
:param record_details: dictionary with record details
:param overwrite: [optional] boolean to overwrite existing file data
:param secret_key: [optional] string with key to encrypt drep file
:return: string with file path |
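A short usage sketch, assuming the surrounding labpack package; the path and payload are illustrative:

```python
record = {'name': 'demo', 'retries': 3}

# the file extension selects the serializer: .json, .json.gz, .yaml, .yaml.gz or .drep
saved_path = save_settings('config/settings.yaml', record, overwrite=True)

# .drep files additionally require a secret_key for encryption
# saved_path = save_settings('config/settings.drep', record, secret_key='...', overwrite=True)
```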
def get_statements(self):
"""Process reader output to produce INDRA Statements."""
for k, v in self.reader_output.items():
for interaction in v['interactions']:
self._process_interaction(k, interaction, v['text'], self.pmid,
self.extra_annotations) | Process reader output to produce INDRA Statements. |
def get_variable_accesses(self, variable, same_name=False):
"""
Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list
"""
if variable.region == 'global':
return self.global_manager.get_variable_accesses(variable, same_name=same_name)
elif variable.region in self.function_managers:
return self.function_managers[variable.region].get_variable_accesses(variable, same_name=same_name)
l.warning('get_variable_accesses(): Region %s is not found.', variable.region)
return [ ] | Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list |
def image_to_string(image, lang=None, boxes=False):
'''
Runs tesseract on the specified image. First, the image is written to disk,
and then the tesseract command is run on the image. Tesseract's result is
read, and the temporary files are erased.
'''
input_file_name = '%s.bmp' % tempnam()
output_file_name_base = tempnam()
if not boxes:
output_file_name = '%s.txt' % output_file_name_base
else:
output_file_name = '%s.box' % output_file_name_base
try:
image.save(input_file_name)
status, error_string = run_tesseract(input_file_name,
output_file_name_base,
lang=lang,
boxes=boxes)
if status:
errors = get_errors(error_string)
raise TesseractError(status, errors)
f = open(output_file_name)  # open() instead of the removed Python 2 file() builtin
try:
return f.read().strip()
finally:
f.close()
finally:
cleanup(input_file_name)
cleanup(output_file_name) | Runs tesseract on the specified image. First, the image is written to disk,
and then the tesseract command is run on the image. Tesseract's result is
read, and the temporary files are erased. |
def get_command(self, ctx, name):
"""Get a callable command object."""
if name not in self.daemon_class.list_actions():
return None
# The context object is a Daemon object
daemon = ctx.obj
def subcommand(debug=False):
"""Call a daemonocle action."""
if daemon.detach and debug:
daemon.detach = False
daemon.do_action(name)
# Override the docstring for the function so that it shows up
# correctly in the help output
subcommand.__doc__ = daemon.get_action(name).__doc__
if name == 'start':
# Add a --debug option for start
subcommand = click.option(
'--debug', is_flag=True,
help='Do NOT detach and run in the background.'
)(subcommand)
# Make it into a click command
subcommand = click.command(
name, options_metavar=self.options_metavar)(subcommand)
return subcommand | Get a callable command object. |
def get_gradebook_ids_by_grade_system(self, grade_system_id):
"""Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_grade_system_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
grade_system = lookup_session.get_grade_system(grade_system_id)
id_list = []
for idstr in grade_system._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def power(args):
"""
%prog power
Compare performances of various variant callers on simulated STR datasets.
This compares the power of various evidence types.
"""
p = OptionParser(power.__doc__)
p.add_option('--maxinsert', default=300, type="int",
help="Maximum number of repeats")
add_simulate_options(p)
opts, args, iopts = p.set_image_options(args, figsize="10x10", format="png")
if len(args) != 0:
sys.exit(not p.print_help())
max_insert = opts.maxinsert
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
figsize=(iopts.w, iopts.h))
plt.tight_layout(pad=3)
color = "lightslategray"
# ax1: Spanning
tredparse_results = parse_results("tredparse_results_het-spanning.txt")
title = SIMULATED_DIPLOID + " (Sub-model 1: Spanning reads)"
plot_compare(ax1, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
# ax2: Partial
tredparse_results = parse_results("tredparse_results_het-partial.txt", exclude=20)
title = SIMULATED_DIPLOID + " (Sub-model 2: Partial reads)"
plot_compare(ax2, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
# ax3: Repeat
tredparse_results = parse_results("tredparse_results_het-repeat.txt", exclude=20)
# HACK (repeat reads won't work under 50)
tredparse_results = [x for x in tredparse_results if x[0] > 50]
title = SIMULATED_DIPLOID + " (Sub-model 3: Repeat-only reads)"
plot_compare(ax3, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
# ax4: Pair
tredparse_results = parse_results("tredparse_results_het-pair.txt", exclude=20)
title = SIMULATED_DIPLOID + " (Sub-model 4: Paired-end reads)"
plot_compare(ax4, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
for ax in (ax1, ax2, ax3, ax4):
ax.set_xlim(0, max_insert)
ax.set_ylim(0, max_insert)
root = fig.add_axes([0, 0, 1, 1])
pad = .03
panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"),
(pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D")))
normalize_axes(root)
image_name = "power." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog power
Compare performances of various variant callers on simulated STR datasets.
This compares the power of various evidence types. |
def _set_label(self, which, label, **kwargs):
"""Private method for setting labels.
Args:
which (str): The indicator of which part of the plots
to adjust. This currently handles `xlabel`/`ylabel`,
and `title`.
label (str): The label to be added.
fontsize (int, optional): Fontsize for associated label. Default
is None.
"""
prop_default = {
'fontsize': 18,
}
for prop, default in prop_default.items():
kwargs[prop] = kwargs.get(prop, default)
setattr(self.label, which, label)
setattr(self.label, which + '_kwargs', kwargs)
return | Private method for setting labels.
Args:
which (str): The indicator of which part of the plots
to adjust. This currently handles `xlabel`/`ylabel`,
and `title`.
label (str): The label to be added.
fontsize (int, optional): Fontsize for associated label. Default
is None. |
def rcm_vertex_order(vertices_resources, nets):
"""A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer.
"""
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex | A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer. |
def get_plugins():
"""Gets available plugins by looking into the plugins/ directory"""
if os.path.exists('plugins'):
for filename in sorted([f for f in os.listdir('plugins')
if not os.path.isdir(f) and f.endswith(".py")]):
plugin_name = filename[:-3]
try:
plugin = import_plugin(plugin_name)
except SystemExit as e:
description = "Plugin has a syntax error"
else:
description = plugin.__doc__ or "No description found"
yield {plugin_name: description} | Gets available plugins by looking into the plugins/ directory |
def from_project_config(cls, project_dict, packages_dict=None):
"""Create a project from its project and package configuration, as read
by yaml.safe_load().
:param project_dict dict: The dictionary as read from disk
:param packages_dict Optional[dict]: If it exists, the packages file as
read from disk.
:raises DbtProjectError: If the project is missing or invalid, or if
the packages file exists and is invalid.
:returns Project: The project, with defaults populated.
"""
try:
project_dict = cls._preprocess(project_dict)
except RecursionException:
raise DbtProjectError(
'Cycle detected: Project input has a reference to itself',
project=project_dict
)
# just for validation.
try:
ProjectContract(**project_dict)
except ValidationException as e:
raise DbtProjectError(str(e))
# name/version are required in the Project definition, so we can assume
# they are present
name = project_dict['name']
version = project_dict['version']
# this is added at project_dict parse time and should always be here
# once we see it.
project_root = project_dict['project-root']
# this is only optional in the sense that if it's not present, it needs
# to have been a cli argument.
profile_name = project_dict.get('profile')
# these are optional
source_paths = project_dict.get('source-paths', ['models'])
macro_paths = project_dict.get('macro-paths', ['macros'])
data_paths = project_dict.get('data-paths', ['data'])
test_paths = project_dict.get('test-paths', ['test'])
analysis_paths = project_dict.get('analysis-paths', [])
docs_paths = project_dict.get('docs-paths', source_paths[:])
target_path = project_dict.get('target-path', 'target')
archive_paths = project_dict.get('archive-paths', ['archives'])
# should this also include the modules path by default?
clean_targets = project_dict.get('clean-targets', [target_path])
log_path = project_dict.get('log-path', 'logs')
modules_path = project_dict.get('modules-path', 'dbt_modules')
# in the default case we'll populate this once we know the adapter type
quoting = project_dict.get('quoting', {})
models = project_dict.get('models', {})
on_run_start = project_dict.get('on-run-start', [])
on_run_end = project_dict.get('on-run-end', [])
archive = project_dict.get('archive', [])
seeds = project_dict.get('seeds', {})
dbt_raw_version = project_dict.get('require-dbt-version', '>=0.0.0')
try:
dbt_version = _parse_versions(dbt_raw_version)
except SemverException as e:
raise DbtProjectError(str(e))
packages = package_config_from_data(packages_dict)
project = cls(
project_name=name,
version=version,
project_root=project_root,
profile_name=profile_name,
source_paths=source_paths,
macro_paths=macro_paths,
data_paths=data_paths,
test_paths=test_paths,
analysis_paths=analysis_paths,
docs_paths=docs_paths,
target_path=target_path,
archive_paths=archive_paths,
clean_targets=clean_targets,
log_path=log_path,
modules_path=modules_path,
quoting=quoting,
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
archive=archive,
seeds=seeds,
dbt_version=dbt_version,
packages=packages
)
# sanity check - this means an internal issue
project.validate()
return project | Create a project from its project and package configuration, as read
by yaml.safe_load().
:param project_dict dict: The dictionary as read from disk
:param packages_dict Optional[dict]: If it exists, the packages file as
read from disk.
:raises DbtProjectError: If the project is missing or invalid, or if
the packages file exists and is invalid.
:returns Project: The project, with defaults populated. |
def q(cls, **kwargs):
''' Creates an iterator over the members of this class that applies the
given filters and returns only the elements matching them '''
redis = cls.get_redis()
return QuerySet(cls, redis.sscan_iter(cls.members_key())) | Creates an iterator over the members of this class that applies the
given filters and returns only the elements matching them |
def loglike(self, y, f, var=None):
r"""
Gaussian log likelihood.
Parameters
----------
y: ndarray
array of 0, 1 valued integers of targets
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
logp: ndarray
the log likelihood of each y given each f under this
likelihood.
"""
# way faster than calling norm.logpdf
var = self._check_param(var)
y, f = np.broadcast_arrays(y, f)
ll = - 0.5 * (np.log(2 * np.pi * var) + (y - f)**2 / var)
return ll | Gaussian log likelihood.
Parameters
----------
y: ndarray
array of 0, 1 valued integers of targets
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
logp: ndarray
the log likelihood of each y given each f under this
likelihood. |
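As a quick sanity check (an illustrative sketch, not part of the library), the closed-form expression above agrees with SciPy's Gaussian log-pdf:

```python
import numpy as np
from scipy.stats import norm

y, f, var = 1.2, 0.8, 0.5
manual = -0.5 * (np.log(2 * np.pi * var) + (y - f) ** 2 / var)
assert np.isclose(manual, norm.logpdf(y, loc=f, scale=np.sqrt(var)))
```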
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) | Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid. |
def _validate_derived_from(cursor, model):
"""Given a database cursor and model, check the derived-from
value accurately points to content in the archive.
The value can be nothing or must point to existing content.
"""
derived_from_uri = model.metadata.get('derived_from_uri')
if derived_from_uri is None:
return # bail out early
# Can we parse the value?
try:
ident_hash = parse_archive_uri(derived_from_uri)
uuid_, version = split_ident_hash(ident_hash, split_version=True)
except (ValueError, IdentHashSyntaxError, IdentHashShortId) as exc:
raise exceptions.InvalidMetadata('derived_from_uri', derived_from_uri,
original_exception=exc)
# Is the ident-hash a valid pointer?
args = [uuid_]
table = 'modules'
version_condition = ''
if version != (None, None,):
args.extend(version)
table = 'modules'
version_condition = " AND major_version = %s" \
" AND minor_version {} %s" \
.format(version[1] is None and 'is' or '=')
cursor.execute("""SELECT 't' FROM {} WHERE uuid::text = %s{}"""
.format(table, version_condition), args)
try:
_exists = cursor.fetchone()[0] # noqa
except TypeError: # None type
raise exceptions.InvalidMetadata('derived_from_uri', derived_from_uri)
# Assign the derived_from value so that we don't have to split it again.
model.metadata['derived_from'] = ident_hash | Given a database cursor and model, check that the derived-from
value accurately points to content in the archive.
The value can be nothing or must point to existing content. |
def proc_ovrds(**kwargs):
"""
Bloomberg overrides
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_ovrds(DVD_Start_Dt='20180101')
[('DVD_Start_Dt', '20180101')]
>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)
[('DVD_Start_Dt', '20180101')]
"""
return [
(k, v) for k, v in kwargs.items()
if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS
] | Bloomberg overrides
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_ovrds(DVD_Start_Dt='20180101')
[('DVD_Start_Dt', '20180101')]
>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)
[('DVD_Start_Dt', '20180101')] |
def get_session(self, token=None, signature=None):
'''
If provided a `token` parameter, tries to retrieve a stored
`rauth.OAuth1Session` instance. Otherwise generates a new session
instance with the :class:`rauth.OAuth1Service.consumer_key` and
:class:`rauth.OAuth1Service.consumer_secret` stored on the
`rauth.OAuth1Service` instance.
:param token: A tuple of strings with which to memoize the session
object instance.
:type token: tuple
'''
if token is not None:
access_token, access_token_secret = token
session = self.session_obj(self.consumer_key,
self.consumer_secret,
access_token,
access_token_secret,
signature or self.signature_obj,
service=self)
else: # pragma: no cover
signature = signature or self.signature_obj
session = self.session_obj(self.consumer_key,
self.consumer_secret,
signature=signature,
service=self)
return session | If provided a `token` parameter, tries to retrieve a stored
`rauth.OAuth1Session` instance. Otherwise generates a new session
instance with the :class:`rauth.OAuth1Service.consumer_key` and
:class:`rauth.OAuth1Service.consumer_secret` stored on the
`rauth.OAuth1Service` instance.
:param token: A tuple of strings with which to memoize the session
object instance.
:type token: tuple |
def attention_lm_attention_moe_tiny():
"""Cheap model for debugging.
Returns:
an hparams object.
"""
hparams = attention_lm_moe_small()
hparams.moe_layers = ""
hparams.attention_num_experts = 128
hparams.filter_size = 8192
hparams.attention_type = AttentionType.LOCAL_EXPERTS
return hparams | Cheap model for debugging.
Returns:
an hparams object. |
def is_used(self, regs, i, top=None):
""" Checks whether any of the given regs are required from the given point
to the end or not.
"""
if i < 0:
i = 0
if self.lock:
return True
regs = list(regs) # make a copy
if top is None:
top = len(self)
else:
top -= 1
for ii in range(i, top):
for r in self.mem[ii].requires:
if r in regs:
return True
for r in self.mem[ii].destroys:
if r in regs:
regs.remove(r)
if not regs:
return False
self.lock = True
result = self.goes_requires(regs)
self.lock = False
return result | Checks whether any of the given regs are required from the given point
to the end or not. |
def get_variable_days(self, year):
"""
Add Late Summer holiday (First Monday of September)
"""
days = super(LateSummer, self).get_variable_days(year)
days.append((
self.get_nth_weekday_in_month(year, 9, MON),
"Late Summer Holiday"
))
return days | Add Late Summer holiday (First Monday of September) |
def create_service(self, *args, **kwargs):
"""Create a service to current scope.
See :class:`pykechain.Client.create_service` for available parameters.
.. versionadded:: 1.13
"""
return self._client.create_service(*args, scope=self.id, **kwargs) | Create a service to current scope.
See :class:`pykechain.Client.create_service` for available parameters.
.. versionadded:: 1.13 |
def items(self):
"""
Returns a list of the items that are linked to this layer.
:return [<XNode> || <XNodeConnection>, ..]
"""
from projexui.widgets.xnodewidget import XNode, XNodeConnection
output = []
for item in self.scene().items():
if not (isinstance(item, XNode) or
isinstance(item, XNodeConnection)):
continue
if item.layer() == self:
output.append(item)
return output | Returns a list of the items that are linked to this layer.
:return [<XNode> || <XNodeConnection>, ..] |
def _serialize(self):
"""
A helper method to build a dict of all mutable Properties of
this object
"""
result = { a: getattr(self, a) for a in type(self).properties
if type(self).properties[a].mutable }
for k, v in result.items():
if isinstance(v, Base):
result[k] = v.id
return result | A helper method to build a dict of all mutable Properties of
this object |
def make_steam64(id=0, *args, **kwargs):
"""
Returns steam64 from various other representations.
.. code:: python
make_steam64() # invalid steamid
make_steam64(12345) # accountid
make_steam64('12345')
make_steam64(id=12345, type='Invalid', universe='Invalid', instance=0)
make_steam64(103582791429521412) # steam64
make_steam64('103582791429521412')
make_steam64('STEAM_1:0:2') # steam2
make_steam64('[g:1:4]') # steam3
"""
accountid = id
etype = EType.Invalid
universe = EUniverse.Invalid
instance = None
if len(args) == 0 and len(kwargs) == 0:
value = str(accountid)
# numeric input
if value.isdigit():
value = int(value)
# 32 bit account id
if 0 < value < 2**32:
accountid = value
etype = EType.Individual
universe = EUniverse.Public
# 64 bit
elif value < 2**64:
return value
# textual input e.g. [g:1:4]
else:
result = steam2_to_tuple(value) or steam3_to_tuple(value)
if result:
(accountid,
etype,
universe,
instance,
) = result
else:
accountid = 0
elif len(args) > 0:
length = len(args)
if length == 1:
etype, = args
elif length == 2:
etype, universe = args
elif length == 3:
etype, universe, instance = args
else:
raise TypeError("Takes at most 4 arguments (%d given)" % length)
if len(kwargs) > 0:
etype = kwargs.get('type', etype)
universe = kwargs.get('universe', universe)
instance = kwargs.get('instance', instance)
etype = (EType(etype)
if isinstance(etype, (int, EType))
else EType[etype]
)
universe = (EUniverse(universe)
if isinstance(universe, (int, EUniverse))
else EUniverse[universe]
)
if instance is None:
instance = 1 if etype in (EType.Individual, EType.GameServer) else 0
assert instance <= 0xffffF, "instance larger than 20bits"
return (universe << 56) | (etype << 52) | (instance << 32) | accountid | Returns steam64 from various other representations.
.. code:: python
make_steam64() # invalid steamid
make_steam64(12345) # accountid
make_steam64('12345')
make_steam64(id=12345, type='Invalid', universe='Invalid', instance=0)
make_steam64(103582791429521412) # steam64
make_steam64('103582791429521412')
make_steam64('STEAM_1:0:2') # steam2
make_steam64('[g:1:4]') # steam3 |
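The return value packs four bit fields into a 64-bit integer. A small sketch of the layout by hand; the enum values for a public individual account (universe=1, type=1) follow the common Steam convention and are stated here as an assumption:

```python
universe, etype, instance, accountid = 1, 1, 1, 12345

steam64 = (universe << 56) | (etype << 52) | (instance << 32) | accountid
# bits 56-63: universe, bits 52-55: type, bits 32-51: instance, bits 0-31: account id
# here: 0x0110000100003039
```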
def __get_league_object():
"""Returns the xml object corresponding to the league
Only designed for internal use"""
# get data
data = mlbgame.data.get_properties()
# return league object
return etree.parse(data).getroot().find('leagues').find('league') | Returns the xml object corresponding to the league
Only designed for internal use |
def _commit(self):
"""
:return: (dict) Response object content
"""
assert self.uri is not None, Exception("BadArgument: uri property cannot be None")
url = '{}/{}'.format(self.uri, self.__class__.__name__)
serialized_json = jsonpickle.encode(self, unpicklable=False, )
headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))}
response = Http.post(url=url, data=serialized_json, headers=headers)
if response.status_code != 200:
from ArubaCloud.base.Errors import MalformedJsonRequest
raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code))
content = jsonpickle.decode(response.content.decode("utf-8"))
if content['ResultCode'] == 17:
from ArubaCloud.base.Errors import OperationAlreadyEnqueued
raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__))
if content['Success'] is False:
from ArubaCloud.base.Errors import RequestFailed
raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content))
return content | :return: (dict) Response object content |
def clean_line(str, delimiter):
"""Split string on given delimiter, remove whitespace from each field."""
return [x.strip() for x in str.strip().split(delimiter) if x != ''] | Split string on given delimiter, remove whitespace from each field. |
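For example (illustrative values):

```python
clean_line('  alpha , beta ,, gamma ', ',')   # -> ['alpha', 'beta', 'gamma']
```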
def resample(self, section_length):
"""
Resample this line into sections.
The first point in the resampled line corresponds
to the first point in the original line.
Starting from the first point in the original line, a line
segment is defined as the line connecting the last point in the
resampled line and the next point in the original line.
The line segment is then split into sections of length equal to
``section_length``. The resampled line is obtained
by concatenating all sections.
The number of sections in a line segment is calculated as follows:
``round(segment_length / section_length)``.
Note that the resulting line has a length that is an exact multiple of
``section_length``, therefore its length is in general smaller
or greater (depending on the rounding) than the length
of the original line.
For a straight line, the difference between the resulting length
and the original length is at maximum half of the ``section_length``.
For a curved line, the difference may be larger,
because of corners getting cut.
:param section_length:
The length of the section, in km.
:type section_length:
float
:returns:
A new line resampled into sections based on the given length.
:rtype:
An instance of :class:`Line`
"""
if len(self.points) < 2:
return Line(self.points)
resampled_points = []
# 1. Resample the first section. 2. Loop over the remaining points
# in the line and resample the remaining sections.
# 3. Extend the list with the resampled points, except the first one
# (because it's already contained in the previous set of
# resampled points).
resampled_points.extend(
self.points[0].equally_spaced_points(self.points[1],
section_length)
)
# Skip the first point, it's already resampled
for i in range(2, len(self.points)):
points = resampled_points[-1].equally_spaced_points(
self.points[i], section_length
)
resampled_points.extend(points[1:])
return Line(resampled_points) | Resample this line into sections.
The first point in the resampled line corresponds
to the first point in the original line.
Starting from the first point in the original line, a line
segment is defined as the line connecting the last point in the
resampled line and the next point in the original line.
The line segment is then split into sections of length equal to
``section_length``. The resampled line is obtained
by concatenating all sections.
The number of sections in a line segment is calculated as follows:
``round(segment_length / section_length)``.
Note that the resulting line has a length that is an exact multiple of
``section_length``, therefore its length is in general smaller
or greater (depending on the rounding) than the length
of the original line.
For a straight line, the difference between the resulting length
and the original length is at maximum half of the ``section_length``.
For a curved line, the difference may be larger,
because of corners getting cut.
:param section_length:
The length of the section, in km.
:type section_length:
float
:returns:
A new line resampled into sections based on the given length.
:rtype:
An instance of :class:`Line` |
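As a numeric illustration of the rounding behaviour described above: a straight 10 km segment resampled with ``section_length = 3`` km gives ``round(10 / 3) = 3`` sections, so the resampled line is 9 km long, i.e. 1 km (less than half a section) shorter than the original.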
def expand_hostname_range(line = None):
'''
A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: http://ansible.github.com/patterns.html#hosts-and-groups
'''
all_hosts = []
if line:
# A hostname such as db[1:6]-node is considered to consist of
# three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
(head, nrange, tail) = line.replace('[','|').replace(']','|').split('|')
bounds = nrange.split(":")
if len(bounds) != 2:
raise errors.AnsibleError("host range incorrectly specified")
beg = bounds[0]
end = bounds[1]
if not beg:
beg = "0"
if not end:
raise errors.AnsibleError("host range end value missing")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise errors.AnsibleError("host range format incorrectly specified!")
fill = lambda _: str(_).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise errors.AnsibleError("host range format incorrectly specified!")
seq = string.ascii_letters[i_beg:i_end+1]
except ValueError: # not a alpha range
seq = range(int(beg), int(end)+1)
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
all_hosts.append(hname)
return all_hosts | A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: http://ansible.github.com/patterns.html#hosts-and-groups |
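Illustrative expansions (the hostnames are hypothetical):

```python
expand_hostname_range('db[1:3]-node')   # -> ['db1-node', 'db2-node', 'db3-node']
expand_hostname_range('web[01:03]')     # -> ['web01', 'web02', 'web03']
expand_hostname_range('host[a:c]')      # -> ['hosta', 'hostb', 'hostc']
```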
def decide_k(airport_code):
"""A function to decide if a leading 'K' is throwing off an airport match and return the correct code."""
if airport_code[:1].upper() == 'K':
try: # if there's a match without the K that's likely what it is.
return Airport.objects.get(location_identifier__iexact=airport_code[1:]).location_identifier
except Airport.DoesNotExist:
return airport_code
else:
return airport_code | A function to decide if a leading 'K' is throwing off an airport match and return the correct code. |
def strip(self, text):
'''Return string with markup tags removed.'''
tags, results = [], []
return self.re_tag.sub(lambda m: self.clear_tag(m, tags, results), text) | Return string with markup tags removed. |
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
"""Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501
"""
# implicitly checks whether installed, and does our backend magic:
_get_plt()
# We originally delegated down to SARIMAX model wrapper, but
# statsmodels makes it difficult to trust their API, so we just re-
# implemented a common method for all results wrappers.
from statsmodels.graphics.utils import create_mpl_fig
fig = create_mpl_fig(fig, figsize)
res_wpr = self.arima_res_
data = res_wpr.data
# Eliminate residuals associated with burned or diffuse likelihoods.
# The statsmodels code for the Kalman Filter takes the loglik_burn
# as a parameter:
# loglikelihood_burn : int, optional
# The number of initial periods during which the loglikelihood is
# not recorded. Default is 0.
# If the class has it, it's a SARIMAX and we'll use it. Otherwise we
# will just access the residuals as we normally would...
if hasattr(res_wpr, 'loglikelihood_burn'):
# This is introduced in the bleeding edge version, but is not
# backwards compatible with 0.9.0 and less:
d = res_wpr.loglikelihood_burn
if hasattr(res_wpr, 'nobs_diffuse'):
d = np.maximum(d, res_wpr.nobs_diffuse)
resid = res_wpr.filter_results\
.standardized_forecasts_error[variable, d:]
else:
# This gets the residuals, but they need to be standardized
d = 0
r = res_wpr.resid
resid = (r - np.nanmean(r)) / np.nanstd(r)
# Top-left: residuals vs time
ax = fig.add_subplot(221)
if hasattr(data, 'dates') and data.dates is not None:
x = data.dates[d:]._mpl_repr()
else:
x = np.arange(len(resid))
ax.plot(x, resid)
ax.hlines(0, x[0], x[-1], alpha=0.5)
ax.set_xlim(x[0], x[-1])
ax.set_title('Standardized residual')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid[~(np.isnan(resid))]
ax = fig.add_subplot(222)
# temporarily disable Deprecation warning, normed -> density
# hist needs to use `density` in future when minimum matplotlib has it
with warnings.catch_warnings(record=True):
ax.hist(resid_nonmissing, normed=True, label='Hist')
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96 * 2, 1.96 * 2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label='KDE')
ax.plot(x, norm.pdf(x), label='N(0,1)')
ax.set_xlim(xlim)
ax.legend()
ax.set_title('Histogram plus estimated density')
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid_nonmissing, line='s', ax=ax)
ax.set_title('Normal Q-Q')
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title('Correlogram')
ax.set_ylim(-1, 1)
return fig | Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501 |
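A minimal usage sketch for the diagnostics method above, assuming pmdarima and matplotlib are installed; the toy series and the figure size are illustrative only.

import numpy as np
import pmdarima as pm

# Fit a small non-seasonal model on a synthetic series, then render the 2x2 grid.
rng = np.random.RandomState(42)
y = np.sin(np.linspace(0, 20, 150)) + rng.normal(scale=0.1, size=150)
model = pm.auto_arima(y, seasonal=False, suppress_warnings=True)
fig = model.plot_diagnostics(figsize=(10, 8))
fig.savefig("diagnostics.png")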
def clearDevice(self):
"""Remove the current stream
"""
print(self.pre, "clearDevice")
if not self.device:
return
self.filterchain.delViewPort(self.viewport)
self.filterchain = None
self.device = None
self.video.update() | Remove the current stream |
def members_from_score_range_in(
self, leaderboard_name, minimum_score, maximum_score, **options):
'''
Retrieve members from the named leaderboard within a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param minimum_score [float] Minimum score (inclusive).
@param maximum_score [float] Maximum score (inclusive).
@param options [Hash] Options to be used when retrieving the data from the leaderboard.
@return members from the leaderboard that fall within the given score range.
'''
raw_leader_data = []
if self.order == self.DESC:
raw_leader_data = self.redis_connection.zrevrangebyscore(
leaderboard_name,
maximum_score,
minimum_score)
else:
raw_leader_data = self.redis_connection.zrangebyscore(
leaderboard_name,
minimum_score,
maximum_score)
return self._parse_raw_members(
leaderboard_name, raw_leader_data, **options) | Retrieve members from the named leaderboard within a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param minimum_score [float] Minimum score (inclusive).
@param maximum_score [float] Maximum score (inclusive).
@param options [Hash] Options to be used when retrieving the data from the leaderboard.
@return members from the leaderboard that fall within the given score range. |
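A usage sketch, assuming the method above belongs to the leaderboard-python package's Leaderboard class and a Redis server is running locally; the member names and scores are made up.

from leaderboard import Leaderboard

highscores = Leaderboard('highscores')  # default connection: localhost:6379
for name, score in [('alice', 10.0), ('bob', 50.0), ('carol', 90.0)]:
    highscores.rank_member(name, score)
# Everyone scoring between 25 and 75 inclusive -- only bob in this toy data.
print(highscores.members_from_score_range_in('highscores', 25.0, 75.0))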
def set_language(self, language):
""" Set self.language to internal lang. repr. code from str or Language object. """
if isinstance(language, str):
language_obj = languages.getlang(language)
if language_obj:
self.language = language_obj.code
else:
raise TypeError("Language code {} not found".format(language))
if isinstance(language, languages.Language):
self.language = language.code | Set self.language to internal lang. repr. code from str or Language object. |
def adjust_learning_rate(optimizer, epoch, gammas, schedule):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.learning_rate
assert len(gammas) == len(schedule), "length of gammas and schedule should be equal"
for (gamma, step) in zip(gammas, schedule):
if (epoch >= step):
lr = lr * gamma
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr
    return lr | Sets the learning rate to the initial LR decayed by the given gammas at the scheduled epochs
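A plain-Python illustration of the decay arithmetic above; the base LR, gammas, and schedule values are made up, and the real function reads args.learning_rate and mutates the optimizer's parameter groups instead of printing.

base_lr = 0.1
gammas, schedule = [0.1, 0.1], [30, 60]
for epoch in [0, 29, 30, 59, 60, 90]:
    lr = base_lr
    for gamma, step in zip(gammas, schedule):
        if epoch >= step:
            lr = lr * gamma
        else:
            break
    print(epoch, lr)  # 0.1 before epoch 30, 0.01 before 60, 0.001 afterwards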
def remove(self, auto_confirm=False, verbose=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
dist_name_version = (
self.dist.project_name + "-" + self.dist.version
)
logger.info('Uninstalling %s:', dist_name_version)
with indent_log():
if auto_confirm or self._allowed_to_proceed(verbose):
moved = self._moved_paths
for_rename = compress_for_rename(self.paths)
for path in sorted(compact(for_rename)):
moved.stash(path)
logger.debug('Removing file or directory %s', path)
for pth in self.pth.values():
pth.remove()
logger.info('Successfully uninstalled %s', dist_name_version) | Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True). |
def dict(self):
"""
calls the overridden method and adds provenance and summary data
:return: dictionary representation of the metadata
:rtype: dict
"""
metadata = super(ImpactLayerMetadata, self).dict
metadata['provenance'] = self.provenance
metadata['summary_data'] = self.summary_data
return metadata | calls the overridden method and adds provenance and summary data
:return: dictionary representation of the metadata
:rtype: dict |
def process_content(self, content, filename=None, content_type=None):
"""Standard implementation of :meth:`.DepotFileInfo.process_content`
This is the standard depot implementation of files upload, it will
store the file on the default depot and will provide the standard
attributes.
Subclasses will need to call this method to ensure the standard
set of attributes is provided.
"""
file_path, file_id = self.store_content(content, filename, content_type)
self['file_id'] = file_id
self['path'] = file_path
saved_file = self.file
self['filename'] = saved_file.filename
self['content_type'] = saved_file.content_type
self['uploaded_at'] = saved_file.last_modified.strftime('%Y-%m-%d %H:%M:%S')
self['_public_url'] = saved_file.public_url | Standard implementation of :meth:`.DepotFileInfo.process_content`
This is the standard depot implementation of files upload, it will
store the file on the default depot and will provide the standard
attributes.
Subclasses will need to call this method to ensure the standard
set of attributes is provided. |
def parse_parameters(self, parameters):
"""Parses and sets parameters in the model."""
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) | Parses and sets parameters in the model. |
def output_data(self):
"""Get a buffer of data that needs to be written to the network.
"""
c = self.has_output
if c <= 0:
return None
try:
buf = self._pn_transport.peek(c)
except Exception as e:
self._connection_failed(str(e))
return None
return buf | Get a buffer of data that needs to be written to the network. |
def cleanUp(self):
""" Delete files that are written by CommandLineApplication from disk
WARNING: after cleanUp() you may still have access to part of
your result data, but you should be aware that if the file
size exceeds the size of the buffer you will only have part
of the file. To be safe, you should not use cleanUp() until
you are done with the file or have copied it to a different
location.
"""
file_keys = self.file_keys
for item in file_keys:
if self[item] is not None:
self[item].close()
remove(self[item].name)
# remove input handler temp files
if hasattr(self, "_input_filename"):
remove(self._input_filename) | Delete files that are written by CommandLineApplication from disk
WARNING: after cleanUp() you may still have access to part of
your result data, but you should be aware that if the file
size exceeds the size of the buffer you will only have part
of the file. To be safe, you should not use cleanUp() until
you are done with the file or have copied it to a different
location. |
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN Profile from UCS Manager."""
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
obj = handle.query_dn(vlan_profile_dest)
if obj:
handle.remove_mo(obj)
handle.commit()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigFailed(config=vlan_id,
ucsm_ip=ucsm_ip, exc=e) | Deletes VLAN Profile from UCS Manager. |
async def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([b('LIMIT'), start, num])
return await self.execute_command(*pieces) | Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range. |
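An asyncio usage sketch, assuming the mixin above is exposed by an async Redis client such as aredis and that a server is listening locally; the key and member names are illustrative.

import asyncio
from aredis import StrictRedis

async def demo():
    client = StrictRedis(host='127.0.0.1', port=6379)
    # Same score for every member, so ZRANGEBYLEX orders purely by the string value.
    await client.zadd('words', apple=0, banana=0, cherry=0)
    print(await client.zrangebylex('words', '[apple', '[banana'))  # [b'apple', b'banana']

asyncio.get_event_loop().run_until_complete(demo())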
def save(self):
""" Save changes """
if self.__session:
self.session.commit()
else:
self.logger.warning("Save called but no session open.") | Save changes |
def _get_area_rates(self, source, mmin, mmax=np.inf):
"""
Adds the rates from the area source by discretising the source
to a set of point sources
:param source:
Area source as instance of :class:
openquake.hazardlib.source.area.AreaSource
"""
points = list(source)
for point in points:
self._get_point_rates(point, mmin, mmax) | Adds the rates from the area source by discretising the source
to a set of point sources
:param source:
Area source as instance of :class:
openquake.hazardlib.source.area.AreaSource |
def delete(self):
"""Delete the task.
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('Homework')
>>> task = project.add_task('Read Chapter 4')
>>> task.delete()
"""
args = {'ids': [self.id]}
_perform_command(self.project.owner, 'item_delete', args)
del self.project.owner.tasks[self.id] | Delete the task.
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('Homework')
>>> task = project.add_task('Read Chapter 4')
>>> task.delete() |
def _validate_jp2c(self, boxes):
"""Validate the codestream box in relation to other boxes."""
        # jp2c must be preceded by jp2h
jp2h_lst = [idx for (idx, box) in enumerate(boxes)
if box.box_id == 'jp2h']
jp2h_idx = jp2h_lst[0]
jp2c_lst = [idx for (idx, box) in enumerate(boxes)
if box.box_id == 'jp2c']
if len(jp2c_lst) == 0:
msg = ("A codestream box must be defined in the outermost "
"list of boxes.")
raise IOError(msg)
jp2c_idx = jp2c_lst[0]
if jp2h_idx >= jp2c_idx:
msg = "The codestream box must be preceeded by a jp2 header box."
raise IOError(msg) | Validate the codestream box in relation to other boxes. |
def print_stream(file, name):
"""Print stream from file to logger."""
logger = logging.getLogger('xenon.{}'.format(name))
for line in file:
logger.info('[{}] {}'.format(name, line.strip())) | Print stream from file to logger. |
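A small self-contained check of the helper above using an in-memory stream; the logging setup is illustrative.

import io
import logging

logging.basicConfig(level=logging.INFO, format='%(name)s %(message)s')
fake_stdout = io.StringIO("step 1 done\nstep 2 done\n")
print_stream(fake_stdout, 'stdout')  # emits two records via the 'xenon.stdout' logger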
def accept_record(self, record):
"""Accept a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete()
self.add_record(record)
self.last_record_accepted = datetime.utcnow() | Accept a record for inclusion in the community.
:param record: Record object. |
def add_arg(self,arg, prepend=False):
"""Append an arg to the arg list"""
self.args = [arg_.strip() for arg_ in self.args if arg_.strip()]
if arg.title() not in self.args:
if prepend:
self.args = [arg.title()] + self.args
else:
self.args.append(arg.title()) | Append an arg to the arg list |
def _from_sql(self, soql):
"""Create Force.com SOQL tree structure from SOQL"""
# pylint:disable=too-many-branches,too-many-nested-blocks
assert not self.soql, "Don't use _from_sql method directly"
self.soql = soql
soql, self.subqueries = split_subquery(soql)
match_parse = re.match(r'SELECT (.*) FROM (\w+)\b(.*)$', soql, re.I)
if not match_parse:
raise ProgrammingError('Invalid SQL: %s' % self.soql)
fields_sql, self.root_table, self.extra_soql = match_parse.groups()
fields = [x.strip() for x in fields_sql.split(',')]
self.is_aggregation = bool(pattern_groupby.search(self.extra_soql) or
pattern_aggregation.search(fields[0]))
self.is_plain_count = fields[0].upper() == 'COUNT()'
consumed_subqueries = 0
expr_alias_counter = 0
#
if not self.is_plain_count:
for field in fields:
if self.is_aggregation:
match = re.search(r'\b\w+$', field)
if match:
alias = match.group()
assert alias not in RESERVED_WORDS, "invalid alias name"
if match.start() > 0 and field[match.start() - 1] == ' ':
                        field = field[:match.start() - 1]
else:
alias = 'expr{}'.format(expr_alias_counter)
expr_alias_counter += 1
assert '&' not in field, "Subquery not expected as field in aggregation query"
elif '&' in field:
assert field == '(&)' # verify that the subquery was in parentheses
subquery = QQuery(self.subqueries[consumed_subqueries][0])
consumed_subqueries += 1
self.has_child_rel_field = True
field = subquery
# TODO more child relationships to the same table
alias = subquery.root_table
else:
alias = field
if '.' in alias:
if alias.split('.', 1)[0].lower() == self.root_table.lower():
alias = alias.split('.', 1)[1]
if '.' in alias:
# prepare paths for possible empty outer joins
subroots = self.subroots
root_crumbs = alias.lower().split('.')[:-1]
for scrumb in root_crumbs:
subroots.setdefault(scrumb, {})
subroots = subroots[scrumb]
self.aliases.append(alias)
self.fields.append(field) | Create Force.com SOQL tree structure from SOQL |
def organizer(self):
'''
Since events can be organized for registration in different ways (e.g. by month,
by session, or the interaction of the two), this property is used to make it easy
for templates to include necessary organizing information. Note that this method
has nothing to do with the sorting of any queryset in use, which still has to be
handled elsewhere.
'''
rule = getConstant('registration__orgRule')
# Default grouping is "Other", in case session, month, or weekday are not specified.
org = {
'name': _('Other'),
'nameFirst': {'name': _('Other'), 'sorter': _('Other')},
'nameSecond': {'name': '', 'sorter': ''},
'id': None,
}
def updateForMonth(self, org):
''' Function to avoid repeated code '''
if self.month:
org.update({
'name': _(month_name[self.month]),
'nameFirst': {'name': _(month_name[self.month]), 'sorter': self.month},
'id': 'month_%s' % self.month,
})
return org
def updateForSession(self, org):
''' Function to avoid repeated code '''
if self.session:
org.update({
'name': self.session.name,
'nameFirst': {'name': _(self.session.name), 'sorter': _(self.session.name)},
'id': self.session.pk,
})
return org
if rule in ['SessionFirst', 'SessionAlphaFirst']:
org = updateForSession(self, org)
if not org.get('id'):
org = updateForMonth(self, org)
elif rule == 'Month':
org = updateForMonth(self, org)
elif rule in ['Session','SessionAlpha']:
org = updateForSession(self, org)
elif rule in ['SessionMonth','SessionAlphaMonth']:
if self.session and self.month:
org.update({
'name': _('%s: %s' % (month_name[self.month], self.session.name)),
'nameFirst': {'name': _(month_name[self.month]), 'sorter': self.month},
'nameSecond': {'name': _(self.session.name), 'sorter': _(self.session.name)},
'id': 'month_%s_session_%s' % (self.month, self.session.pk),
})
elif not self.month:
org = updateForSession(self, org)
elif not self.session:
org = updateForMonth(self, org)
elif rule == 'Weekday':
w = self.weekday
            d = day_name[w] if w is not None else None
if w is not None:
org.update({
'name': _(d),
'nameFirst': {'name': _(d), 'sorter': w},
'id': w,
})
elif rule == 'MonthWeekday':
w = self.weekday
            d = day_name[w] if w is not None else None
            m = self.month
            mn = month_name[m] if m else None
if w is not None and m:
org.update({
'name': _('%ss in %s' % (d, mn)),
'nameFirst': {'name': _(mn), 'sorter': m},
'nameSecond': {'name': _('%ss' % d), 'sorter': w},
'id': 'month_%s_weekday_%s' % (m, w)
})
return org | Since events can be organized for registration in different ways (e.g. by month,
by session, or the interaction of the two), this property is used to make it easy
for templates to include necessary organizing information. Note that this method
has nothing to do with the sorting of any queryset in use, which still has to be
handled elsewhere. |
def _get_timestamp(dirname_full, remove):
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. |
def set_attributes(self, **attributes_dict):
"""
Set the value of multiple attributes.
:param attributes_dict dict: a dictionary containing key-value pairs as attribute names and values to be set
:returns: the resource itself
"""
for attr_name, attr_value in attributes_dict.items():
self.set_attr(attr_name, attr_value)
return self | Set the value of multiple attributes.
:param attributes_dict dict: a dictionary containing key-value pairs as attribute names and values to be set
:returns: the resource itself |
def convert_predict_response(pred, serving_bundle):
"""Converts a PredictResponse to ClassificationResponse or RegressionResponse.
Args:
pred: PredictResponse to convert.
serving_bundle: A `ServingBundle` object that contains the information about
the serving request that the response was generated by.
Returns:
A ClassificationResponse or RegressionResponse.
"""
output = pred.outputs[serving_bundle.predict_output_tensor]
raw_output = output.float_val
if serving_bundle.model_type == 'classification':
values = []
for example_index in range(output.tensor_shape.dim[0].size):
start = example_index * output.tensor_shape.dim[1].size
values.append(raw_output[start:start + output.tensor_shape.dim[1].size])
else:
values = raw_output
return convert_prediction_values(values, serving_bundle, pred.model_spec) | Converts a PredictResponse to ClassificationResponse or RegressionResponse.
Args:
pred: PredictResponse to convert.
serving_bundle: A `ServingBundle` object that contains the information about
the serving request that the response was generated by.
Returns:
A ClassificationResponse or RegressionResponse. |
def cbv_decorator(function_decorator):
"""Allows a function-based decorator to be used on a CBV."""
def class_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return class_decorator | Allows a function-based decorator to be used on a CBV. |
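A sketch of the intended use in a Django project (the view class and template name are hypothetical): the decorator rewraps dispatch(), so a function-based decorator such as login_required applies to the whole class-based view.

from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView

@cbv_decorator(login_required)
class DashboardView(TemplateView):
    template_name = 'dashboard.html'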
def delete(self, symbol, date_range=None):
"""
Delete all chunks for a symbol.
Which are, for the moment, fully contained in the passed in
date_range.
Parameters
----------
symbol : `str`
symbol name for the item
date_range : `date.DateRange`
DateRange to delete ticks in
"""
query = {SYMBOL: symbol}
date_range = to_pandas_closed_closed(date_range)
if date_range is not None:
assert date_range.start and date_range.end
query[START] = {'$gte': date_range.start}
query[END] = {'$lte': date_range.end}
else:
# delete metadata on complete deletion
self._metadata.delete_one({SYMBOL: symbol})
return self._collection.delete_many(query) | Delete all chunks for a symbol.
Which are, for the moment, fully contained in the passed in
date_range.
Parameters
----------
symbol : `str`
symbol name for the item
date_range : `date.DateRange`
DateRange to delete ticks in |
def generate_aead(hsm, args, password):
"""
Generate an AEAD using the YubiHSM.
"""
try:
pw = password.ljust(args.min_len, chr(0x0))
return hsm.generate_aead_simple(args.nonce.decode('hex'), args.key_handle, pw)
except pyhsm.exception.YHSM_CommandFailed, e:
if e.status_str == 'YHSM_FUNCTION_DISABLED':
print "ERROR: The key handle %s is not permitted to YSM_AEAD_GENERATE." % (args.key_handle)
return None
else:
print "ERROR: %s" % (e.reason) | Generate an AEAD using the YubiHSM. |
def get_errors(self):
"""If there were any business errors fetching data for this property,
returns the error messages.
Returns:
string - the error message, or None if there was no error.
"""
return [{cr.component_name: cr.get_error()}
for cr in self.component_results if cr.has_error()] | If there were any business errors fetching data for this property,
returns the error messages.
Returns:
string - the error message, or None if there was no error. |
def attachedimage_form_factory(lang='en', debug=False):
''' Returns ModelForm class to be used in admin.
'lang' is the language for GearsUploader (can be 'en' and 'ru' at the
moment).
'''
yui = '' if debug else '.yui'
class _AttachedImageAdminForm(forms.ModelForm):
caption = forms.CharField(label=_('Caption'), required=False)
class Media:
js = [
'generic_images/js/mootools-1.2.4-core-yc.js',
'generic_images/js/GearsUploader.%s%s.js' % (lang, yui,),
'generic_images/js/AttachedImageInline.js',
]
class Meta:
model = AttachedImage
return _AttachedImageAdminForm | Returns ModelForm class to be used in admin.
'lang' is the language for GearsUploader (can be 'en' and 'ru' at the
moment). |
def log_erase_send(self, target_system, target_component, force_mavlink1=False):
'''
Erase all logs
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
'''
return self.send(self.log_erase_encode(target_system, target_component), force_mavlink1=force_mavlink1) | Erase all logs
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t) |
def _set_distribute_list(self, v, load=False):
"""
Setter method for distribute_list, mapped from YANG variable /bgp_state/neighbor/evpn/distribute_list (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_distribute_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_distribute_list() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=distribute_list.distribute_list, is_container='container', presence=False, yang_name="distribute-list", rest_name="distribute-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'bgp-access-list-distribute-list-1'}}, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """distribute_list must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=distribute_list.distribute_list, is_container='container', presence=False, yang_name="distribute-list", rest_name="distribute-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'bgp-access-list-distribute-list-1'}}, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='container', is_config=False)""",
})
self.__distribute_list = t
if hasattr(self, '_set'):
self._set() | Setter method for distribute_list, mapped from YANG variable /bgp_state/neighbor/evpn/distribute_list (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_distribute_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_distribute_list() directly. |
def normalize_genotypes(genotypes):
"""Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes.
"""
genotypes = genotypes.genotypes
return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes) | Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes. |
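A worked example with a stand-in for the Genotypes container (the real class is assumed to expose a .genotypes array elsewhere in the package); NaN marks a missing call and survives the standardization.

import numpy as np
from collections import namedtuple

FakeGenotypes = namedtuple('FakeGenotypes', 'genotypes')
g = FakeGenotypes(genotypes=np.array([0.0, 1.0, 2.0, np.nan, 1.0]))
print(normalize_genotypes(g))  # roughly [-1.41, 0.0, 1.41, nan, 0.0]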
def size(self):
"""Calculate and return the file size in bytes."""
old = self.__file.tell() # old position
self.__file.seek(0, 2) # end of file
n_bytes = self.__file.tell() # file size in bytes
self.__file.seek(old) # back to old position
return n_bytes | Calculate and return the file size in bytes. |
def allocate(self):
"""
Arrange for a unique context ID to be allocated and associated with a
route leading to the active context. In masters, the ID is generated
directly, in children it is forwarded to the master via a
:data:`mitogen.core.ALLOCATE_ID` message.
"""
self.lock.acquire()
try:
id_ = self.next_id
self.next_id += 1
return id_
finally:
self.lock.release() | Arrange for a unique context ID to be allocated and associated with a
route leading to the active context. In masters, the ID is generated
directly, in children it is forwarded to the master via a
:data:`mitogen.core.ALLOCATE_ID` message. |
def ungettext(self):
"""
Dispatch to the appropriate ngettext method to handle text objects.
Note that under python 3, this uses `ngettext()`, while under python 2,
it uses `ungettext()`. This should not be used with bytestrings.
"""
# pylint: disable=no-member
if six.PY2:
return self._translations.ungettext
else:
return self._translations.ngettext | Dispatch to the appropriate ngettext method to handle text objects.
Note that under python 3, this uses `ngettext()`, while under python 2,
it uses `ungettext()`. This should not be used with bytestrings. |
def get_stacked_rnn(config: RNNConfig, prefix: str,
parallel_inputs: bool = False,
layers: Optional[Iterable[int]] = None) -> mx.rnn.SequentialRNNCell:
"""
Returns (stacked) RNN cell given parameters.
:param config: rnn configuration.
:param prefix: Symbol prefix for RNN.
:param parallel_inputs: Support parallel inputs for the stacked RNN cells.
:param layers: Specify which layers to create as a list of layer indexes.
:return: RNN cell.
"""
rnn = mx.rnn.SequentialRNNCell() if not parallel_inputs else SequentialRNNCellParallelInput()
if not layers:
layers = range(config.num_layers)
for layer_idx in layers:
# fhieber: the 'l' in the prefix does NOT stand for 'layer' but for the direction 'l' as in mx.rnn.rnn_cell::517
# this ensures parameter name compatibility of training w/ FusedRNN and decoding with 'unfused' RNN.
cell_prefix = "%sl%d_" % (prefix, layer_idx)
if config.cell_type == C.LSTM_TYPE:
if config.dropout_recurrent > 0.0:
cell = RecurrentDropoutLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias, dropout=config.dropout_recurrent)
else:
cell = mx.rnn.LSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNLSTM_TYPE:
cell = LayerNormLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNGLSTM_TYPE:
cell = LayerNormPerGateLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias)
elif config.cell_type == C.GRU_TYPE:
cell = mx.rnn.GRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGRU_TYPE:
cell = LayerNormGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGGRU_TYPE:
cell = LayerNormPerGateGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
else:
raise NotImplementedError()
if config.dropout_inputs > 0 or config.dropout_states > 0:
cell = VariationalDropoutCell(cell,
dropout_inputs=config.dropout_inputs,
dropout_states=config.dropout_states)
if config.lhuc:
cell = LHUCCell(cell, config.num_hidden, config.dtype)
# layer_idx is 0 based, whereas first_residual_layer is 1-based
if config.residual and layer_idx + 1 >= config.first_residual_layer:
cell = mx.rnn.ResidualCell(cell) if not parallel_inputs else ResidualCellParallelInput(cell)
elif parallel_inputs:
cell = ParallelInputCell(cell)
rnn.add(cell)
return rnn | Returns (stacked) RNN cell given parameters.
:param config: rnn configuration.
:param prefix: Symbol prefix for RNN.
:param parallel_inputs: Support parallel inputs for the stacked RNN cells.
:param layers: Specify which layers to create as a list of layer indexes.
:return: RNN cell. |
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M | Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes) |
def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
"""
Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files, the first items should be the filenames to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects.
"""
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
print(value)
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = c_string_p(name)
unsaved_files_array[i].contents = c_string_p(value)
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None | Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files, the first items should be the filenames to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects. |
def _wmi_to_ts(self, wmi_ts):
''' Convert a wmi formatted timestamp into an epoch.
'''
year, month, day, hour, minute, second, microsecond, tz = to_time(wmi_ts)
tz_delta = timedelta(minutes=int(tz))
if '+' in wmi_ts:
tz_delta = -tz_delta
dt = (
datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)
+ tz_delta
)
return int(calendar.timegm(dt.timetuple())) | Convert a wmi formatted timestamp into an epoch. |
def yaml_get_data(filename):
"""Get data from .yml file
"""
with open(filename, 'rb') as fd:
yaml_data = yaml.load(fd)
        return yaml_data | Get data from .yml file
def get_environment_paths(config, env):
"""
Get environment paths from given environment variable.
"""
if env is None:
return config.get(Config.DEFAULTS, 'environment')
# Config option takes precedence over environment key.
if config.has_option(Config.ENVIRONMENTS, env):
env = config.get(Config.ENVIRONMENTS, env).replace(' ', '').split(';')
else:
env = os.getenv(env)
if env:
env = env.split(os.pathsep)
return [i for i in env if i] | Get environment paths from given environment variable. |
def memoize(fn):
"""Caches previous calls to the function."""
memo = {}
@wraps(fn)
def wrapper(*args, **kwargs):
if not memoize.disabled:
key = pickle.dumps((args, kwargs))
if key not in memo:
memo[key] = fn(*args, **kwargs)
value = memo[key]
else:
# Memoize is disabled, call the function
value = fn(*args, **kwargs)
return value
return wrapper | Caches previous calls to the function. |
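A quick check that repeated calls are served from the cache; the disabled flag is assumed to be initialized elsewhere in the real module, so the sketch sets it explicitly.

memoize.disabled = False
calls = []

@memoize
def slow_square(x):
    calls.append(x)
    return x * x

print(slow_square(4), slow_square(4))  # 16 16
print(len(calls))                      # 1 -- the second call never re-ran the function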
def _cumprod(l):
"""Cumulative product of a list.
Args:
l: a list of integers
Returns:
a list with one more element (starting with 1)
"""
ret = [1]
for item in l:
ret.append(ret[-1] * item)
return ret | Cumulative product of a list.
Args:
l: a list of integers
Returns:
a list with one more element (starting with 1) |
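A worked example of the running product above; the result is always one element longer than the input.

print(_cumprod([2, 3, 4]))  # [1, 2, 6, 24]
print(_cumprod([]))         # [1]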
def create_mirror_settings(repo_url):
"""
Creates settings.xml in current working directory, which when used makes Maven use given repo URL as a mirror of all
repositories to look at.
:param repo_url: the repository URL to use
:returns: filepath to the created file
"""
cwd = os.getcwd()
settings_path = os.path.join(cwd, "settings.xml")
settings_file = None
try:
settings_file = open(settings_path, "w")
settings_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
settings_file.write('<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n')
settings_file.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
settings_file.write(' xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n')
settings_file.write('<mirrors>\n')
settings_file.write(' <mirror>\n')
settings_file.write(' <id>repo-mirror</id>\n')
settings_file.write(' <url>%s</url>\n' % repo_url)
settings_file.write(' <mirrorOf>*</mirrorOf>\n')
settings_file.write(' </mirror>\n')
settings_file.write(' </mirrors>\n')
settings_file.write('</settings>\n')
finally:
if settings_file:
settings_file.close()
return settings_path | Creates settings.xml in current working directory, which when used makes Maven use given repo URL as a mirror of all
repositories to look at.
:param repo_url: the repository URL to use
:returns: filepath to the created file |
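A usage sketch; the repository URL is illustrative and the file lands in the current working directory.

path = create_mirror_settings("http://repo.example.com/maven2/")
print(path)  # .../settings.xml -- then e.g.: mvn -s settings.xml clean install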
def verify(full, dataset_uri):
"""Verify the integrity of a dataset.
"""
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
all_okay = True
generated_manifest = dataset.generate_manifest()
generated_identifiers = set(generated_manifest["items"].keys())
manifest_identifiers = set(dataset.identifiers)
for i in generated_identifiers.difference(manifest_identifiers):
message = "Unknown item: {} {}".format(
i,
generated_manifest["items"][i]["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.difference(generated_identifiers):
message = "Missing item: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.intersection(generated_identifiers):
        generated_size = generated_manifest["items"][i]["size_in_bytes"]
        manifest_size = dataset.item_properties(i)["size_in_bytes"]
        if generated_size != manifest_size:
message = "Altered item size: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if full:
for i in manifest_identifiers.intersection(generated_identifiers):
generated_hash = generated_manifest["items"][i]["hash"]
manifest_hash = dataset.item_properties(i)["hash"]
if generated_hash != manifest_hash:
message = "Altered item hash: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if not all_okay:
sys.exit(1)
else:
click.secho("All good :)", fg="green") | Verify the integrity of a dataset. |
def sanitize_block(self, block):
"""Santizes the data for the given block.
If block has a matching embed serializer, use the `to_internal_value` method."""
embed_type = block.get('type', None)
data = block.get('data', {})
serializer = self.serializers.get(embed_type, None)
if serializer is None:
return block
block['data'] = serializer.to_internal_value(data)
        return block | Sanitizes the data for the given block.
If block has a matching embed serializer, use the `to_internal_value` method. |
def wallet_unlock(self, wallet, password):
"""
Unlocks **wallet** using **password**
:param wallet: Wallet to unlock
:type wallet: str
:param password: Password to enter
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_unlock(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet, "password": password}
resp = self.call('wallet_unlock', payload)
return resp['valid'] == '1' | Unlocks **wallet** using **password**
:param wallet: Wallet to unlock
:type wallet: str
:param password: Password to enter
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_unlock(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True |
def set_option(name, option):
"""
Set the given LLVM "command-line" option.
For example set_option("test", "-debug-pass=Structure") would display
all optimization passes when generating code.
"""
ffi.lib.LLVMPY_SetCommandLine(_encode_string(name),
_encode_string(option)) | Set the given LLVM "command-line" option.
For example set_option("test", "-debug-pass=Structure") would display
all optimization passes when generating code. |