| Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
1,300 | def _parse_title_url(html_chunk):
url = None
title_tags = html_chunk.match(
["div", {"class": "polozka_nazev"}],
["a", None, has_param("href")]
)
if not title_tags:
return _parse_alt_title(html_chunk), _parse_alt_url(html_chunk)
title = title_tags[0]
url = normalize_url(BASE_URL, title.params["href"])
title = title.getContent()
if not title:
title = _parse_alt_title(html_chunk)
return title, url | Parse title/name of the book and URL of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (title, url), both as strings. |
1,301 | def fetch_official_missions(data_dir, start_date, end_date):
official_missions = OfficialMissionsDataset()
df = official_missions.fetch(start_date, end_date)
save_to_csv(df, data_dir, "official-missions")
return df | :param data_dir: (str) directory in which the output file will be saved
:param start_date: (datetime) first date of the range to be scraped
:param end_date: (datetime) last date of the range to be scraped |
1,302 | def logstats(self):
lines = [
"node {} current stats".format(self),
"--------------------------------------------------------",
"node inbox size : {}".format(len(self.nodeInBox)),
"client inbox size : {}".format(len(self.clientInBox)),
"age (seconds) : {}".format(time.time() - self.created),
"next check for reconnect: {}".format(time.perf_counter() -
self.nodestack.nextCheck),
"node connections : {}".format(self.nodestack.conns),
"f : {}".format(self.f),
"master instance : {}".format(self.instances.masterId),
"replicas : {}".format(len(self.replicas)),
"view no : {}".format(self.viewNo),
"rank : {}".format(self.rank),
"msgs to replicas : {}".format(self.replicas.sum_inbox_len),
"msgs to view changer : {}".format(len(self.msgsToViewChanger)),
"action queue : {} {}".format(len(self.actionQueue),
id(self.actionQueue)),
"action queue stash : {} {}".format(len(self.aqStash),
id(self.aqStash)),
]
logger.info("\n".join(lines), extra={"cli": False}) | Print the node's current statistics to log. |
1,303 | def get(self, collection_id):
url = "/collections/%s" % collection_id
result = self._get(url)
return CollectionModel.parse(result) | Retrieve a single collection.
To view a user’s private collections, the 'read_collections' scope is required.
:param collection_id [string]: The collection’s ID. Required.
:return: [Collection]: The Unsplash Collection. |
1,304 | def get_open_filenames(self):
editorstack = self.editorstacks[0]
filenames = []
filenames += [finfo.filename for finfo in editorstack.data]
return filenames | Get the list of open files in the current stack |
1,305 | def prompt(text, default=None, show_default=True, invisible=False,
confirm=False, skip=False, type=None, input_function=None):
t = determine_type(type, default)
input_function = get_input_fn(input_function, invisible)
if default is not None and show_default:
text = .format(text, default)
while True:
val = prompt_fn(input_function, text, default, t, skip, repeat=True)
if not confirm or (skip and val is None):
return val
if val == prompt_fn(input_function, , default, t, repeat=True):
return val
echo(, True) | Prompts for input from the user. |
1,306 | def compute_uncertainty_reward(logits, predictions):
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
predictions)
neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
return summed / 10 | Uncertainty reward based on logits. |
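For intuition, a scalar walk-through of the penalty computed above; this is a standalone numpy sketch, not part of the row's TensorFlow code:

```python
import numpy as np

# If the model assigns probability 0.9 to the predicted token,
# -log(0.9) ~= 0.105, so relu(0.105 - 0.02) ~= 0.085 is added to the sum;
# predictions with probability above exp(-0.02) ~= 0.98 contribute nothing.
max_log_prob = np.log(0.9)
penalty = np.maximum(-max_log_prob - 0.02, 0.0)   # ~= 0.085
reward_contribution = penalty / 10                # ~= 0.0085
```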
1,307 | def shrink_indexes_in_place(self, triples):
_ent_roots = self.UnionFind(self._ent_id)
_rel_roots = self.UnionFind(self._rel_id)
for t in triples:
_ent_roots.add(t.head)
_ent_roots.add(t.tail)
_rel_roots.add(t.relation)
for i, t in enumerate(triples):
h = _ent_roots.find(t.head)
r = _rel_roots.find(t.relation)
t = _ent_roots.find(t.tail)
triples[i] = kgedata.TripleIndex(h, r, t)
ents = bidict()
available_ent_idx = 0
for previous_idx, ent_exist in enumerate(_ent_roots.roots()):
if not ent_exist:
self._ents.inverse.pop(previous_idx)
else:
ents[self._ents.inverse[previous_idx]] = available_ent_idx
available_ent_idx += 1
rels = bidict()
available_rel_idx = 0
for previous_idx, rel_exist in enumerate(_rel_roots.roots()):
if not rel_exist:
self._rels.inverse.pop(previous_idx)
else:
rels[self._rels.inverse[previous_idx]] = available_rel_idx
available_rel_idx += 1
self._ents = ents
self._rels = rels
self._ent_id = available_ent_idx
    self._rel_id = available_rel_idx | Uses a union-find to remap the triples and compact the entity and relation indexes in place. |
1,308 | def bitstring_probs_to_z_moments(p):
zmat = np.array([[1, 1],
[1, -1]])
return _apply_local_transforms(p, (zmat for _ in range(p.ndim))) | Convert between bitstring probabilities and joint Z moment expectations.
:param np.array p: An array that enumerates bitstring probabilities. When
flattened out ``p = [p_00...0, p_00...1, ...,p_11...1]``. The total number of elements must
therefore be a power of 2. The canonical shape has a separate axis for each qubit, such that
``p[i,j,...,k]`` gives the estimated probability of bitstring ``ij...k``.
:return: ``z_moments``, an np.array with one length-2 axis per qubit which contains the
expectations of all monomials in ``{I, Z_0, Z_1, ..., Z_{n-1}}``. The expectations of each
monomial can be accessed via::
<Z_0^j_0 Z_1^j_1 ... Z_m^j_m> = z_moments[j_0,j_1,...,j_m]
:rtype: np.array |
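For intuition, a single-qubit sketch of the local transform this function applies along every qubit axis (the `zmat @ p` product stands in for `_apply_local_transforms`, which is not shown in the row):

```python
import numpy as np

zmat = np.array([[1, 1],
                 [1, -1]])

p = np.array([0.8, 0.2])     # P(0) = 0.8, P(1) = 0.2
z_moments = zmat @ p         # [p0 + p1, p0 - p1] = [<I>, <Z>]
print(z_moments)             # [1.0, 0.6]
```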
1,309 | def _CallEventHandler(self, Event, *Args, **KwArgs):
if Event not in self._EventHandlers:
raise ValueError( % (Event, self.__class__.__name__))
args = map(repr, Args) + [ % (key, repr(value)) for key, value in KwArgs.items()]
self.__Logger.debug(, Event, .join(args))
try:
handlers = [self._DefaultEventHandlers[Event]]
except KeyError:
handlers = []
try:
handlers.append(getattr(self._EventHandlerObject, Event))
except AttributeError:
pass
handlers.extend(self._EventHandlers[Event])
if handlers:
after = self._EventThreads.get(Event, None)
thread = EventSchedulerThread(Event, after, handlers, Args, KwArgs)
self._EventThreads[Event] = thread
thread.start() | Calls all event handlers defined for given Event, additional parameters
will be passed unchanged to event handlers, all event handlers are fired on
separate threads.
:Parameters:
Event : str
Name of the event.
Args
Positional arguments for the event handlers.
KwArgs
Keyword arguments for the event handlers. |
1,310 | def submit(self):
    elm_name = '%s_submit' % self.get_attribute('id')
try:
self.click(elm_name)
except NoSuchElementException:
        super(Form, self).submit() | Try to find an element with ID "[FORM_ID]_submit" and click on it. If no
    such element exists, it falls back to the default behaviour: submitting the
    form by pressing Enter. |
1,311 | def main(argv=None):
parser = get_parser()
global args
args = parser.parse_args(argv)
args.func() | Runs the program and handles command line options |
1,312 | def skip(self):
line = self.line
    self.line = ''
if line in self.lines:
if not self.skipping:
self.skipping = True
printflush(, end=)
printflush(, end=)
return True
elif line:
self.lines.append(line)
if len(self.lines) > 30:
self.lines.popleft()
return False | Skip this py-pdb command to avoid attaching within the same loop. |
1,313 | def symbol(name: str=None, symbol_type: Type[Symbol]=Symbol) -> 'SymbolWildcard':
if isinstance(name, type) and issubclass(name, Symbol) and symbol_type is Symbol:
return SymbolWildcard(name)
return SymbolWildcard(symbol_type, variable_name=name) | Create a `SymbolWildcard` that matches a single `Symbol` argument.
Args:
name:
Optional variable name for the wildcard.
symbol_type:
An optional subclass of `Symbol` to further limit which kind of symbols are
matched by the wildcard.
Returns:
A `SymbolWildcard` that matches the *symbol_type*. |
1,314 | def clips_value(self, dvalue):
try:
return VALUES[type(dvalue)](self._env, dvalue)
except KeyError:
if isinstance(dvalue, (list, tuple)):
return self.list_to_multifield(dvalue)
if isinstance(dvalue, (clips.facts.Fact)):
return dvalue._fact
if isinstance(dvalue, (clips.classes.Instance)):
return dvalue._ist
return ffi.NULL | Convert a Python type into CLIPS. |
1,315 | def parametrize(self):
for element in self._chain:
if not element.is_reader and not element._estimated:
element.estimate(element.data_producer, stride=self.param_stride, chunksize=self.chunksize)
    self._estimated = True | Reads all data and discretizes it into discrete trajectories. |
1,316 | def rename(self, old_file_path, new_file_path, force_replace=False):
ends_with_sep = self.ends_with_path_separator(old_file_path)
old_file_path = self.absnormpath(old_file_path)
new_file_path = self.absnormpath(new_file_path)
if not self.exists(old_file_path, check_link=True):
self.raise_os_error(errno.ENOENT, old_file_path, 2)
if ends_with_sep:
self._handle_broken_link_with_trailing_sep(old_file_path)
old_object = self.lresolve(old_file_path)
if not self.is_windows_fs:
self._handle_posix_dir_link_errors(
new_file_path, old_file_path, ends_with_sep)
if self.exists(new_file_path, check_link=True):
new_file_path = self._rename_to_existing_path(
force_replace, new_file_path, old_file_path,
old_object, ends_with_sep)
if not new_file_path:
return
old_dir, old_name = self.splitpath(old_file_path)
new_dir, new_name = self.splitpath(new_file_path)
if not self.exists(new_dir):
self.raise_os_error(errno.ENOENT, new_dir)
old_dir_object = self.resolve(old_dir)
new_dir_object = self.resolve(new_dir)
if old_dir_object.st_dev != new_dir_object.st_dev:
self.raise_os_error(errno.EXDEV, old_file_path)
if not S_ISDIR(new_dir_object.st_mode):
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
new_file_path)
if new_dir_object.has_parent_object(old_object):
self.raise_os_error(errno.EINVAL, new_file_path)
object_to_rename = old_dir_object.get_entry(old_name)
old_dir_object.remove_entry(old_name, recursive=False)
object_to_rename.name = new_name
new_name = new_dir_object._normalized_entryname(new_name)
if new_name in new_dir_object.contents:
new_dir_object.remove_entry(new_name)
new_dir_object.add_entry(object_to_rename) | Renames a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Args:
old_file_path: Path to filesystem object to rename.
new_file_path: Path to where the filesystem object will live
after this call.
force_replace: If set and destination is an existing file, it
will be replaced even under Windows if the user has
permissions, otherwise replacement happens under Unix only.
Raises:
OSError: if old_file_path does not exist.
OSError: if new_file_path is an existing directory
(Windows, or Posix if old_file_path points to a regular file)
OSError: if old_file_path is a directory and new_file_path a file
OSError: if new_file_path is an existing file and force_replace
not set (Windows only).
OSError: if new_file_path is an existing file and could not be
removed (Posix, or Windows with force_replace set).
OSError: if dirname(new_file_path) does not exist.
OSError: if the file would be moved to another filesystem
(e.g. mount point). |
1,317 | def check(self, dsm, **kwargs):
med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
return CompleteMediation.matrices_compliance(dsm, med_matrix) | Check if matrix and its mediation matrix are compliant.
It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False |
1,318 | def db_exists(name, user=None, password=None, host=None, port=None):
    dbs = db_list(user, password, host, port)
    if not isinstance(dbs, list):
        return False
    return name in [db['name'] for db in dbs] | Checks if a database exists in InfluxDB
    name
        Database name to check for
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port> |
1,319 | def step(self, compute=True):
if self.batched:
self.batched = True
self.compute = compute
yield self
if compute:
self._write()
compute = True | Context manager to gradually build a history row, then commit it at the end.
To reduce the number of conditionals needed, code can check run.history.compute:
with run.history.step(batch_idx % log_interval == 0):
run.history.add({"nice": "ok"})
if run.history.compute:
# Something expensive here |
1,320 | def save_settings(file_path, record_details, overwrite=False, secret_key=''):
title =
try:
_path_arg = % (title, str(file_path))
except:
raise ValueError( % title)
_details_arg = % title
if not isinstance(record_details, dict):
raise ValueError( % _details_arg)
if secret_key:
try:
_secret_arg = % (title, str(secret_key))
except:
raise ValueError( % title)
ext_map = {}
file_extensions = {
"json": ".+\\.json$",
"json.gz": ".+\\.json\\.gz$",
"yaml": ".+\\.ya?ml$",
"yaml.gz": ".+\\.ya?ml\\.gz$",
"drep": ".+\\.drep$"
}
import re
for key, value in file_extensions.items():
file_pattern = re.compile(value)
if file_pattern.findall(file_path):
ext_map[key] = True
else:
ext_map[key] = False
file_time = 0
    file_data = ''.encode()
    if ext_map['json']:
        import json
        file_data = json.dumps(record_details, indent=2).encode()
    elif ext_map['yaml']:
        import yaml
        file_data = yaml.dump(record_details).encode()
    elif ext_map['json.gz']:
        import json
        import gzip
        file_bytes = json.dumps(record_details).encode()
        file_data = gzip.compress(file_bytes)
    elif ext_map['yaml.gz']:
        import yaml
        import gzip
        file_bytes = yaml.dump(record_details).encode()
        file_data = gzip.compress(file_bytes)
    elif ext_map['drep']:
        from labpack.compilers import drep
        file_data = drep.dump(record_details, secret_key)
        file_time = 1
    else:
        raise ValueError('%s must have one of the following extensions: %s' % (_path_arg, list(ext_map.keys())))
    import os
    if not overwrite:
        if os.path.exists(file_path):
            raise Exception('%s already exists. To replace %s, set overwrite=True.' % (_path_arg, _path_arg))
    dir_path = os.path.split(file_path)
    if dir_path[0]:
        if not os.path.exists(dir_path[0]):
            os.makedirs(dir_path[0])
    with open(file_path, 'wb') as f:
f.write(file_data)
f.close()
if file_time:
os.utime(file_path, times=(file_time, file_time))
return file_path | a method to save dictionary typed data to a local file
:param file_path: string with path to settings file
:param record_details: dictionary with record details
:param overwrite: [optional] boolean to overwrite existing file data
:param secret_key: [optional] string with key to decrypt drep file
:return: string with file path |
1,321 | def get_statements(self):
for k, v in self.reader_output.items():
for interaction in v[]:
self._process_interaction(k, interaction, v[], self.pmid,
self.extra_annotations) | Process reader output to produce INDRA Statements. |
1,322 | def get_variable_accesses(self, variable, same_name=False):
    if variable.region == 'global':
return self.global_manager.get_variable_accesses(variable, same_name=same_name)
elif variable.region in self.function_managers:
return self.function_managers[variable.region].get_variable_accesses(variable, same_name=same_name)
l.warning(, variable.region)
return [ ] | Get a list of all references to the given variable.
:param SimVariable variable: The variable.
:param bool same_name: Whether to include all variables with the same variable name, or just
based on the variable identifier.
:return: All references to the variable.
:rtype: list |
1,323 | def image_to_string(image, lang=None, boxes=False):
    input_file_name = '%s.bmp' % tempnam()
    output_file_name_base = tempnam()
    if not boxes:
        output_file_name = '%s.txt' % output_file_name_base
    else:
        output_file_name = '%s.box' % output_file_name_base
try:
image.save(input_file_name)
status, error_string = run_tesseract(input_file_name,
output_file_name_base,
lang=lang,
boxes=boxes)
if status:
errors = get_errors(error_string)
raise TesseractError(status, errors)
f = file(output_file_name)
try:
return f.read().strip()
finally:
f.close()
finally:
cleanup(input_file_name)
cleanup(output_file_name) | Runs tesseract on the specified image. First, the image is written to disk,
and then the tesseract command is run on the image. Tesseract's result is
read, and the temporary files are erased. |
1,324 | def get_command(self, ctx, name):
if name not in self.daemon_class.list_actions():
return None
daemon = ctx.obj
def subcommand(debug=False):
if daemon.detach and debug:
daemon.detach = False
daemon.do_action(name)
subcommand.__doc__ = daemon.get_action(name).__doc__
    if name == 'start':
        subcommand = click.option(
            '--debug', is_flag=True,
            help='Do NOT detach and run in the foreground.'
        )(subcommand)
subcommand = click.command(
name, options_metavar=self.options_metavar)(subcommand)
return subcommand | Get a callable command object. |
1,325 | def get_gradebook_ids_by_grade_system(self, grade_system_id):
    mgr = self._get_provider_manager('GRADING', local=True)
lookup_session = mgr.get_grade_system_lookup_session(proxy=self._proxy)
lookup_session.use_federated_gradebook_view()
grade_system = lookup_session.get_grade_system(grade_system_id)
id_list = []
    for idstr in grade_system._my_map['assignedGradebookIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.
arg: grade_system_id (osid.id.Id): ``Id`` of a
``GradeSystem``
return: (osid.id.IdList) - list of gradebook ``Ids``
raise: NotFound - ``grade_system_id`` is not found
raise: NullArgument - ``grade_system_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
1,326 | def power(args):
p = OptionParser(power.__doc__)
    p.add_option("--maxinsert", default=300, type="int",
                 help="Maximum number of repeats")
add_simulate_options(p)
opts, args, iopts = p.set_image_options(args, figsize="10x10", format="png")
if len(args) != 0:
sys.exit(not p.print_help())
max_insert = opts.maxinsert
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
figsize=(iopts.w, iopts.h))
plt.tight_layout(pad=3)
color = "lightslategray"
tredparse_results = parse_results("tredparse_results_het-spanning.txt")
title = SIMULATED_DIPLOID + " (Sub-model 1: Spanning reads)"
plot_compare(ax1, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
tredparse_results = parse_results("tredparse_results_het-partial.txt", exclude=20)
title = SIMULATED_DIPLOID + " (Sub-model 2: Partial reads)"
plot_compare(ax2, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
tredparse_results = parse_results("tredparse_results_het-repeat.txt", exclude=20)
tredparse_results = [x for x in tredparse_results if x[0] > 50]
title = SIMULATED_DIPLOID + " (Sub-model 3: Repeat-only reads)"
plot_compare(ax3, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
tredparse_results = parse_results("tredparse_results_het-pair.txt", exclude=20)
title = SIMULATED_DIPLOID + " (Sub-model 4: Paired-end reads)"
plot_compare(ax4, title, tredparse_results, None, color=color,
max_insert=max_insert, risk=False)
for ax in (ax1, ax2, ax3, ax4):
ax.set_xlim(0, max_insert)
ax.set_ylim(0, max_insert)
root = fig.add_axes([0, 0, 1, 1])
pad = .03
panel_labels(root, ((pad / 2, 1 - pad, "A"), (1 / 2., 1 - pad, "B"),
(pad / 2, 1 / 2. , "C"), (1 / 2., 1 / 2. , "D")))
normalize_axes(root)
image_name = "power." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog power
Compare performances of various variant callers on simulated STR datasets.
This compares the power of various evidence types. |
1,327 | def _set_label(self, which, label, **kwargs):
    prop_default = {
        'fontsize': 18,
    }
for prop, default in prop_default.items():
kwargs[prop] = kwargs.get(prop, default)
setattr(self.label, which, label)
setattr(self.label, which + , kwargs)
return | Private method for setting labels.
Args:
which (str): The indicator of which part of the plots
to adjust. This currently handles `xlabel`/`ylabel`,
and `title`.
label (str): The label to be added.
fontsize (int, optional): Fontsize for associated label. Default
is None. |
1,328 | def rcm_vertex_order(vertices_resources, nets):
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex | A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer. |
1,329 | def get_plugins():
    if os.path.exists("plugins"):
        for filename in sorted([f for f in os.listdir("plugins")
                                if not os.path.isdir(f) and f.endswith(".py")]):
plugin_name = filename[:-3]
try:
plugin = import_plugin(plugin_name)
except SystemExit as e:
description = "Plugin has a syntax error"
else:
description = plugin.__doc__ or "No description found"
yield {plugin_name: description} | Gets available plugins by looking into the plugins/ directory |
1,330 | def from_project_config(cls, project_dict, packages_dict=None):
try:
project_dict = cls._preprocess(project_dict)
except RecursionException:
raise DbtProjectError(
,
project=project_dict
)
try:
ProjectContract(**project_dict)
except ValidationException as e:
raise DbtProjectError(str(e))
    name = project_dict['name']
    version = project_dict['version']
    project_root = project_dict['project-root']
    quoting = project_dict.get('quoting', {})
    models = project_dict.get('models', {})
    on_run_start = project_dict.get('on-run-start', [])
    on_run_end = project_dict.get('on-run-end', [])
    archive = project_dict.get('archive', [])
    seeds = project_dict.get('seeds', {})
    dbt_raw_version = project_dict.get('require-dbt-version', '>=0.0.0')
try:
dbt_version = _parse_versions(dbt_raw_version)
except SemverException as e:
raise DbtProjectError(str(e))
packages = package_config_from_data(packages_dict)
project = cls(
project_name=name,
version=version,
project_root=project_root,
profile_name=profile_name,
source_paths=source_paths,
macro_paths=macro_paths,
data_paths=data_paths,
test_paths=test_paths,
analysis_paths=analysis_paths,
docs_paths=docs_paths,
target_path=target_path,
archive_paths=archive_paths,
clean_targets=clean_targets,
log_path=log_path,
modules_path=modules_path,
quoting=quoting,
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
archive=archive,
seeds=seeds,
dbt_version=dbt_version,
packages=packages
)
project.validate()
return project | Create a project from its project and package configuration, as read
by yaml.safe_load().
:param project_dict dict: The dictionary as read from disk
:param packages_dict Optional[dict]: If it exists, the packages file as
read from disk.
:raises DbtProjectError: If the project is missing or invalid, or if
the packages file exists and is invalid.
:returns Project: The project, with defaults populated. |
1,331 | def q(cls, **kwargs):
redis = cls.get_redis()
return QuerySet(cls, redis.sscan_iter(cls.members_key())) | Creates an iterator over the members of this class that applies the
given filters and returns only the elements matching them |
1,332 | def loglike(self, y, f, var=None):
var = self._check_param(var)
y, f = np.broadcast_arrays(y, f)
ll = - 0.5 * (np.log(2 * np.pi * var) + (y - f)**2 / var)
    return ll | Gaussian log likelihood.
Parameters
----------
y: ndarray
array of 0, 1 valued integers of targets
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
logp: ndarray
the log likelihood of each y given each f under this
likelihood. |
1,333 | def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) | Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid. |
1,334 | def _validate_derived_from(cursor, model):
derived_from_uri = model.metadata.get()
if derived_from_uri is None:
return
try:
ident_hash = parse_archive_uri(derived_from_uri)
uuid_, version = split_ident_hash(ident_hash, split_version=True)
except (ValueError, IdentHashSyntaxError, IdentHashShortId) as exc:
raise exceptions.InvalidMetadata(, derived_from_uri,
original_exception=exc)
args = [uuid_]
table =
version_condition =
if version != (None, None,):
args.extend(version)
table =
version_condition = " AND major_version = %s" \
" AND minor_version {} %s" \
.format(version[1] is None and or )
cursor.execute(
.format(table, version_condition), args)
try:
_exists = cursor.fetchone()[0]
except TypeError:
raise exceptions.InvalidMetadata(, derived_from_uri)
| Given a database cursor and model, check the derived-from
value accurately points to content in the archive.
The value can be nothing or must point to existing content. |
1,335 | def proc_ovrds(**kwargs):
return [
(k, v) for k, v in kwargs.items()
if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS
] | Bloomberg overrides
Args:
**kwargs: overrides
Returns:
list of tuples
Examples:
>>> proc_ovrds(DVD_Start_Dt='20180101')
[('DVD_Start_Dt', '20180101')]
>>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)
[('DVD_Start_Dt', '20180101')] |
1,336 | def get_session(self, token=None, signature=None):
if token is not None:
access_token, access_token_secret = token
session = self.session_obj(self.consumer_key,
self.consumer_secret,
access_token,
access_token_secret,
signature or self.signature_obj,
service=self)
else:
signature = signature or self.signature_obj
session = self.session_obj(self.consumer_key,
self.consumer_secret,
signature=signature,
service=self)
return session | If provided a `token` parameter, tries to retrieve a stored
`rauth.OAuth1Session` instance. Otherwise generates a new session
instance with the :class:`rauth.OAuth1Service.consumer_key` and
:class:`rauth.OAuth1Service.consumer_secret` stored on the
`rauth.OAuth1Service` instance.
:param token: A tuple of strings with which to memoize the session
object instance.
:type token: tuple |
1,337 | def attention_lm_attention_moe_tiny():
hparams = attention_lm_moe_small()
hparams.moe_layers = ""
hparams.attention_num_experts = 128
hparams.filter_size = 8192
hparams.attention_type = AttentionType.LOCAL_EXPERTS
return hparams | Cheap model for debugging.
Returns:
an hparams object. |
1,338 | def is_used(self, regs, i, top=None):
if i < 0:
i = 0
if self.lock:
return True
regs = list(regs)
if top is None:
top = len(self)
else:
top -= 1
for ii in range(i, top):
for r in self.mem[ii].requires:
if r in regs:
return True
for r in self.mem[ii].destroys:
if r in regs:
regs.remove(r)
if not regs:
return False
self.lock = True
result = self.goes_requires(regs)
self.lock = False
return result | Checks whether any of the given regs are required from the given point
to the end or not. |
1,339 | def get_variable_days(self, year):
days = super(LateSummer, self).get_variable_days(year)
days.append((
self.get_nth_weekday_in_month(year, 9, MON),
"Late Summer Holiday"
))
return days | Add Late Summer holiday (First Monday of September) |
1,340 | def create_service(self, *args, **kwargs):
return self._client.create_service(*args, scope=self.id, **kwargs) | Create a service to current scope.
See :class:`pykechain.Client.create_service` for available parameters.
.. versionadded:: 1.13 |
1,341 | def items(self):
from projexui.widgets.xnodewidget import XNode, XNodeConnection
output = []
for item in self.scene().items():
if not (isinstance(item, XNode) or
isinstance(item, XNodeConnection)):
continue
if item.layer() == self:
output.append(item)
return output | Returns a list of the items that are linked to this layer.
:return [<XNode> || <XNodeConnection>, ..] |
1,342 | def _serialize(self):
result = { a: getattr(self, a) for a in type(self).properties
if type(self).properties[a].mutable }
for k, v in result.items():
if isinstance(v, Base):
result[k] = v.id
return result | A helper method to build a dict of all mutable Properties of
this object |
1,343 | def make_steam64(id=0, *args, **kwargs):
accountid = id
etype = EType.Invalid
universe = EUniverse.Invalid
instance = None
if len(args) == 0 and len(kwargs) == 0:
value = str(accountid)
if value.isdigit():
value = int(value)
if 0 < value < 2**32:
accountid = value
etype = EType.Individual
universe = EUniverse.Public
elif value < 2**64:
return value
else:
result = steam2_to_tuple(value) or steam3_to_tuple(value)
if result:
(accountid,
etype,
universe,
instance,
) = result
else:
accountid = 0
elif len(args) > 0:
length = len(args)
if length == 1:
etype, = args
elif length == 2:
etype, universe = args
elif length == 3:
etype, universe, instance = args
else:
raise TypeError("Takes at most 4 arguments (%d given)" % length)
if len(kwargs) > 0:
        etype = kwargs.get('type', etype)
        universe = kwargs.get('universe', universe)
        instance = kwargs.get('instance', instance)
etype = (EType(etype)
if isinstance(etype, (int, EType))
else EType[etype]
)
universe = (EUniverse(universe)
if isinstance(universe, (int, EUniverse))
else EUniverse[universe]
)
if instance is None:
instance = 1 if etype in (EType.Individual, EType.GameServer) else 0
assert instance <= 0xffffF, "instance larger than 20bits"
return (universe << 56) | (etype << 52) | (instance << 32) | accountid | Returns steam64 from various other representations.
.. code:: python
make_steam64() # invalid steamid
make_steam64(12345) # accountid
make_steam64('12345')
make_steam64(id=12345, type='Invalid', universe='Invalid', instance=0)
make_steam64(103582791429521412) # steam64
make_steam64('103582791429521412')
make_steam64('STEAM_1:0:2') # steam2
make_steam64('[g:1:4]') # steam3 |
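As a quick check of the packing in the final return statement, a standalone sketch that round-trips the four fields (values chosen for the Public/Individual case; none of this comes from the library itself):

```python
# Pack: (universe << 56) | (etype << 52) | (instance << 32) | accountid
steam64 = (1 << 56) | (1 << 52) | (1 << 32) | 12345

# Unpack the same fields again.
accountid = steam64 & 0xFFFFFFFF        # low 32 bits
instance = (steam64 >> 32) & 0xFFFFF    # next 20 bits
etype = (steam64 >> 52) & 0xF           # next 4 bits
universe = (steam64 >> 56) & 0xFF       # high 8 bits
assert (universe, etype, instance, accountid) == (1, 1, 1, 12345)
```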
1,344 | def __get_league_object():
data = mlbgame.data.get_properties()
return etree.parse(data).getroot().find().find() | Returns the xml object corresponding to the league
Only designed for internal use |
1,345 | def _commit(self):
assert self.uri is not None, Exception("BadArgument: uri property cannot be None")
    url = '{}/{}'.format(self.uri, self.__class__.__name__)
    serialized_json = jsonpickle.encode(self, unpicklable=False)
    headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))}
response = Http.post(url=url, data=serialized_json, headers=headers)
if response.status_code != 200:
from ArubaCloud.base.Errors import MalformedJsonRequest
raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code))
content = jsonpickle.decode(response.content.decode("utf-8"))
if content[] == 17:
from ArubaCloud.base.Errors import OperationAlreadyEnqueued
raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__))
if content[] is False:
from ArubaCloud.base.Errors import RequestFailed
raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content))
return content | :return: (dict) Response object content |
1,346 | def clean_line(str, delimiter):
    return [x.strip() for x in str.strip().split(delimiter) if x != ''] | Split string on given delimiter, remove whitespace from each field. |
1,347 | def resample(self, section_length):
if len(self.points) < 2:
return Line(self.points)
    resampled_points = []
    # resample the first section to seed the list, then continue each remaining
    # section from the last point already produced
    resampled_points.extend(
        self.points[0].equally_spaced_points(self.points[1], section_length)
    )
    for i in range(2, len(self.points)):
points = resampled_points[-1].equally_spaced_points(
self.points[i], section_length
)
resampled_points.extend(points[1:])
return Line(resampled_points) | Resample this line into sections.
The first point in the resampled line corresponds
to the first point in the original line.
Starting from the first point in the original line, a line
segment is defined as the line connecting the last point in the
resampled line and the next point in the original line.
The line segment is then split into sections of length equal to
``section_length``. The resampled line is obtained
by concatenating all sections.
The number of sections in a line segment is calculated as follows:
``round(segment_length / section_length)``.
Note that the resulting line has a length that is an exact multiple of
``section_length``, therefore its length is in general smaller
or greater (depending on the rounding) than the length
of the original line.
For a straight line, the difference between the resulting length
and the original length is at maximum half of the ``section_length``.
For a curved line, the difference may be larger,
because of corners getting cut.
:param section_length:
The length of the section, in km.
:type section_length:
float
:returns:
A new line resampled into sections based on the given length.
:rtype:
An instance of :class:`Line` |
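A small worked example of the section-count rule quoted in the docstring (the numbers are illustrative only):

```python
# A 10.4 km segment resampled with section_length = 2.0 km:
segment_length = 10.4
section_length = 2.0
n_sections = round(segment_length / section_length)   # round(5.2) -> 5
resampled_length = n_sections * section_length        # 10.0 km vs. 10.4 km original
# For a straight line the 0.4 km difference stays below section_length / 2.
```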
1,348 | def expand_hostname_range(line = None):
all_hosts = []
if line:
all_hosts.append(hname)
return all_hosts | A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: http://ansible.github.com/patterns.html#hosts-and-groups |
1,349 | def decide_k(airport_code):
    if airport_code[:1].upper() == 'K':
try:
return Airport.objects.get(location_identifier__iexact=airport_code[1:]).location_identifier
except Airport.DoesNotExist:
return airport_code
else:
return airport_code | A function to decide if a leading 'K' is throwing off an airport match and return the correct code. |
1,350 | def strip(self, text):
tags, results = [], []
return self.re_tag.sub(lambda m: self.clear_tag(m, tags, results), text) | Return string with markup tags removed. |
1,351 | def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
_get_plt()
from statsmodels.graphics.utils import create_mpl_fig
fig = create_mpl_fig(fig, figsize)
res_wpr = self.arima_res_
data = res_wpr.data
    if hasattr(res_wpr, 'loglikelihood_burn'):
        d = res_wpr.loglikelihood_burn
        if hasattr(res_wpr, 'nobs_diffuse'):
            d = np.maximum(d, res_wpr.nobs_diffuse)
        resid = res_wpr.filter_results\
            .standardized_forecasts_error[variable, d:]
    else:
        d = 0
        r = res_wpr.resid
        resid = (r - np.nanmean(r)) / np.nanstd(r)
    ax = fig.add_subplot(221)
    if hasattr(data, 'dates') and data.dates is not None:
        x = data.dates[d:]._mpl_repr()
    else:
        x = np.arange(len(resid))
    ax.plot(x, resid)
    ax.hlines(0, x[0], x[-1], alpha=0.5)
    ax.set_xlim(x[0], x[-1])
    ax.set_title('Standardized residual')
    resid_nonmissing = resid[~(np.isnan(resid))]
    ax = fig.add_subplot(222)
    with warnings.catch_warnings(record=True):
        ax.hist(resid_nonmissing, normed=True, label='Hist')
    kde = gaussian_kde(resid_nonmissing)
    xlim = (-1.96 * 2, 1.96 * 2)
    x = np.linspace(xlim[0], xlim[1])
    ax.plot(x, kde(x), label='KDE')
    ax.plot(x, norm.pdf(x), label='N(0,1)')
    ax.set_xlim(xlim)
    ax.legend()
    ax.set_title('Histogram plus estimated density')
    ax = fig.add_subplot(223)
    from statsmodels.graphics.gofplots import qqplot
    qqplot(resid_nonmissing, line='s', ax=ax)
    ax.set_title('Normal Q-Q')
    ax = fig.add_subplot(224)
    from statsmodels.graphics.tsaplots import plot_acf
    plot_acf(resid, ax=ax, lags=lags)
    ax.set_title('Correlogram')
ax.set_ylim(-1, 1)
return fig | Plot an ARIMA's diagnostics.
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered clockwise
from top left):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residulas, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
pmdarima.utils.visualization.plot_acf
References
----------
.. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics # noqa: E501 |
1,352 | def clearDevice(self):
print(self.pre, "clearDevice")
if not self.device:
return
self.filterchain.delViewPort(self.viewport)
self.filterchain = None
self.device = None
self.video.update() | Remove the current stream |
1,353 | def members_from_score_range_in(
self, leaderboard_name, minimum_score, maximum_score, **options):
raw_leader_data = []
if self.order == self.DESC:
raw_leader_data = self.redis_connection.zrevrangebyscore(
leaderboard_name,
maximum_score,
minimum_score)
else:
raw_leader_data = self.redis_connection.zrangebyscore(
leaderboard_name,
minimum_score,
maximum_score)
return self._parse_raw_members(
leaderboard_name, raw_leader_data, **options) | Retrieve members from the named leaderboard within a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param minimum_score [float] Minimum score (inclusive).
@param maximum_score [float] Maximum score (inclusive).
@param options [Hash] Options to be used when retrieving the data from the leaderboard.
@return members from the leaderboard that fall within the given score range. |
1,354 | def set_language(self, language):
if isinstance(language, str):
language_obj = languages.getlang(language)
if language_obj:
self.language = language_obj.code
else:
raise TypeError("Language code {} not found".format(language))
if isinstance(language, languages.Language):
self.language = language.code | Set self.language to internal lang. repr. code from str or Language object. |
1,355 | def adjust_learning_rate(optimizer, epoch, gammas, schedule):
lr = args.learning_rate
assert len(gammas) == len(schedule), "length of gammas and schedule should be equal"
for (gamma, step) in zip(gammas, schedule):
if (epoch >= step):
lr = lr * gamma
else:
break
for param_group in optimizer.param_groups:
        param_group['lr'] = lr
return lr | Sets the learning rate to the initial LR decayed by 10 every 30 epochs |
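A minimal usage sketch, assuming the surrounding script defines a global `args` with `args.learning_rate = 0.1`, which the function body reads:

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# With schedule=[30, 60] and gammas=[0.1, 0.1] the rate is 0.1 for epochs
# 0-29, 0.01 for epochs 30-59, and 0.001 from epoch 60 on.
for epoch in (0, 30, 60):
    lr = adjust_learning_rate(optimizer, epoch, gammas=[0.1, 0.1], schedule=[30, 60])
```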
1,356 | def remove(self, auto_confirm=False, verbose=False):
    if not self.paths:
        logger.info(
            "Can't uninstall '%s'. No files were found to uninstall.",
            dist_name_version)
        return
    # [remainder of the method is truncated in this row; its other log
    #  messages are "Uninstalling %s:", "Removing file or directory %s"
    #  and "Successfully uninstalled %s"] | Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True). |
1,357 | def dict(self):
metadata = super(ImpactLayerMetadata, self).dict
    metadata['provenance'] = self.provenance
    metadata['summary_data'] = self.summary_data
return metadata | calls the overridden method and adds provenance and summary data
:return: dictionary representation of the metadata
:rtype: dict |
1,358 | def process_content(self, content, filename=None, content_type=None):
file_path, file_id = self.store_content(content, filename, content_type)
    self['file_id'] = file_id
    self['path'] = file_path
    saved_file = self.file
    self['filename'] = saved_file.filename
    self['content_type'] = saved_file.content_type
    self['uploaded_at'] = saved_file.last_modified.strftime('%Y-%m-%d %H:%M:%S')
    self['_public_url'] = saved_file.public_url
This is the standard depot implementation of files upload, it will
store the file on the default depot and will provide the standard
attributes.
Subclasses will need to call this method to ensure the standard
set of attributes is provided. |
1,359 | def parse_parameters(self, parameters):
self.parameters = []
for param_name, param_value in parameters.items():
p = Parameter(param_name, param_value)
if p:
self.parameters.append(p) | Parses and sets parameters in the model. |
1,360 | def output_data(self):
c = self.has_output
if c <= 0:
return None
try:
buf = self._pn_transport.peek(c)
except Exception as e:
self._connection_failed(str(e))
return None
return buf | Get a buffer of data that needs to be written to the network. |
1,361 | def cleanUp(self):
file_keys = self.file_keys
for item in file_keys:
if self[item] is not None:
self[item].close()
remove(self[item].name)
if hasattr(self, "_input_filename"):
remove(self._input_filename) | Delete files that are written by CommandLineApplication from disk
WARNING: after cleanUp() you may still have access to part of
your result data, but you should be aware that if the file
size exceeds the size of the buffer you will only have part
of the file. To be safe, you should not use cleanUp() until
you are done with the file or have copied it to a different
location. |
1,362 | def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
obj = handle.query_dn(vlan_profile_dest)
if obj:
handle.remove_mo(obj)
handle.commit()
except Exception as e:
raise cexc.UcsmConfigFailed(config=vlan_id,
ucsm_ip=ucsm_ip, exc=e) | Deletes VLAN Profile from UCS Manager. |
1,363 | async def zrangebylex(self, name, min, max, start=None, num=None):
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
    pieces = ['ZRANGEBYLEX', name, min, max]
    if start is not None and num is not None:
        pieces.extend([b('LIMIT'), start, num])
return await self.execute_command(*pieces) | Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range. |
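A hypothetical call pattern for the coroutine above, using standard Redis lexicographical range syntax; the client and key names are illustrative, not from the row:

```python
async def demo(client):
    # All members between 'a' and 'c' inclusive, in lexicographical order.
    members = await client.zrangebylex('myset', min='[a', max='[c')
    # A slice of the full range: skip the first element, return at most two.
    page = await client.zrangebylex('myset', min='-', max='+', start=1, num=2)
    return members, page
```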
1,364 | def save(self):
if self.__session:
self.session.commit()
else:
self.logger.warning("Save called but no session open.") | Save changes |
1,365 | def _get_area_rates(self, source, mmin, mmax=np.inf):
points = list(source)
for point in points:
self._get_point_rates(point, mmin, mmax) | Adds the rates from the area source by discretising the source
to a set of point sources
:param source:
Area source as instance of :class:
openquake.hazardlib.source.area.AreaSource |
1,366 | def delete(self):
    args = {'ids': [self.id]}
    _perform_command(self.project.owner, 'item_delete', args)
del self.project.owner.tasks[self.id] | Delete the task.
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('Homework')
>>> task = project.add_task('Read Chapter 4')
>>> task.delete() |
1,367 | def _validate_jp2c(self, boxes):
    jp2h_lst = [idx for (idx, box) in enumerate(boxes)
                if box.box_id == 'jp2h']
    jp2h_idx = jp2h_lst[0]
    jp2c_lst = [idx for (idx, box) in enumerate(boxes)
                if box.box_id == 'jp2c']
if len(jp2c_lst) == 0:
msg = ("A codestream box must be defined in the outermost "
"list of boxes.")
raise IOError(msg)
jp2c_idx = jp2c_lst[0]
if jp2h_idx >= jp2c_idx:
msg = "The codestream box must be preceeded by a jp2 header box."
raise IOError(msg) | Validate the codestream box in relation to other boxes. |
1,368 | def print_stream(file, name):
logger = logging.getLogger(.format(name))
for line in file:
logger.info(.format(name, line.strip())) | Print stream from file to logger. |
1,369 | def accept_record(self, record):
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete()
self.add_record(record)
self.last_record_accepted = datetime.utcnow() | Accept a record for inclusion in the community.
:param record: Record object. |
1,370 | def add_arg(self,arg, prepend=False):
self.args = [arg_.strip() for arg_ in self.args if arg_.strip()]
if arg.title() not in self.args:
if prepend:
self.args = [arg.title()] + self.args
else:
self.args.append(arg.title()) | Append an arg to the arg list |
1,371 | def _from_sql(self, soql):
assert not self.soql, "DonSELECT (.*) FROM (\w+)\b(.*)$Invalid SQL: %s,COUNT()\b\w+$ expr{}&&(&).....')[:-1]
for scrumb in root_crumbs:
subroots.setdefault(scrumb, {})
subroots = subroots[scrumb]
self.aliases.append(alias)
self.fields.append(field) | Create Force.com SOQL tree structure from SOQL |
1,372 | def organizer(self):
rule = getConstant()
org = {
: _(),
: {: _(), : _()},
: {: , : },
: None,
}
def updateForMonth(self, org):
if self.month:
org.update({
: _(month_name[self.month]),
: {: _(month_name[self.month]), : self.month},
: % self.month,
})
return org
def updateForSession(self, org):
if self.session:
org.update({
: self.session.name,
: {: _(self.session.name), : _(self.session.name)},
: self.session.pk,
})
return org
if rule in [, ]:
org = updateForSession(self, org)
if not org.get():
org = updateForMonth(self, org)
elif rule == :
org = updateForMonth(self, org)
elif rule in [,]:
org = updateForSession(self, org)
elif rule in [,]:
if self.session and self.month:
org.update({
: _( % (month_name[self.month], self.session.name)),
: {: _(month_name[self.month]), : self.month},
: {: _(self.session.name), : _(self.session.name)},
: % (self.month, self.session.pk),
})
elif not self.month:
org = updateForSession(self, org)
elif not self.session:
org = updateForMonth(self, org)
elif rule == :
w = self.weekday
d = day_name[w]
if w is not None:
org.update({
: _(d),
: {: _(d), : w},
: w,
})
elif rule == :
w = self.weekday
d = day_name[w]
m = self.month
mn = month_name[m]
if w is not None and m:
org.update({
: _( % (d, mn)),
: {: _(mn), : m},
: {: _( % d), : w},
: % (m, w)
})
return org | Since events can be organized for registration in different ways (e.g. by month,
by session, or the interaction of the two), this property is used to make it easy
for templates to include necessary organizing information. Note that this method
has nothing to do with the sorting of any queryset in use, which still has to be
handled elsewhere. |
1,373 | def _get_timestamp(dirname_full, remove):
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print(.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. |
1,374 | def set_attributes(self, **attributes_dict):
for attr_name, attr_value in attributes_dict.items():
self.set_attr(attr_name, attr_value)
return self | Set the value of multiple attributes.
:param attributes_dict dict: a dictionary containing key-value pairs as attribute names and values to be set
:returns: the resource itself |
1,375 | def convert_predict_response(pred, serving_bundle):
output = pred.outputs[serving_bundle.predict_output_tensor]
raw_output = output.float_val
    if serving_bundle.model_type == 'classification':
values = []
for example_index in range(output.tensor_shape.dim[0].size):
start = example_index * output.tensor_shape.dim[1].size
values.append(raw_output[start:start + output.tensor_shape.dim[1].size])
else:
values = raw_output
return convert_prediction_values(values, serving_bundle, pred.model_spec) | Converts a PredictResponse to ClassificationResponse or RegressionResponse.
Args:
pred: PredictResponse to convert.
serving_bundle: A `ServingBundle` object that contains the information about
the serving request that the response was generated by.
Returns:
A ClassificationResponse or RegressionResponse. |
1,376 | def cbv_decorator(function_decorator):
def class_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return class_decorator | Allows a function-based decorator to be used on a CBV. |
1,377 | def delete(self, symbol, date_range=None):
query = {SYMBOL: symbol}
date_range = to_pandas_closed_closed(date_range)
if date_range is not None:
assert date_range.start and date_range.end
        query[START] = {'$gte': date_range.start}
        query[END] = {'$lte': date_range.end}
else:
self._metadata.delete_one({SYMBOL: symbol})
return self._collection.delete_many(query) | Delete all chunks for a symbol.
Which are, for the moment, fully contained in the passed in
date_range.
Parameters
----------
symbol : `str`
symbol name for the item
date_range : `date.DateRange`
DateRange to delete ticks in |
1,378 | def generate_aead(hsm, args, password):
try:
pw = password.ljust(args.min_len, chr(0x0))
return hsm.generate_aead_simple(args.nonce.decode(), args.key_handle, pw)
except pyhsm.exception.YHSM_CommandFailed, e:
        if e.status_str == 'YSM_FUNCTION_DISABLED':
print "ERROR: The key handle %s is not permitted to YSM_AEAD_GENERATE." % (args.key_handle)
return None
else:
print "ERROR: %s" % (e.reason) | Generate an AEAD using the YubiHSM. |
1,379 | def get_errors(self):
return [{cr.component_name: cr.get_error()}
for cr in self.component_results if cr.has_error()] | If there were any business errors fetching data for this property,
returns the error messages.
Returns:
string - the error message, or None if there was no error. |
1,380 | def attachedimage_form_factory(lang=, debug=False):
langenru
yui = if debug else
class _AttachedImageAdminForm(forms.ModelForm):
caption = forms.CharField(label=_(), required=False)
class Media:
js = [
,
% (lang, yui,),
,
]
class Meta:
model = AttachedImage
return _AttachedImageAdminForm | Returns ModelForm class to be used in admin.
'lang' is the language for GearsUploader (can be 'en' and 'ru' at the
moment). |
1,381 | def log_erase_send(self, target_system, target_component, force_mavlink1=False):
return self.send(self.log_erase_encode(target_system, target_component), force_mavlink1=force_mavlink1) | Erase all logs
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t) |
1,382 | def _set_distribute_list(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=distribute_list.distribute_list, is_container=, presence=False, yang_name="distribute-list", rest_name="distribute-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__distribute_list = t
if hasattr(self, ):
self._set() | Setter method for distribute_list, mapped from YANG variable /bgp_state/neighbor/evpn/distribute_list (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_distribute_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_distribute_list() directly. |
1,383 | def normalize_genotypes(genotypes):
genotypes = genotypes.genotypes
return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes) | Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
numpy.array: The normalized genotypes. |
1,384 | def size(self):
old = self.__file.tell()
self.__file.seek(0, 2)
n_bytes = self.__file.tell()
self.__file.seek(old)
return n_bytes | Calculate and return the file size in bytes. |
1,385 | def allocate(self):
self.lock.acquire()
try:
id_ = self.next_id
self.next_id += 1
return id_
finally:
self.lock.release() | Arrange for a unique context ID to be allocated and associated with a
route leading to the active context. In masters, the ID is generated
directly, in children it is forwarded to the master via a
:data:`mitogen.core.ALLOCATE_ID` message. |
1,386 | def ungettext(self):
if six.PY2:
return self._translations.ungettext
else:
return self._translations.ngettext | Dispatch to the appropriate ngettext method to handle text objects.
Note that under python 3, this uses `ngettext()`, while under python 2,
it uses `ungettext()`. This should not be used with bytestrings. |
1,387 | def get_stacked_rnn(config: RNNConfig, prefix: str,
parallel_inputs: bool = False,
layers: Optional[Iterable[int]] = None) -> mx.rnn.SequentialRNNCell:
rnn = mx.rnn.SequentialRNNCell() if not parallel_inputs else SequentialRNNCellParallelInput()
if not layers:
layers = range(config.num_layers)
for layer_idx in layers:
cell_prefix = "%sl%d_" % (prefix, layer_idx)
if config.cell_type == C.LSTM_TYPE:
if config.dropout_recurrent > 0.0:
cell = RecurrentDropoutLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias, dropout=config.dropout_recurrent)
else:
cell = mx.rnn.LSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNLSTM_TYPE:
cell = LayerNormLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNGLSTM_TYPE:
cell = LayerNormPerGateLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias)
elif config.cell_type == C.GRU_TYPE:
cell = mx.rnn.GRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGRU_TYPE:
cell = LayerNormGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGGRU_TYPE:
cell = LayerNormPerGateGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
else:
raise NotImplementedError()
if config.dropout_inputs > 0 or config.dropout_states > 0:
cell = VariationalDropoutCell(cell,
dropout_inputs=config.dropout_inputs,
dropout_states=config.dropout_states)
if config.lhuc:
cell = LHUCCell(cell, config.num_hidden, config.dtype)
if config.residual and layer_idx + 1 >= config.first_residual_layer:
cell = mx.rnn.ResidualCell(cell) if not parallel_inputs else ResidualCellParallelInput(cell)
elif parallel_inputs:
cell = ParallelInputCell(cell)
rnn.add(cell)
return rnn | Returns (stacked) RNN cell given parameters.
:param config: rnn configuration.
:param prefix: Symbol prefix for RNN.
:param parallel_inputs: Support parallel inputs for the stacked RNN cells.
:param layers: Specify which layers to create as a list of layer indexes.
:return: RNN cell. |
1,388 | def euler_matrix(ai, aj, ak, axes='sxyz'):
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M | Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> np.allclose(np.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> np.allclose(np.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (np.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes) |
1,389 | def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
value = value.read()
print(value)
if not isinstance(value, str):
raise TypeError()
unsaved_files_array[i].name = c_string_p(name)
unsaved_files_array[i].contents = c_string_p(value)
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None | Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files, the first items should be the filenames to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects. |
1,390 | def _wmi_to_ts(self, wmi_ts):
year, month, day, hour, minute, second, microsecond, tz = to_time(wmi_ts)
tz_delta = timedelta(minutes=int(tz))
    if '+' in wmi_ts:
tz_delta = -tz_delta
dt = (
datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)
+ tz_delta
)
return int(calendar.timegm(dt.timetuple())) | Convert a wmi formatted timestamp into an epoch. |
1,391 | def yaml_get_data(filename):
    with open(filename, 'r') as fd:
yaml_data = yaml.load(fd)
return yaml_data
return False | Get data from .yml file |
1,392 | def get_environment_paths(config, env):
if env is None:
return config.get(Config.DEFAULTS, )
if config.has_option(Config.ENVIRONMENTS, env):
env = config.get(Config.ENVIRONMENTS, env).replace(, ).split()
else:
env = os.getenv(env)
if env:
env = env.split(os.pathsep)
return [i for i in env if i] | Get environment paths from given environment variable. |
1,393 | def memoize(fn):
memo = {}
@wraps(fn)
def wrapper(*args, **kwargs):
if not memoize.disabled:
key = pickle.dumps((args, kwargs))
if key not in memo:
memo[key] = fn(*args, **kwargs)
value = memo[key]
else:
value = fn(*args, **kwargs)
return value
return wrapper | Caches previous calls to the function. |
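A small usage sketch of the decorator above; `memoize.disabled` is set explicitly here because the wrapper reads it as an attribute on the decorator:

```python
memoize.disabled = False

@memoize
def slow_add(a, b):
    print("computing...")
    return a + b

slow_add(1, 2)   # prints "computing...", returns 3
slow_add(1, 2)   # second call is served from the cache, nothing is printed
```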
1,394 | def _cumprod(l):
ret = [1]
for item in l:
ret.append(ret[-1] * item)
return ret | Cumulative product of a list.
Args:
l: a list of integers
Returns:
a list with one more element (starting with 1) |
1,395 | def create_mirror_settings(repo_url):
cwd = os.getcwd()
settings_path = os.path.join(cwd, "settings.xml")
settings_file = None
try:
settings_file = open(settings_path, "w")
        settings_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        settings_file.write('<settings>\n')
        settings_file.write('  <mirrors>\n')
        settings_file.write('    <mirror>\n')
        settings_file.write('      <id>repo-mirror</id>\n')
        settings_file.write('      <name>Repository mirror</name>\n')
        settings_file.write('      <mirrorOf>*</mirrorOf>\n')
        settings_file.write('      <url>%s</url>\n' % repo_url)
        settings_file.write('    </mirror>\n')
        settings_file.write('  </mirrors>\n')
        settings_file.write('</settings>\n')
finally:
if settings_file:
settings_file.close()
return settings_path | Creates settings.xml in current working directory, which when used makes Maven use given repo URL as a mirror of all
repositories to look at.
:param repo_url: the repository URL to use
:returns: filepath to the created file |
1,396 | def verify(full, dataset_uri):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
all_okay = True
generated_manifest = dataset.generate_manifest()
generated_identifiers = set(generated_manifest["items"].keys())
manifest_identifiers = set(dataset.identifiers)
for i in generated_identifiers.difference(manifest_identifiers):
message = "Unknown item: {} {}".format(
i,
generated_manifest["items"][i]["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.difference(generated_identifiers):
message = "Missing item: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
for i in manifest_identifiers.intersection(generated_identifiers):
generated_hash = generated_manifest["items"][i]["size_in_bytes"]
manifest_hash = dataset.item_properties(i)["size_in_bytes"]
if generated_hash != manifest_hash:
message = "Altered item size: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if full:
for i in manifest_identifiers.intersection(generated_identifiers):
generated_hash = generated_manifest["items"][i]["hash"]
manifest_hash = dataset.item_properties(i)["hash"]
if generated_hash != manifest_hash:
message = "Altered item hash: {} {}".format(
i,
dataset.item_properties(i)["relpath"]
)
click.secho(message, fg="red")
all_okay = False
if not all_okay:
sys.exit(1)
else:
click.secho("All good :)", fg="green") | Verify the integrity of a dataset. |
1,397 | def sanitize_block(self, block):
    embed_type = block.get('type', None)
    data = block.get('data', {})
serializer = self.serializers.get(embed_type, None)
if serializer is None:
return block
block[] = serializer.to_internal_value(data)
return block | Santizes the data for the given block.
If block has a matching embed serializer, use the `to_internal_value` method. |
1,398 | def wallet_unlock(self, wallet, password):
    wallet = self._process_value(wallet, 'wallet')
    payload = {"wallet": wallet, "password": password}
    resp = self.call('wallet_unlock', payload)
    return resp['valid'] == '1' | Unlocks **wallet** using **password**
:param wallet: Wallet to unlock
:type wallet: str
:param password: Password to enter
:type password: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_unlock(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... password="test"
... )
True |
1,399 | def set_option(name, option):
ffi.lib.LLVMPY_SetCommandLine(_encode_string(name),
_encode_string(option)) | Set the given LLVM "command-line" option.
For example set_option("test", "-debug-pass=Structure") would display
all optimization passes when generating code. |