Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
2,500 | def check_file_for_tabs(filename, verbose=True):
file_contains_tabs = False
with open(filename) as f:
lines = f.read().split("\n")
line_no = 1
for line in lines:
if "\t" in line:
file_contains_tabs = True
location = [
i for i in range(len(line)) if line.startswith("\t", i)]
if verbose:
Console.error("Tab found in line {} and column(s) {}"
.format(line_no,
str(location).replace("[", "").replace(
"]", "")),
traceflag=False)
line_no += 1
return file_contains_tabs | identifies if the file contains tabs and returns True if it
does. It also prints the location of the lines and columns. If
verbose is set to False, the location is not printed.
:param verbose: if true prints information about issues
:param filename: the filename
:rtype: True if there are tabs in the file |
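A minimal usage sketch for the helper above; the file path is hypothetical and `Console` is assumed to come from the same codebase the function relies on:
```python
# Hypothetical invocation of check_file_for_tabs defined above.
if check_file_for_tabs("setup.py", verbose=True):
    print("Tabs found - convert them to spaces before committing.")
```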
2,501 | def new_filename(data, file_kind, ext):
nb_key = file_kind + "number"
if nb_key not in data.keys():
data[nb_key] = -1
if not data["override externals"]:
file_exists = True
while file_exists:
data[nb_key] = data[nb_key] + 1
filename, name = _gen_filename(data, nb_key, ext)
file_exists = os.path.isfile(filename)
else:
data[nb_key] = data[nb_key] + 1
filename, name = _gen_filename(data, nb_key, ext)
if data["rel data path"]:
rel_filepath = posixpath.join(data["rel data path"], name)
else:
rel_filepath = name
return filename, rel_filepath | Returns an available filename.
:param file_kind: Name under which numbering is recorded, such as 'img' or
'table'.
:type file_kind: str
:param ext: Filename extension.
:type ext: str
:returns: (filename, rel_filepath) where filename is a path in the
filesystem and rel_filepath is the path to be used in the tex
code. |
2,502 | def indent(text: str, num: int = 2) -> str:
lines = text.splitlines()
return "\n".join(indent_iterable(lines, num=num)) | Indent a piece of text. |
2,503 | def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler):
for n, url in enumerate(urls):
if verbose:
print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)),
utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler)
if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc):
raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force")
reassign_ids(xmldoc, verbose = verbose)
if verbose:
print >>sys.stderr, "merging elements ..."
merge_ligolws(xmldoc)
merge_compatible_tables(xmldoc)
return xmldoc | An implementation of the LIGO LW add algorithm. urls is a list of
URLs (or filenames) to load, xmldoc is the XML document tree to
which they should be added. |
2,504 | def load_extra_emacs_page_navigation_bindings():
registry = ConditionalRegistry(Registry(), EmacsMode())
handle = registry.add_binding
handle(Keys.ControlV)(scroll_page_down)
handle(Keys.PageDown)(scroll_page_down)
handle(Keys.Escape, 'v')(scroll_page_up)
handle(Keys.PageUp)(scroll_page_up)
return registry | Key bindings, for scrolling up and down through pages.
These are separate bindings, because GNU readline doesn't have them. |
2,505 | def terminate_bits(self, payload):
data_capacity = tables.data_capacity[self.version][self.error][0]
if len(payload) > data_capacity:
raise ValueError(
)
if len(payload) == data_capacity:
return None
elif len(payload) <= data_capacity-4:
bits = self.binary_string(0,4)
else:
bits = self.binary_string(0, data_capacity - len(payload))
return bits | This method adds zeros to the end of the encoded data so that the
encoded data is of the correct length. It returns a binary string
containing the bits to be added. |
2,506 | def status(self):
status_list = []
for platform in self._config.platforms.instances:
instance_name = platform['name']
driver_name = self.name
provisioner_name = self._config.provisioner.name
scenario_name = self._config.scenario.name
status_list.append(
Status(
instance_name=instance_name,
driver_name=driver_name,
provisioner_name=provisioner_name,
scenario_name=scenario_name,
created=self._created(),
converged=self._converged(),
))
return status_list | Collects the instances state and returns a list.
.. important::
Molecule assumes all instances were created successfully by
Ansible, otherwise Ansible would return an error on create. This
may prove to be a bad assumption. However, configuring Molecule's
driver to match the options passed to the playbook may prove
difficult. Especially in cases where the user is provisioning
instances off localhost.
:returns: list |
2,507 | def walk_egg(egg_dir):
walker = sorted_walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf | Walk an unpacked egg's contents, skipping the metadata directory |
2,508 | def _downloaded_filename(self):
link = self._link() or self._finder.find_requirement(self._req, upgrade=False)
if link:
lower_scheme = link.scheme.lower()
if lower_scheme == 'http' or lower_scheme == 'https':
file_path = self._download(link)
return basename(file_path)
elif lower_scheme == :
% (self._req,)) | Download the package's archive if necessary, and return its
filename.
--no-deps is implied, as we have reimplemented the bits that would
ordinarily do dependency resolution. |
2,509 | def operator(func=None, *, pipable=False):
def decorator(func):
bases = (Stream,)
name = func.__name__
module = func.__module__
extra_doc = func.__doc__
doc = extra_doc or f'Regular {name} stream operator.'
signature = inspect.signature(func)
parameters = list(signature.parameters.values())
if parameters and parameters[0].name in ('self', 'cls'):
raise ValueError(
"The first argument of an operator function cannot be named 'self' or 'cls'")
self_parameter = inspect.Parameter(
'self', inspect.Parameter.POSITIONAL_OR_KEYWORD)
cls_parameter = inspect.Parameter(
'cls', inspect.Parameter.POSITIONAL_OR_KEYWORD)
original = func
original.__qualname__ = name + '.original'
raw = func
raw.__qualname__ = name + '.raw'
def init(self, *args, **kwargs):
if pipable and args:
assert_async_iterable(args[0])
factory = functools.partial(self.raw, *args, **kwargs)
return Stream.__init__(self, factory)
new_parameters = [self_parameter] + parameters
init.__signature__ = signature.replace(parameters=new_parameters)
init.__qualname__ = name + '.__init__'
init.__name__ = '__init__'
init.__module__ = module
init.__doc__ = f'Initialize the {name} stream.'
if pipable:
def raw(*args, **kwargs):
if args:
assert_async_iterable(args[0])
return func(*args, **kwargs)
raw.__signature__ = signature
raw.__qualname__ = name + '.raw'
raw.__module__ = module
raw.__doc__ = doc
def pipe(cls, *args, **kwargs):
return lambda source: cls(source, *args, **kwargs)
if parameters and parameters[0].kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD):
new_parameters = [cls_parameter] + parameters[1:]
else:
new_parameters = [cls_parameter] + parameters
pipe.__signature__ = signature.replace(parameters=new_parameters)
pipe.__qualname__ = name + '.pipe'
pipe.__module__ = module
pipe.__doc__ = f'Pipable {name} stream operator.'
if extra_doc:
pipe.__doc__ += "\n\n " + extra_doc
attrs = {
'__init__': init,
'__module__': module,
'__doc__': doc,
'raw': staticmethod(raw),
'original': staticmethod(original),
'pipe': classmethod(pipe) if pipable else None}
return type(name, bases, attrs)
return decorator if func is None else decorator(func) | Create a stream operator from an asynchronous generator
(or any function returning an asynchronous iterable).
Decorator usage::
@operator
async def random(offset=0., width=1.):
while True:
yield offset + width * random.random()
Decorator usage for pipable operators::
@operator(pipable=True)
async def multiply(source, factor):
async with streamcontext(source) as streamer:
async for item in streamer:
yield factor * item
In the case of pipable operators, the first argument is expected
to be the asynchronous iterable used for piping.
The return value is a dynamically created class.
It has the same name, module and doc as the original function.
A new stream is created by simply instantiating the operator::
xs = random()
ys = multiply(xs, 2)
The original function is called at instantiation to check that
the signatures match. In the case of pipable operators, the source is
also checked for asynchronous iteration.
The operator also has a pipe class method that can be used along
with the piping syntax::
xs = random()
ys = xs | multiply.pipe(2)
This is strictly equivalent to the previous example.
Other methods are available:
- `original`: the original function as a static method
- `raw`: same as original but adds extra checking
The raw method is useful to create new operators from existing ones::
@operator(pipable=True)
def double(source):
return multiply.raw(source, 2) |
2,510 | def list_runids(s3_client, full_path):
listing_finished = False
run_ids_buffer = []
last_continuation_token = None
(bucket, prefix) = split_full_path(full_path)
while not listing_finished:
options = clean_dict({
'Bucket': bucket,
'Prefix': prefix,
: ,
'ContinuationToken': last_continuation_token
})
response = s3_client.list_objects_v2(**options)
keys = [extract_run_id(key['Key']) for key
in response.get('Contents', [])]
run_ids_buffer.extend([key for key in keys if key is not None])
last_continuation_token = response.get('NextContinuationToken', None)
if not response['IsTruncated']:
listing_finished = True
non_archived_run_ids = [run_id for run_id in run_ids_buffer
if not is_glacier(s3_client, bucket, run_id)]
return non_archived_run_ids | Return list of all run ids inside S3 folder. It does not respect
S3 pagination (`MaxKeys`) and returns **all** keys from bucket
and won't list any prefixes with object archived to AWS Glacier
Arguments:
s3_client - boto3 S3 client (not service)
full_path - full valid S3 path to events (such as enriched-archive)
example: s3://acme-events-bucket/main-pipeline/enriched-archive |
2,511 | def configure_settings(settings, environment_settings=True):
changes = 1
iterations = 0
while changes:
changes = 0
app_names = [] + list(settings['INSTALLED_APPS'])
if environment_settings:
app_names.append()
for app_name in app_names:
import django_autoconfig.contrib
if autoconfig_module_exists(app_name):
module = importlib.import_module("%s.autoconfig" % (app_name,))
elif app_name in django_autoconfig.contrib.CONTRIB_CONFIGS:
module = django_autoconfig.contrib.CONTRIB_CONFIGS[app_name]
else:
continue
changes += merge_dictionaries(
settings,
getattr(module, 'SETTINGS', {}),
template_special_case=True,
)
changes += merge_dictionaries(
settings,
getattr(module, 'DEFAULT_SETTINGS', {}),
only_defaults=True,
)
for relationship in getattr(module, 'RELATIONSHIPS', []):
changes += relationship.apply_changes(settings)
if iterations >= MAX_ITERATIONS:
raise ImproperlyConfigured(
)
iterations += 1
LOGGER.debug("Autoconfiguration took %d iterations.", iterations) | Given a settings object, run automatic configuration of all
the apps in INSTALLED_APPS. |
2,512 | def _do_refresh_session(self):
if self._session and self._last_session_refresh + self._loop_wait > time.time():
return False
if self._session:
try:
self._client.session.renew(self._session)
except NotFound:
self._session = None
ret = not self._session
if ret:
try:
self._session = self._client.session.create(name=self._scope + '-' + self._name,
checks=self.__session_checks,
lock_delay=0.001, behavior='delete')
except InvalidSessionTTL:
logger.exception()
self.adjust_ttl()
raise
self._last_session_refresh = time.time()
return ret | :returns: `!True` if it had to create new session |
2,513 | def precedence(item):
try:
mro = item.__class__.__mro__
except AttributeError:
return PRECEDENCE["Atom"]
for i in mro:
n = i.__name__
if n in PRECEDENCE_FUNCTIONS:
return PRECEDENCE_FUNCTIONS[n](item)
elif n in PRECEDENCE_VALUES:
return PRECEDENCE_VALUES[n]
return PRECEDENCE["Atom"] | Returns the precedence of a given object. |
2,514 | def flatten(nested_iterable):
if not isinstance(nested_iterable, (list, tuple)):
yield nested_iterable
else:
for i in nested_iterable:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i | Flattens arbitrarily nested lists/tuples.
Code partially taken from https://stackoverflow.com/a/10824420.
Parameters
----------
nested_iterable
A list or tuple of arbitrarily nested values.
Yields
------
any
Non-list and non-tuple values in `nested_iterable`. |
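A quick, self-contained check of the generator documented above (assumes `flatten` from this row is in scope):
```python
nested = [1, [2, (3, 4)], (5, [6])]
# Arbitrarily nested lists/tuples are yielded in order, non-container values pass through.
assert list(flatten(nested)) == [1, 2, 3, 4, 5, 6]
```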
2,515 | def remove_override(self, key):
keys = key.split()
if len(keys) > 1:
raise NotImplementedError
elif key in self.overrides:
del self.overrides[key]
self._uncache(key) | Remove a setting override, if one exists. |
2,516 | def spielman_wr(self, norm=True):
wr = []
for r in range(self.nsites):
num = 0
den = 0
for i in range(N_CODON):
j = scipy.intersect1d(scipy.where(CODON_SINGLEMUT[i]==True)[0],
scipy.where(CODON_NONSYN[i]==True)[0])
p_i = self.stationarystate[r][i]
P_xy = self.Prxy[r][i][j].sum()
if norm:
P_xy = P_xy/self.omega
Q_xy = self.Qxy[i][j].sum()
num += (p_i * P_xy)
den += (p_i * Q_xy)
result = num/den
wr.append(result)
return wr | Returns a list of site-specific omega values calculated from the `ExpCM`.
Args:
`norm` (bool)
If `True`, normalize the `omega_r` values by the ExpCM
gene-wide `omega`.
Returns:
`wr` (list)
list of `omega_r` values of length `nsites`
Following
`Spielman and Wilke, MBE, 32:1097-1108 <https://doi.org/10.1093/molbev/msv003>`_,
we can predict the `dN/dS` value for each site `r`,
:math:`\\rm{spielman}\\omega_r`, from the `ExpCM`.
When `norm` is `False`, the `omega_r` values are defined as
:math:`\\rm{spielman}\\omega_r = \\frac{\\sum_x \\sum_{y \\in N_x}p_{r,x}\
P_{r,xy}}{\\sum_x \\sum_{y \\in Nx}p_{r,x}Q_{xy}}`,
where `r,x,y`, :math:`p_{r,x}`, :math:`P_{r,xy}`, and :math:`Q_{x,y}`
have the same definitions as in the main `ExpCM` doc string and :math:`N_{x}`
is the set of codons which are non-synonymous to codon `x` and differ from
`x` by one nucleotide.
When `norm` is `True`, the `omega_r` values above are divided by the
ExpCM `omega` value. |
2,517 | def _match_line(self, city_name, lines):
for line in lines:
toponym = line.split(',')[0]
if toponym.lower() == city_name.lower():
return line.strip()
return None | The lookup is case insensitive and returns the first matching line,
stripped.
:param city_name: str
:param lines: list of str
:return: str |
2,518 | def get_default_config(self):
config = super(rmqHandler, self).get_default_config()
config.update({
: ,
: ,
})
return config | Return the default config for the handler |
2,519 | def setImage(self,
img,
autoRange=True,
useAutoLevels=None,
levels=None,
axes=None,
pos=None,
scale=None,
transform=None,
):
if hasattr(img, 'implements') and img.implements('MetaArray'):
img = img.asarray()
if not isinstance(img, np.ndarray):
required = ['dtype', 'max', 'min', 'ndim', 'shape', 'size']
if not all([hasattr(img, attr) for attr in required]):
raise TypeError("Image must be NumPy array or any object "
"that provides compatible attributes/methods:\n"
" %s" % str(required))
self.image = img
self.imageDisp = None
if axes is None:
x, y = (0, 1) if self.imageItem.axisOrder == 'col-major' else (1, 0)
if img.ndim == 2:
self.axes = {'t': None, 'x': x, 'y': y, 'c': None}
elif img.ndim == 3:
if img.shape[2] <= 4:
self.axes = {'t': None, 'x': x, 'y': y, 'c': 2}
else:
self.axes = {'t': 0, 'x': x+1, 'y': y+1, 'c': None}
elif img.ndim == 4:
self.axes = {'t': 0, 'x': x+1, 'y': y+1, 'c': 3}
else:
raise Exception(
"Can not interpret image with dimensions %s" %
(str(img.shape)))
elif isinstance(axes, dict):
self.axes = axes.copy()
elif isinstance(axes, list) or isinstance(axes, tuple):
self.axes = {}
for i in range(len(axes)):
self.axes[axes[i]] = i
else:
raise Exception(
"Can not interpret axis specification %s. "
"Must be like {: 2, : 0, : 1} or "
"(, , , )" % (str(axes)))
for x in [, , , ]:
self.axes[x] = self.axes.get(x, None)
axes = self.axes
self.currentIndex = 0
if levels is None and useAutoLevels:
self._useAutoLevels= useAutoLevels
if levels is not None:
self.setLevels(*levels)
self._updateImage()
self._updateLabelInfo()
self.imageItem.resetTransform()
if scale is not None:
self.imageItem.scale(*scale)
if pos is not None:
self.imageItem.setPos(*pos)
if transform is not None:
self.imageItem.setTransform(transform) | Set the image to be displayed in the widget.
================== ===========================================================================
**Arguments:**
img (numpy array) the image to be displayed. See :func:`ImageItem.setImage` and
*notes* below.
xvals (numpy array) 1D array of z-axis values corresponding to the third axis
in a 3D image. For video, this array should contain the time of each frame.
autoRange (bool) whether to scale/pan the view to fit the image.
useAutoLevels (bool) whether to update the white/black levels to fit the image.
levels (min, max); the white and black level values to use.
axes Dictionary indicating the interpretation for each axis.
This is only needed to override the default guess. Format is::
{'t':0, 'x':1, 'y':2, 'c':3};
pos Change the position of the displayed image
scale Change the scale of the displayed image
transform Set the transform of the displayed image. This option overrides *pos*
and *scale*.
autoHistogramRange If True, the histogram y-range is automatically scaled to fit the
image data.
================== ===========================================================================
**Notes:**
For backward compatibility, image data is assumed to be in column-major order (column, row).
However, most image data is stored in row-major order (row, column) and will need to be
transposed before calling setImage()::
imageview.setImage(imagedata.T)
This requirement can be changed by the ``imageAxisOrder``
:ref:`global configuration option <apiref_config>`. |
2,520 | def regularizer(name, regularization_fn, name_filter=):
regex = re.compile(name_filter)
def fn(var_name, variable, phase):
if phase is pt.Phase.train and regex.search(var_name):
with tf.name_scope(None, name, [variable]):
loss = regularization_fn(variable)
if loss is not None:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)
return variable
return fn | Wraps a regularizer in a parameter-function.
Args:
name: The name scope for this regularizer.
regularization_fn: A function with signature:
fn(variable) -> loss `Tensor` or `None`.
name_filter: A regex that will be used to filter variables by name.
Returns:
A parameter modification function that adds the loss to the
REGULARIZATION_LOSSES graph key. |
2,521 | def concat_batch_variantcalls(items, region_block=True, skip_jointcheck=False):
items = [utils.to_single_data(x) for x in items]
batch_name = _get_batch_name(items, skip_jointcheck)
variantcaller = _get_batch_variantcaller(items)
if not variantcaller and all(d.get("vrn_file") for d in items):
return {"vrn_file": items[0]["vrn_file"]}
out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, "%s.vcf.gz" % (batch_name))
utils.safe_makedir(os.path.dirname(out_file))
if region_block:
regions = [_region_to_coords(rs[0]) for rs in items[0]["region_block"]]
else:
regions = [_region_to_coords(r) for r in items[0]["region"]]
vrn_file_regions = items[0]["vrn_file_region"]
out_file = vcfutils.concat_variant_files(vrn_file_regions, out_file, regions,
dd.get_ref_file(items[0]), items[0]["config"])
return {"vrn_file": out_file} | CWL entry point: combine variant calls from regions into single VCF. |
2,522 | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
C = self.COEFFS[imt]
C_PGA = self.COEFFS[PGA()]
imt_per = 0 if imt.name == 'PGV' else imt.period
pga_rock = self._get_pga_on_rock(C_PGA, rup, dists)
mean = (self._get_magnitude_scaling_term(C, rup) +
self._get_path_scaling(C, dists, rup.mag) +
self._get_site_scaling(C, pga_rock, sites, imt_per, dists.rjb))
stddevs = self._get_stddevs(C, rup, dists, sites, stddev_types)
return mean, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. |
2,523 | def process_large_file(self, local_file, parent):
file_content_sender = FileUploader(self.settings.config, self.settings.data_service, local_file,
self.settings.watcher, self.settings.file_upload_post_processor)
remote_id = file_content_sender.upload(self.settings.project_id, parent.kind, parent.remote_id)
local_file.set_remote_id_after_send(remote_id) | Upload a single file using multiple processes to upload multiple chunks at the same time.
Updates local_file with its remote_id when done.
:param local_file: LocalFile: file we are uploading
:param parent: LocalFolder/LocalProject: parent of the file |
2,524 | def get_variant_type(variant_source):
file_type = get_file_type(variant_source)
variant_type = 'sv'
if file_type == 'vcf':
variants = VCF(variant_source)
elif file_type == 'gemini':
variants = GeminiQuery(variant_source)
gemini_query = "SELECT * from variants"
variants.run(gemini_query)
for i,variant in enumerate(variants):
if file_type == 'vcf':
if variant.is_snp:
variant_type = 'snv'
elif file_type == 'gemini':
if variant['type'] == 'snp':
variant_type = 'snv'
if i > 1000:
break
return variant_type | Try to find out what type of variants that exists in a variant source
Args:
variant_source (str): Path to variant source
source_mode (str): 'vcf' or 'gemini'
Returns:
variant_type (str): 'sv' or 'snv' |
2,525 | def twoQ_gates(self):
two_q_gates = []
for node in self.gate_nodes():
if len(node.qargs) == 2:
two_q_gates.append(node)
return two_q_gates | Get list of 2-qubit gates. Ignore snapshot, barriers, and the like. |
2,526 | def get_build_controllers(self, name=None):
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
response = self._send(http_method='GET',
location_id=,
version=,
query_parameters=query_parameters)
return self._deserialize('[BuildController]', self._unwrap_collection(response)) | GetBuildControllers.
Gets controller, optionally filtered by name
:param str name:
:rtype: [BuildController] |
2,527 | def _assert_path_is_rw(self):
if not self.path:
raise ValueError("`path` argument must be set!")
if not os.path.exists(self.path):
raise IOError("`%s` not found." % self.path)
if not os.path.isdir(self.path):
raise IOError("`%s` is not a directory!" % self.path)
if not os.access(self.path, os.R_OK | os.W_OK):
raise IOError(
"Can't access `%s`, please check permissions." % self.path
Make sure that `self.path` exists and is a readable/writeable directory.
Raises:
IOError: In case that any of the assumptions failed.
ValueError: In case that `self.path` is not set. |
2,528 | def clone(self, name=None):
if name is None:
name = self.module_name + "_clone"
return MLP(
name=name,
output_sizes=self.output_sizes,
activation=self.activation,
activate_final=self.activate_final,
initializers=self.initializers,
partitioners=self.partitioners,
regularizers=self.regularizers,
use_bias=self.use_bias,
use_dropout=self.use_dropout) | Creates a new MLP with the same structure.
Args:
name: Optional string specifying the name of the new module. The default
name is constructed by appending "_clone" to the original name.
Returns:
A cloned `MLP` module. |
2,529 | def _GetShowID(self, showName):
self._GetTitleList()
self._GetIDList()
for index, showTitle in enumerate(self._showTitleList):
if showName == showTitle:
return self._showIDList[index]
return None | Get epguides show id for a given show name.
Attempts to match the given show name against a show title in
self._showTitleList and, if found, returns the corresponding index
in self._showIDList.
Parameters
----------
showName : string
Show name to get show ID for.
Returns
----------
int or None
If a show id is found this will be returned, otherwise None is returned. |
2,530 | def writePIDFile(self):
with self._jobStore.writeSharedFileStream('pid.log') as f:
f.write(str(os.getpid()).encode()) | Write the pid of this process to a file in the jobstore.
Overwriting the current contents of pid.log is a feature, not a bug of this method.
Other methods will rely on always having the most current pid available.
So far there is no reason to store any old pids. |
2,531 | def proximity_metric(self, a, b):
res = 1
for ap, bp, n in zip(a.path_parts, b.path_parts, list(range(4))):
res += ap == bp
if n >= 3:
break
return res | Return the weight of the dependency from a to b. Higher weights
usually have shorter straighter edges. Return 1 if it has normal
weight. A value of 4 is usually good for ensuring that a related
pair of modules are drawn next to each other.
Returns an int between 1 (unknown, default), and 4 (very related). |
2,532 | def taskfileinfo_descriptor_data(tfi, role):
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return tfi.descriptor | Return the data for descriptor
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the descriptor
:rtype: depending on role
:raises: None |
2,533 | def import_apps_submodule(submodule):
found_apps = []
for appconfig in apps.get_app_configs():
app = appconfig.name
if import_module_or_none('{}.{}'.format(app, submodule)) is not None:
found_apps.append(app)
return found_apps | Look for a submodule in a series of packages, e.g. ".pagetype_plugins", in all INSTALLED_APPS. |
2,534 | def promote(self, content):
for n, v in content.data:
if isinstance(v, list):
content.data = v
return
content.data = [] | Promote (replace) the content.data with the first attribute
of the current content.data that is a I{list}. Note: the
content.data may be empty or contain only _x attributes.
In either case, the content.data is assigned an empty list.
@param content: An array content.
@type content: L{Content} |
2,535 | def _set_pos(self, pos):
if self._canvas.height < self._max_height:
pos *= self._max_height - self._canvas.height + 1
pos = int(round(max(0, pos), 0))
self._canvas.scroll_to(pos) | Set current position for scroll bar. |
2,536 | def match(tgt, opts=None):
if not opts:
opts = __opts__
nodegroups = opts.get('nodegroups', {})
matchers = salt.loader.matchers(opts)
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error()
return False
log.debug('compound_match: %s ? %s', opts['id'], tgt)
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None,
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt[:]
while words:
word = words.pop(0)
target_info = salt.utils.minions.parse_target(word)
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator: %s', word)
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
if word not in ['(', 'not']:
log.error('Invalid beginning operator: %s', word)
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
if decomposed:
words = decomposed + words
continue
engine = ref.get(target_info['engine'])
if not engine:
log.error(
'Unrecognized target engine "%s" for target expression "%s"', target_info['engine'], word
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
six.text_type(matchers['{0}_match.match'.format(engine)](*engine_args, **engine_kwargs))
)
else:
results.append(six.text_type(matchers['glob_match.match'](word)))
results = ' '.join(results)
log.debug('compound_match %s ? "%s" => "%s"', opts['id'], tgt, results)
try:
return eval(results)
except Exception:
log.error(
'Invalid compound target: %s for results: %s', tgt, results)
return False
return False | Runs the compound target check |
2,537 | def autofit(ts, maxp=5, maxd=2, maxq=5, sc=None):
assert sc != None, "Missing SparkContext"
jmodel = sc._jvm.com.cloudera.sparkts.models.ARIMA.autoFit(_py2java(sc, Vectors.dense(ts)), maxp, maxd, maxq)
return ARIMAModel(jmodel=jmodel, sc=sc) | Utility function to help in fitting an automatically selected ARIMA model based on approximate
Akaike Information Criterion (AIC) values. The model search is based on the heuristic
developed by Hyndman and Khandakar (2008) and described in [[http://www.jstatsoft
.org/v27/i03/paper]]. In contrast to the algorithm in the paper, we use an approximation to
the AIC, rather than an exact value. Note that if the maximum differencing order provided
does not suffice to induce stationarity, the function returns a failure, with the appropriate
message. Additionally, note that the heuristic only considers models that have parameters
satisfying the stationarity/invertibility constraints. Finally, note that our algorithm is
slightly more lenient than the original heuristic. For example, the original heuristic
rejects models with parameters "close" to violating stationarity/invertibility. We only
reject those that actually violate it.
This functionality is even less mature than some of the other model fitting functions here, so
use it with caution.
Parameters
----------
ts:
time series to which to automatically fit an ARIMA model as a Numpy array
maxP:
limit for the AR order
maxD:
limit for differencing order
maxQ:
limit for the MA order
sc:
The SparkContext, required.
returns an ARIMAModel |
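A hedged usage sketch of the fitting helper above; it assumes a running PySpark session with the spark-ts JVM package on the classpath, and the series values are synthetic:
```python
import numpy as np
from pyspark import SparkContext

sc = SparkContext.getOrCreate()        # spark-ts (sparkts) jars must be available to the JVM
ts = np.random.randn(200).cumsum()     # made-up time series, for illustration only
model = autofit(ts, maxp=5, maxd=2, maxq=5, sc=sc)
```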
2,538 | def _record_field_to_json(fields, row_value):
record = {}
isdict = isinstance(row_value, dict)
for subindex, subfield in enumerate(fields):
subname = subfield.name
if isdict:
subvalue = row_value.get(subname)
else:
subvalue = row_value[subindex]
record[subname] = _field_to_json(subfield, subvalue)
return record | Convert a record/struct field to its JSON representation.
Args:
fields ( \
Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`], \
):
The :class:`~google.cloud.bigquery.schema.SchemaField`s of the
record's subfields to use for type conversion and field names.
row_value (Union[Tuple[Any], Mapping[str, Any]):
A tuple or dictionary to convert to JSON-serializable values.
Returns:
Mapping[str, any]:
A JSON-serializable dictionary. |
2,539 | async def handle_user_exception(self, error: Exception) -> Response:
if isinstance(error, HTTPException) and not self.trap_http_exception(error):
return await self.handle_http_exception(error)
handler = self._find_exception_handler(error)
if handler is None:
raise error
return await handler(error) | Handle an exception that has been raised.
This should forward :class:`~quart.exception.HTTPException` to
:meth:`handle_http_exception`, then attempt to handle the
error. If it cannot it should reraise the error. |
2,540 | def excepthook (self, etype, evalue, etb):
self.inner_excepthook (etype, evalue, etb)
if issubclass (etype, KeyboardInterrupt):
signal.signal (signal.SIGINT, signal.SIG_DFL)
os.kill (os.getpid (), signal.SIGINT) | Handle an uncaught exception. We always forward the exception on to
whatever `sys.excepthook` was present upon setup. However, if the
exception is a KeyboardInterrupt, we additionally kill ourselves with
an uncaught SIGINT, so that invoking programs know what happened. |
2,541 | def _maybe_parse_configurable_reference(self):
if self._current_token.value != '@':
return False, None
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
evaluate = False
if self._current_token.value == '(':
evaluate = True
self._advance()
if self._current_token.value != ')':
self._raise_syntax_error("Expected ')'.")
self._advance_one_token()
self._skip_whitespace_and_comments()
with utils.try_with_location(location):
reference = self._delegate.configurable_reference(scoped_name, evaluate)
return True, reference | Try to parse a configurable reference (@[scope/name/]fn_name[()]). |
2,542 | def size(dtype):
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'size'):
return dtype.size
return np.dtype(dtype).itemsize | Returns the number of bytes to represent this `dtype`. |
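A small illustration of the byte-size lookup above (assuming TensorFlow and NumPy are importable and `size` is in scope):
```python
import numpy as np
import tensorflow as tf

assert size(tf.float32) == 4   # tf.DType exposes .size in bytes
assert size(np.int64) == 8     # numpy dtypes fall back to np.dtype(...).itemsize
```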
2,543 | def process_nxml_str(nxml_str, citation=None, offline=False,
output_fname=default_output_fname):
if offline:
if not try_offline:
logger.error()
return None
try:
api_ruler = reach_reader.get_api_ruler()
except ReachOfflineReadingError as e:
logger.error(e)
logger.error(
)
return None
try:
result_map = api_ruler.annotateNxml(nxml_str, 'fries')
except JavaException as e:
logger.error()
logger.error(e)
return None
json_str = result_map.get()
if not json_str:
json_str = result_map.get()
if json_str is None:
logger.warning()
return None
if isinstance(json_str, bytes):
json_str = json_str.decode()
return process_json_str(json_str, citation)
else:
data = {'nxml_str': nxml_str}
try:
res = requests.post(reach_nxml_url, data)
except requests.exceptions.RequestException as e:
logger.error()
logger.error(e)
return None
if res.status_code != 200:
logger.error(
+ % res.status_code)
return None
json_str = res.text
with open(output_fname, 'wb') as fh:
fh.write(json_str.encode())
return process_json_str(json_str, citation) | Return a ReachProcessor by processing the given NXML string.
NXML is the format used by PubmedCentral for papers in the open
access subset.
Parameters
----------
nxml_str : str
The NXML string to be processed.
citation : Optional[str]
A PubMed ID passed to be used in the evidence for the extracted INDRA
Statements. Default: None
offline : Optional[bool]
If set to True, the REACH system is ran offline. Otherwise (by default)
the web service is called. Default: False
output_fname : Optional[str]
The file to output the REACH JSON output to.
Defaults to reach_output.json in current working directory.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements. |
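A hedged sketch of calling the reader documented above; the NXML file path and PMID are hypothetical, and the REACH web service must be reachable when `offline=False`:
```python
with open("paper.nxml") as fh:   # hypothetical NXML file from the PMC open-access subset
    nxml = fh.read()

rp = process_nxml_str(nxml, citation="12345678", offline=False)
if rp is not None:
    print(f"Extracted {len(rp.statements)} INDRA Statements")
```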
2,544 | def loudest_triggers_from_cli(opts, coinc_parameters=None,
sngl_parameters=None, bank_parameters=None):
bin_results = []
ifos = opts.sngl_trigger_files.keys()
bins_idx, bank_data = bank_bins_from_cli(opts)
bin_names = bins_idx.keys()
if opts.statmap_file and opts.bank_file and opts.sngl_trigger_files:
for bin_name in bin_names:
data = {}
statmap = hdf.ForegroundTriggers(
opts.statmap_file, opts.bank_file,
sngl_files=opts.sngl_trigger_files.values(),
n_loudest=opts.search_n_loudest,
group=opts.statmap_group)
template_hash = statmap.get_bankfile_array("template_hash")
stat = statmap.get_coincfile_array("stat")
bin_idx = numpy.in1d(template_hash,
bank_data["template_hash"][bins_idx[bin_name]])
sorting = stat[bin_idx].argsort()[::-1]
for p in coinc_parameters:
arr = statmap.get_coincfile_array(p)
data[p] = arr[bin_idx][sorting][:opts.n_loudest]
for p in sngl_parameters:
for ifo in ifos:
key = "/".join([ifo, p])
arr = statmap.get_snglfile_array_dict(p)[ifo]
data[key] = arr[bin_idx][sorting][:opts.n_loudest]
for p in bank_parameters:
arr = statmap.get_bankfile_array(p)
data[p] = arr[bin_idx][sorting][:opts.n_loudest]
bin_results.append(data)
elif opts.bank_file and opts.sngl_trigger_files:
for bin_name in bin_names:
data = {}
if len(opts.sngl_trigger_files.keys()) == 1:
ifo = opts.sngl_trigger_files.keys()[0]
else:
raise ValueError("Too many IFOs")
sngls = hdf.SingleDetTriggers(opts.sngl_trigger_files[ifo],
opts.bank_file, opts.veto_file,
opts.veto_segment_name, None, ifo)
n_loudest = opts.search_n_loudest \
if opts.search_n_loudest else len(sngls.template_id)
sngls.mask_to_n_loudest_clustered_events(n_loudest=n_loudest)
template_hash = \
sngls.bank["template_hash"][:][sngls.template_id]
bin_idx = numpy.in1d(template_hash,
bank_data["template_hash"][bins_idx[bin_name]])
stats = sngls.stat
sorting = stats[bin_idx].argsort()[::-1]
for p in sngl_parameters:
key = "/".join([ifo, p])
arr = sngls.get_column(p)
data[key] = arr[bin_idx][sorting][:opts.n_loudest]
for p in bank_parameters:
arr = sngls.bank[p][:]
data[p] = \
arr[sngls.template_id][bin_idx][sorting][:opts.n_loudest]
bin_results.append(data)
else:
raise ValueError("Must have --bank-file and --sngl-trigger-files")
return bin_names, bin_results | Parses the CLI options related to find the loudest coincident or
single detector triggers.
Parameters
----------
opts : object
Result of parsing the CLI with OptionParser.
coinc_parameters : list
List of datasets in statmap file to retrieve.
sngl_parameters : list
List of datasets in single-detector trigger files to retrieve.
bank_parameters : list
List of datasets in template bank file to retrieve.
Results
-------
bin_names : dict
A list of bin names.
bin_results : dict
A list of dict holding trigger data data. |
2,545 | def do_alarm_definition_patch(mc, args):
fields = {}
fields['alarm_id'] = args.id
if args.name:
fields['name'] = args.name
if args.description:
fields['description'] = args.description
if args.expression:
fields['expression'] = args.expression
if args.alarm_actions:
fields['alarm_actions'] = _arg_split_patch_update(args.alarm_actions, patch=True)
if args.ok_actions:
fields['ok_actions'] = _arg_split_patch_update(args.ok_actions, patch=True)
if args.undetermined_actions:
fields['undetermined_actions'] = _arg_split_patch_update(args.undetermined_actions,
patch=True)
if args.actions_enabled:
if args.actions_enabled not in enabled_types:
errmsg = ('Invalid value, not one of [' +
', '.join(enabled_types) + ']')
print(errmsg)
return
fields['actions_enabled'] = args.actions_enabled in ['true', 'True']
if args.severity:
if not _validate_severity(args.severity):
return
fields['severity'] = args.severity
try:
alarm = mc.alarm_definitions.patch(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError( % (he.message, he.details))
else:
print(jsonutils.dumps(alarm, indent=2)) | Patch the alarm definition. |
2,546 | def _connect(self):
"Create a Unix domain socket connection"
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.socket_timeout)
sock.connect(self.path)
return sock | Create a Unix domain socket connection |
2,547 | def der_cert(der_data):
if isinstance(der_data, str):
der_data = bytes(der_data, 'utf-8')
return x509.load_der_x509_certificate(der_data, default_backend()) | Load a DER encoded certificate
:param der_data: DER-encoded certificate
:return: A cryptography.x509.certificate instance |
2,548 | def shutil_rmtree_onerror(func: Callable[[str], None],
path: str,
exc_info: EXC_INFO_TYPE) -> None:
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
exc = exc_info[1]
raise exc | Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``
See
https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied |
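The documented usage, spelled out as a runnable sketch (the directory path is hypothetical):
```python
import shutil

# Retries read-only files after adding write permission, as described in the docstring.
shutil.rmtree("build/readonly-tree", onerror=shutil_rmtree_onerror)
```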
2,549 | def output(self,pin,value):
self.mraa_gpio.Gpio.write(self.mraa_gpio.Gpio(pin), value) | Set the specified pin the provided high/low value. Value should be
either 1 (ON or HIGH), or 0 (OFF or LOW) or a boolean. |
2,550 | def console_get_default_background(con: tcod.console.Console) -> Color:
return Color._new_from_cdata(
lib.TCOD_console_get_default_background(_console(con))
) | Return this consoles default background color.
.. deprecated:: 8.5
Use :any:`Console.default_bg` instead. |
2,551 | def check_for_rerun_user_task(self):
data = self.current.input
if 'wf_meta' in data:
return
current_task = self.workflow.get_tasks(Task.READY)[0]
current_task_type = current_task.task_spec.__class__.__name__
pre_task = current_task.parent
pre_task_type = pre_task.task_spec.__class__.__name__
if pre_task_type != 'UserTask':
return
if current_task_type == 'EndEvent':
return
pre_lane = pre_task.task_spec.lane
current_lane = current_task.task_spec.lane
if pre_lane == current_lane:
pre_task._set_state(Task.READY)
current_task._set_state(Task.MAYBE) | Checks that the user task needs to re-run.
If necessary, current task and pre task's states are changed and re-run.
If wf_meta not in data(there is no user interaction from pre-task) and last completed task
type is user task and current step is not EndEvent and there is no lane change,
this user task is rerun. |
2,552 | def mag_cal_progress_encode(self, compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z):
return MAVLink_mag_cal_progress_message(compass_id, cal_mask, cal_status, attempt, completion_pct, completion_mask, direction_x, direction_y, direction_z) | Reports progress of compass calibration.
compass_id : Compass being calibrated (uint8_t)
cal_mask : Bitmask of compasses being calibrated (uint8_t)
cal_status : Status (see MAG_CAL_STATUS enum) (uint8_t)
attempt : Attempt number (uint8_t)
completion_pct : Completion percentage (uint8_t)
completion_mask : Bitmask of sphere sections (see http://en.wikipedia.org/wiki/Geodesic_grid) (uint8_t)
direction_x : Body frame direction vector for display (float)
direction_y : Body frame direction vector for display (float)
direction_z : Body frame direction vector for display (float) |
2,553 | def middleware_in_executor(middleware):
@wraps(middleware)
def _(environ, start_response):
loop = get_event_loop()
return loop.run_in_executor(None, middleware, environ, start_response)
return _ | Use this middleware to run a synchronous middleware in the event loop
executor.
Useful when using synchronous web-frameworks such as :django:`django <>`. |
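A hedged sketch of wrapping a synchronous WSGI callable so it runs in the event-loop executor; the Django settings module name is hypothetical:
```python
import os
from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")  # hypothetical settings module
wsgi_app = get_wsgi_application()                   # plain synchronous WSGI callable
async_wsgi_app = middleware_in_executor(wsgi_app)   # now safe to call from the asyncio event loop
```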
2,554 | def and_terms(*args):
args = [arg if not isinstance(arg, list) else ' AND '.join(arg) for arg in args]
return '({})'.format(' AND '.join(args))
Args:
An arbitrary number of either strings or lists of strings representing query terms.
Returns
A query string consisting of argument terms and'ed together. |
2,555 | def filter_by_pattern(self, pattern):
_filt_values, _filt_datetimes = self._filter_by_pattern(pattern)
if self._enumeration is None:
self._get_mutable_enumeration()
col_obj = self._enumeration['mutable'][self._collection_type]
collection = col_obj(self.header.duplicate(), _filt_values, _filt_datetimes)
collection._validated_a_period = self._validated_a_period
return collection | Filter the Data Collection based on a list of booleans.
Args:
pattern: A list of True/False values. Typically, this is a list
with a length matching the length of the Data Collections values
but it can also be a pattern to be repeated over the Data Collection.
Return:
A new Data Collection with filtered data |
2,556 | def openXmlDocument(path=None, file_=None, data=None, url=None, mime_type=None):
if path is not None:
file_ = open(path, 'rb')
elif file_ is not None:
assert hasattr(file_, 'read')
elif url is not None:
file_ = urllib2.urlopen(url)
if mime_type is None:
mime_type = file_.headers.gettype()
elif data is not None:
file_ = cStringIO.StringIO(data)
assert mime_type is not None
else:
raise ValueError("Either path, file_, data, or url should be provided")
if mime_type is not None:
for class_ in _document_classes:
if class_.canProcessMime(mime_type):
return class_(file_, mime_type=mime_type)
raise ValueError("%s MIME type is unknown." % mime_type)
else:
assert hasattr(file_, 'name')
for class_ in _document_classes:
if class_.canProcessFilename(file_.name):
return class_(file_, mime_type=mime_type)
raise ValueError("Can't guess mime_type. You should set the mime_type param")
return | **Factory function**
Will guess what document type is best suited and return the appropriate
document type.
User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.
:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`.
Note that ``mime_tyype`` parameter **must** be provided if you provide the
Open XML document through the ``data`` parameter. Otherwise, if you don't
provide one, we'll try to guess which is the most appropriate using the file
extension.
:return: A subclass of :class:`openxmllib.document.Document`. |
2,557 | def _compute_base_term(self, C, rup, dists):
c1 = self.CONSTS['c1']
R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2)
base_term = (C['a1'] +
C['a12'] * ((8.5 - rup.mag) ** 2) +
(C['a3'] + self.CONSTS['a13'] * (rup.mag - c1)) *
np.log(R))
if rup.mag <= c1:
return base_term + self.CONSTS['a2'] * (rup.mag - c1)
else:
return base_term + self.CONSTS['a4'] * (rup.mag - c1)
1, page 74. The calculation of this term is explained in paragraph
'Base Model', page 75. |
2,558 | def p_generate_if_woelse(self, p):
p[0] = IfStatement(p[3], p[5], None, lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | generate_if : IF LPAREN cond RPAREN gif_true_item |
2,559 | def add_x509_key_descriptors(metadata, cert=None, add_encryption=True):
if cert is None or cert == '':
return metadata
try:
root = OneLogin_Saml2_XML.to_etree(metadata)
except Exception as e:
raise Exception('Error parsing metadata. ' + str(e))
assert root.tag == '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD
try:
sp_sso_descriptor = next(root.iterfind('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))
except StopIteration:
raise Exception()
if add_encryption:
OneLogin_Saml2_Metadata.__add_x509_key_descriptors(sp_sso_descriptor, cert, False)
OneLogin_Saml2_Metadata.__add_x509_key_descriptors(sp_sso_descriptor, cert, True)
return OneLogin_Saml2_XML.to_string(root) | Adds the x509 descriptors (sign/encryption) to the metadata
The same cert will be used for sign/encrypt
:param metadata: SAML Metadata XML
:type metadata: string
:param cert: x509 cert
:type cert: string
:param add_encryption: Determines if the KeyDescriptor[use="encryption"] should be added.
:type add_encryption: boolean
:returns: Metadata with KeyDescriptors
:rtype: string |
2,560 | def __getitem_slice(self, slce):
scaled_indices = (self._step * n for n in slce.indices(self._len))
start_offset, stop_offset, new_step = scaled_indices
return newrange(self._start + start_offset,
self._start + stop_offset,
new_step) | Return a range which represents the requested slce
of the sequence represented by this range. |
2,561 | def validate_config(cls, config):
if "discovery" not in config:
raise ValueError("No discovery method defined.")
installed_balancers = Balancer.get_installed_classes().keys()
if not any([balancer in config for balancer in installed_balancers]):
raise ValueError("No available balancer configs defined.") | Validates a config dictionary parsed from a cluster config file.
Checks that a discovery method is defined and that at least one of
the balancers in the config are installed and available. |
2,562 | def start(token,
control=False,
trigger=,
groups=None,
groups_pillar_name=None,
fire_all=False,
tag='salt/engines/slack'):
if (not token) or (not token.startswith('xoxb')):
time.sleep(2) | Listen to slack events and forward them to salt, new version |
2,563 | def comparable(self):
string_parts = []
if self.location is not None:
string_parts.append('location: {0:s}'.format(self.location))
if self.store_index is not None:
string_parts.append('store index: {0:d}'.format(self.store_index))
return self._GetComparable(sub_comparable_string=', '.join(string_parts)) | str: comparable representation of the path specification. |
2,564 | def set_value(self, dry_wet: LeakSensorState):
if dry_wet == LeakSensorState.DRY:
self._update_subscribers(0x11)
else:
self._update_subscribers(0x13) | Set the state to wet or dry. |
2,565 | def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data | replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Scale
If the method is called asynchronously,
returns the request thread. |
2,566 | def set_encode_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64):
if store_type == PUBLIC_KEY_STORE_TYPE_PEM:
PublicKeyBase.set_encode_key_value(self, value.exportKey().decode(), store_type)
else:
PublicKeyBase.set_encode_key_value(self, value.exportKey(), store_type) | Set the value based on the type of encoding supported by RSA. |
2,567 | def _segment_index(self, recarr, existing_index, start, new_segments):
idx_col = self._datetime64_index(recarr)
existing_index_arr = np.frombuffer(decompress(existing_index), dtype=INDEX_DTYPE)
if start > 0:
existing_index_arr = existing_index_arr[existing_index_arr['index'] < start]
index = np.concatenate((existing_index_arr, index))
return Binary(compress(index.tostring()))
elif existing_index:
raise ArcticException("Could not find datetime64 index in item but existing data contains one")
return None | Generate index of datetime64 -> item offset.
Parameters:
-----------
new_data: new data being written (or appended)
existing_index: index field from the versions document of the previous version
start: first (0-based) offset of the new data
segments: list of offsets. Each offset is the row index of the
the last row of a particular chunk relative to the start of the _original_ item.
array(new_data) - segments = array(offsets in item)
Returns:
--------
Binary(compress(array([(index, datetime)]))
Where index is the 0-based index of the datetime in the DataFrame |
2,568 | def parse_py(s, **kwargs):
nbf = current_nbformat
nbm = current_nbformat_minor
pattern = r'# <nbformat>(?P<nbformat>\d+[\.\d+]*)</nbformat>'
m = re.search(pattern,s)
if m is not None:
digits = m.group('nbformat').split('.')
nbf = int(digits[0])
if len(digits) > 1:
nbm = int(digits[1])
return nbf, nbm, s | Parse a string into a (nbformat, string) tuple. |
2,569 | def next_page(self, max_=None):
result = type(self)()
result.after = After(self.last.value)
result.max_ = max_
return result | Return a query set which requests the page after this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the next page.
Must be called on a result set which has :attr:`last` set. |
2,570 | def create(self, friendly_name=values.unset, sync_service_sid=values.unset):
data = values.of({'FriendlyName': friendly_name, 'SyncServiceSid': sync_service_sid, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return DeploymentInstance(self._version, payload, fleet_sid=self._solution['fleet_sid'], ) | Create a new DeploymentInstance
:param unicode friendly_name: A human readable description for this Deployment.
:param unicode sync_service_sid: The unique identifier of the Sync service instance.
:returns: Newly created DeploymentInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentInstance |
2,571 | def migrate_database(adapter):
all_variants = adapter.get_variants()
nr_variants = all_variants.count()
nr_updated = 0
with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
for variant in bar:
if 'chrom' in variant:
continue
nr_updated += 1
splitted_id = variant['_id'].split('_')
chrom = splitted_id[0]
start = int(splitted_id[1])
ref = splitted_id[2]
alt = splitted_id[3]
end = start + (max(len(ref), len(alt)) - 1)
adapter.db.variant.find_one_and_update(
{'_id': variant['_id']},
{
'$set': {
'chrom': chrom,
'start': start,
'end': end
}
}
)
return nr_updated | Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated |
2,572 | async def subscribe(self, *args, **kwargs):
if args:
args = list_or_args(args[0], args[1:])
new_channels = {}
new_channels.update(dict.fromkeys(map(self.encode, args)))
for channel, handler in iteritems(kwargs):
new_channels[self.encode(channel)] = handler
ret_val = await self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
self.channels.update(new_channels)
return ret_val | Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``. |
2,573 | def calculateHurst(self, series, exponent=None):
rescaledRange = list()
sizeRange = list()
rescaledRangeMean = list()
if(exponent is None):
exponent = self.bestExponent(len(series))
for i in range(0, exponent):
partsNumber = int(math.pow(2, i))
size = int(len(series)/partsNumber)
sizeRange.append(size)
rescaledRange.append(0)
rescaledRangeMean.append(0)
for x in range(0, partsNumber):
start = int(size*(x))
limit = int(size*(x+1))
deviationAcumulative = self.sumDeviation(self.deviation(
series, start, limit, self.mean(series, start, limit)))
deviationsDifference = float(
max(deviationAcumulative) - min(deviationAcumulative))
standartDeviation = self.standartDeviation(
series, start, limit)
if(deviationsDifference != 0 and standartDeviation != 0):
rescaledRange[i] += (deviationsDifference /
standartDeviation)
y = 0
for x in rescaledRange:
rescaledRangeMean[y] = x/int(math.pow(2, y))
y = y+1
rescaledRangeLog = list()
sizeRangeLog = list()
for i in range(0, exponent):
rescaledRangeLog.append(math.log(rescaledRangeMean[i], 10))
sizeRangeLog.append(math.log(sizeRange[i], 10))
slope, intercept = np.polyfit(sizeRangeLog, rescaledRangeLog, 1)
ablineValues = [slope * i + intercept for i in sizeRangeLog]
plt.plot(sizeRangeLog, rescaledRangeLog, )
plt.plot(sizeRangeLog, ablineValues, )
plt.title(slope)
limitUp = 0
if(max(sizeRangeLog) > max(rescaledRangeLog)):
limitUp = max(sizeRangeLog)
else:
limitUp = max(rescaledRangeLog)
limitDown = 0
if(min(sizeRangeLog) > min(rescaledRangeLog)):
limitDown = min(rescaledRangeLog)
else:
limitDown = min(sizeRangeLog)
plt.gca().set_xlim(limitDown, limitUp)
plt.gca().set_ylim(limitDown, limitUp)
print("Hurst exponent: " + str(slope))
plt.show()
return slope | :type series: List
:type exponent: int
:rtype: float |
2,574 | def no_ssl_verification(self):
try:
from functools import partialmethod
except ImportError:
from functools import partial
class partialmethod(partial):
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))
old_request = requests.Session.request
requests.Session.request = partialmethod(old_request, verify=False)
warnings.filterwarnings('ignore', 'Unverified HTTPS request')
yield
warnings.resetwarnings()
requests.Session.request = old_request | The requests module fails due to Let's Encrypt SSL encryption; this will be fixed in a future release. |
2,575 | def train(self, ftrain):
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None | Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1]) |
2,576 | def warning(self, message, *args, **kwargs):
self.system.warning(message, *args, **kwargs) | Log warning event.
Compatible with logging.warning signature. |
2,577 | def searchTriples(expnums,ccd):
import MOPfits,os
import MOPdbaccess
if len(expnums)!=3:
return(-1)
mysql=MOPdbaccess.connect(,,)
bucket=mysql.cursor()
proc_file = open("proc-these-files","w")
proc_file.write("
proc_file.write("
import string
import os.path
filenames=[]
import pyfits
for expnum in expnums:
bucket.execute("SELECT obs_iq_refccd FROM exposure WHERE expnum=%s" , (expnum, ) )
row=bucket.fetchone()
fwhm=row[0]
if not fwhm > 0:
fwhm=1.0
if int(ccd)<18:
cutout="[-*,-*]"
else:
cutout=None
filename=MOPfits.adGet(str(expnum)+"p",extno=int(ccd),cutout=cutout)
print filename
if not os.access(filename,os.R_OK):
return(-3)
filename=os.path.splitext(filename)
filenames.append(filename[0])
proc_file.write("%s %f %s \n" % ( filename[0], fwhm/0.183, "no"))
proc_file.flush()
proc_file.close()
command="find.pl -p -d ./ "
sys.stderr.write(command)
try:
os.system(command)
except:
sys.stderr.write("Failed while running find")
file_extens=[
"cands.comb",
"measure3.cands.astrom",
"measure3.WARNING",
"measure3.astrom.scatter"]
if os.access("find.OK",os.R_OK):
os.system("touch /home/cadc/kavelaar/results/05AQ06B/"+filenames[0]+".OK")
else:
os.system("touch /home/cadc/kavelaar/results/05AQ06B/"+filenames[0]+".FAILED")
import shutil
for ext in file_extens:
if os.access(filenames[0]+"."+ext,os.R_OK):
shutil.copy(filenames[0]+"."+ext,"/home/cadc/kavelaar/results/05AQ06B")
astrom=filenames[0]+".measure3.cands.astrom"
print astrom
cmd = "mpc_gen.pl -c "+astrom
print os.access(astrom,os.R_OK)
if os.access(astrom,os.R_OK):
print cmd
os.system(cmd)
os.system("mpcIngest.pl *.MPC")
os.system("cp *.MPC /home/cadc/kavelaar/results/05AQ06B")
return(1)
return(0) | Given a list of exposure numbers, find all the KBOs in that set of exposures |
2,578 | def sort_index(self, **kwargs):
axis = kwargs.pop("axis", 0)
index = self.columns if axis else self.index
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
def sort_index_builder(df, **kwargs):
if axis:
df.columns = index
else:
df.index = index
return df.sort_index(axis=axis, **kwargs)
func = self._prepare_method(sort_index_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
if axis:
new_columns = pandas.Series(self.columns).sort_values(**kwargs)
new_index = self.index
else:
new_index = pandas.Series(self.index).sort_values(**kwargs)
new_columns = self.columns
return self.__constructor__(
new_data, new_index, new_columns, self.dtypes.copy()
) | Sorts the data with respect to either the columns or the indices.
Returns:
DataManager containing the data sorted by columns or indices. |
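For intuition, the map-across-full-axis builder above reduces, at the plain-pandas level, to the following hedged sketch (ordinary pandas, not the partitioned frames the query compiler really manipulates):

import pandas as pd

df = pd.DataFrame({"b": [2, 1], "a": [4, 3]}, index=[10, 5])

rows_sorted = df.sort_index(axis=0)   # reorder rows: index becomes [5, 10]
cols_sorted = df.sort_index(axis=1)   # reorder columns: ['a', 'b']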
2,579 | def __record(self, oid=None):
f = self.__getFileObj(self.dbf)
recordContents = self.__recStruct.unpack(f.read(self.__recStruct.size))
if recordContents[0] != b' ':
return None
record = []
for (name, typ, size, deci), value in zip(self.fields, recordContents):
if name == 'DeletionFlag':
continue
elif typ in ("N","F"):
value = value.split(b'\0')[0]
value = value.replace(b'*', b'')
if value == b'':
value = None
elif deci:
try:
value = float(value)
except ValueError:
value = None
else:
try:
value = int(value)
except ValueError:
try:
value = int(float(value))
except ValueError:
value = None
elif typ == 'D':
if value.count(b'0') == len(value):
value = None
else:
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = date(y, m, d)
except:
value = value.strip()
elif typ == 'L':
if value == b" ":
value = None
else:
if value in b'YyTt':
value = True
elif value in b'NnFf':
value = False
else:
value = None
else:
value = u(value, self.encoding, self.encodingErrors)
value = value.strip()
record.append(value)
return _Record(self.__fieldposition_lookup, record, oid) | Reads and returns a dbf record row as a list of values. |
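A hedged restatement of the per-field conversion rules applied above, using the dBASE type codes ('N'/'F' numeric, 'D' date, 'L' logical); this mirrors the reader for illustration and is not the reader itself:

from datetime import date

def convert_dbf_value(typ, raw, encoding="ascii"):
    """Convert one raw dbf field value (bytes) to a Python object."""
    if typ in ("N", "F"):                 # numeric: int if possible, else float, blank -> None
        raw = raw.strip()
        if not raw:
            return None
        try:
            return int(raw)
        except ValueError:
            return float(raw)
    if typ == "D":                        # date stored as b"YYYYMMDD"
        return date(int(raw[:4]), int(raw[4:6]), int(raw[6:8]))
    if typ == "L":                        # logical: Y/T true, N/F false, otherwise unknown
        if raw in b"YyTt":
            return True
        if raw in b"NnFf":
            return False
        return None
    return raw.decode(encoding).strip()   # character fields

print(convert_dbf_value("D", b"20240131"))   # -> 2024-01-31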
2,580 | def get_song(self, netease=False):
song = self._playlist.get(True)
self.hash_sid[song['sid']] = True
self.get_netease_song(song, netease)
self._playingsong = song
return song | Fetch a song; the unified external interface. |
2,581 | def serve_forever(self):
loop = True
while loop:
loop = self.__serve_forever()
self.end() | Wrapper to the serve_forever function. |
2,582 | def find(cls, *args, **kwargs):
return cls.from_cursor(cls.collection.find(*args, **kwargs)) | Same as ``collection.find``, returns model object instead of dict. |
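A hedged usage sketch, assuming a hypothetical model class `User` built on this mixin with `collection` bound to a PyMongo collection:

# Illustrative: "User" and the filter are assumptions, not part of the library.
active_users = User.find({"active": True})
for user in active_users:
    print(user)   # model objects produced by from_cursor, not raw dicts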
2,583 | def create(self, fields):
try:
cleaned_fields = {}
for key, value in fields.items():
if type(value) is dict:
try:
if value[] == :
fake_fk = self.fake_fk(value[])
cleaned_fields.update({key: fake_fk})
except:
pass
else:
cleaned_fields.update({key: value})
model_class = self.model_class()
obj = model_class.objects.create(**cleaned_fields)
for key, value in fields.items():
if type(value) is dict:
try:
if value[] == :
self.fake_m2m(obj, value[])
except:
pass
try:
obj.save_m2m()
except:
obj.save()
return obj
except Exception as e:
raise e | Create the object only once.
To create several objects, call this method in a loop.
:param fields: dictionary of field names and values. |
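A hedged sketch of the `fields` shape the method above appears to expect: plain values go straight to `objects.create`, while dict values mark related fields. The original marker keys were lost in this dump, so the keys used below ('type', 'model') are assumptions.

# Illustrative only; marker keys and model labels are assumed.
fields = {
    "title": "Sample post",
    "author": {"type": "fk", "model": "auth.User"},   # built via fake_fk()
    "tags": {"type": "m2m", "model": "blog.Tag"},     # built via fake_m2m()
}
obj = fixture.create(fields)    # `fixture` is an instance of the class above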
2,584 | def transfer(self, receiver_address, amount, sender_account):
self._keeper.token.token_approve(receiver_address, amount,
sender_account)
self._keeper.token.transfer(receiver_address, amount, sender_account) | Transfer a number of tokens from `sender_account` to `receiver_address`
:param receiver_address: hex str ethereum address to receive this transfer of tokens
:param amount: int number of tokens to transfer
:param sender_account: Account instance to take the tokens from
:return: bool |
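A hedged usage sketch of the approve-then-transfer flow above (the address, amount, and account are placeholders):

# `tokens` is an instance of the class defining transfer() above;
# `sender_account` and the receiver address are placeholders.
receiver = "0x0000000000000000000000000000000000000001"
tokens.transfer(receiver, 10, sender_account)   # approves, then transfers 10 tokens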
2,585 | def createMemoryParserCtxt(buffer, size):
ret = libxml2mod.xmlCreateMemoryParserCtxt(buffer, size)
if ret is None:raise parserError()
return parserCtxt(_obj=ret) | Create a parser context for an XML in-memory document. |
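A short usage sketch with the libxml2 Python bindings (assuming the standard binding API, where parseDocument() and doc() are the usual follow-up calls):

import libxml2

xml = "<root><child/></root>"
ctxt = libxml2.createMemoryParserCtxt(xml, len(xml))
ctxt.parseDocument()                    # parse the in-memory buffer
doc = ctxt.doc()                        # retrieve the parsed document
print(doc.getRootElement().name)        # -> "root"
doc.freeDoc()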
2,586 | def luks_cleartext_holder(self):
if not self.is_luks:
return None
for device in self._daemon:
if device.luks_cleartext_slave == self:
return device
return None | Get wrapper to the unlocked luks cleartext device. |
2,587 | def run(self, verbose=False):
log = []
modules_copy = dict(sys.modules)
for modname, module in modules_copy.items():
if modname == :
print(modname, module)
print(self.previous_modules)
if modname not in self.previous_modules:
modpath = getattr(module, '__file__', None)
if modpath is None:
continue
if not self.is_module_blacklisted(modname, modpath):
log.append(modname)
del sys.modules[modname]
if verbose and log:
print("\x1b[4;33m%s\x1b[24m%s\x1b[0m" % ("UMD has deleted",
": " + ", ".join(log))) | Del user modules to force Python to deeply reload them
Do not delete modules that are considered system modules, i.e.
modules installed in subdirectories of the Python interpreter's binary,
and do not delete C modules. |
2,588 | def _cleanup(self, kill, verbose):
if kill:
removed_indices = self.g.prune()
self.nout -= len(removed_indices)
if verbose and removed_indices:
print( % removed_indices)
for j in removed_indices:
self.inv_map.pop(j[0]) | Look for dead components (weight=0) and remove them
if enabled by ``kill``.
Resize storage. Recompute determinant and covariance. |
2,589 | def receive_message(
sock, operation, request_id, max_message_size=MAX_MESSAGE_SIZE):
header = _receive_data_on_socket(sock, 16)
length = _UNPACK_INT(header[:4])[0]
actual_op = _UNPACK_INT(header[12:])[0]
if operation != actual_op:
raise ProtocolError("Got opcode %r but expected "
"%r" % (actual_op, operation))
if request_id is not None:
response_id = _UNPACK_INT(header[8:12])[0]
if request_id != response_id:
raise ProtocolError("Got response id %r but expected "
"%r" % (response_id, request_id))
if length <= 16:
raise ProtocolError("Message length (%r) not longer than standard "
"message header size (16)" % (length,))
if length > max_message_size:
raise ProtocolError("Message length (%r) is larger than server max "
"message size (%r)" % (length, max_message_size))
return _receive_data_on_socket(sock, length - 16) | Receive a raw BSON message or raise socket.error. |
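For reference, the 16-byte header unpacked above follows the MongoDB wire-protocol layout (four little-endian int32s); a minimal sketch of decoding it with struct:

import struct

def parse_header(header: bytes):
    # messageLength, requestID, responseTo, opCode -- all little-endian int32.
    length, request_id, response_to, op_code = struct.unpack("<iiii", header[:16])
    return length, request_id, response_to, op_code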
2,590 | def convert_sqlite_to_mysql(
self):
from fundamentals.renderer import list_of_dictionaries
from fundamentals.mysql import directory_script_runner
self.log.debug()
con = lite.connect(self.pathToSqlite)
con.row_factory = lite.Row
cur = con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cur.fetchall()
createStatements = []
inserts = []
for table in tables:
table = table['name']
if table == "sqlite_sequence":
continue
cur.execute(
"SELECT sql FROM sqlite_master WHERE name = '%(table)s';" % locals())
createStatement = cur.fetchone()
createStatement = createStatement[0].replace(, ) + ";"
if "DEFAULT" not in createStatement:
if "primary key(" in createStatement:
tmp = createStatement.split("primary key(")
tmp[0] = tmp[0].replace(
",", " varchar(150) DEFAULT NULL,")
createStatement = ("primary key(").join(tmp)
if "primary key," in createStatement:
tmp = createStatement.split("primary key,")
tmp[1] = tmp[1].replace(
",", " varchar(150) DEFAULT NULL,")
tmp[1] = tmp[1].replace(
");", " varchar(150) DEFAULT NULL);")
createStatement = ("primary key,").join(tmp)
createStatement = createStatement.replace(
"INTEGER PRIMARY KEY", "INTEGER AUTO_INCREMENT PRIMARY KEY")
createStatement = createStatement.replace(
"AUTOINCREMENT", "AUTO_INCREMENT")
createStatement = createStatement.replace(
"DEFAULT ", "DEFAULT ")
createStatement = createStatement.replace(
"DEFAULT ", "DEFAULT ")
createStatement = createStatement.replace(",", ",")
createStatement = createStatement.replace(",", ",")
if "CREATE TABLE `" in createStatement:
createStatement = createStatement.replace(
"CREATE TABLE `", "CREATE TABLE IF NOT EXISTS `" + self.tablePrefix)
else:
createStatement = createStatement.replace(
"CREATE TABLE ", "CREATE TABLE IF NOT EXISTS " + self.tablePrefix)
if ", primary key(" in createStatement:
createStatement = createStatement.replace(", primary key(", )
else:
createStatement = createStatement.replace(");", )
createStatement = createStatement.replace(
" text primary key", " varchar(100) primary key")
createStatement = createStatement.replace(
"`EntryText` TEXT NOT NULL,", "`EntryText` TEXT,")
createStatement = createStatement.replace(
"`SelectionText` TEXT NOT NULL", "`SelectionText` TEXT")
createStatement = createStatement.replace(
"`Filename` INTEGER NOT NULL,", "`Filename` TEXT NOT NULL,")
createStatement = createStatement.replace(
"`SessionPartUUID` TEXT NOT NULL UNIQUE,", "`SessionPartUUID` VARCHAR(100) NOT NULL UNIQUE,")
createStatement = createStatement.replace(
"`Name` TEXT PRIMARY KEY NOT NULL", "`Name` VARCHAR(100) PRIMARY KEY NOT NULL")
createStatement = createStatement.replace(
" VARCHAR ", " VARCHAR(100) ")
createStatement = createStatement.replace(
" VARCHAR,", " VARCHAR(100),")
cur.execute(
"SELECT * from %(table)s;" % locals())
rows = cur.fetchall()
allRows = []
for row in rows:
allRows.append(dict(row))
if not os.path.exists("/tmp/headjack/"):
os.makedirs("/tmp/headjack/")
writequery(
log=self.log,
sqlQuery=createStatement,
dbConn=self.dbConn,
)
from fundamentals.mysql import insert_list_of_dictionaries_into_database_tables
insert_list_of_dictionaries_into_database_tables(
dbConn=self.dbConn,
log=self.log,
dictList=allRows,
dbTableName=self.tablePrefix + table,
uniqueKeyList=[],
dateModified=True,
dateCreated=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"]
)
con.close()
self.log.debug()
return None | *copy the contents of the sqlite database into the mysql database*
See class docstring for usage |
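A hedged usage sketch; the constructor signature is assumed from the attributes the method uses (log, settings, pathToSqlite, tablePrefix, dbConn) and may differ from the real class:

# Illustrative only: argument names inferred from the attributes referenced above.
converter = sqlite2mysql(
    log=log,
    settings=settings,
    pathToSqlite="/path/to/source.db",
    tablePrefix="imported_",
    dbConn=dbConn,
)
converter.convert_sqlite_to_mysql()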
2,591 | def loglike(self):
sum = logp_of_set(self.children)
if self.verbose > 2:
print_( + self._id + , sum)
return sum | The summed log-probability of all stochastic variables that depend on
self.stochastics, with self.stochastics removed. |
2,592 | def _extract_sender(
message: Message, resent_dates: List[Union[str, Header]] = None
) -> str:
if resent_dates:
sender_header = "Resent-Sender"
from_header = "Resent-From"
else:
sender_header = "Sender"
from_header = "From"
if sender_header in message:
sender = message[sender_header]
else:
sender = message[from_header]
return str(sender) if sender else "" | Extract the sender from the message object given. |
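A small example of the precedence implemented above: 'Sender' wins over 'From', and the Resent-* variants are consulted when resent dates are supplied.

from email.message import Message

msg = Message()
msg["From"] = "alice@example.com"
msg["Sender"] = "bot@example.com"

# With no resent dates, the "Sender" header takes precedence over "From".
assert _extract_sender(msg) == "bot@example.com"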
2,593 | def grouped_insert(t, value):
collator = Collator.createInstance(Locale(t.lang) if t.lang else Locale())
if value.tail is not None:
val_prev = value.getprevious()
if val_prev is not None:
val_prev.tail = (val_prev.tail or '') + value.tail
else:
val_parent = value.getparent()
if val_parent is not None:
val_parent.text = (val_parent.text or '') + value.tail
value.tail = None
if t.isgroup and t.sort(value) is not None:
if t.groupby:
for child in t.tree:
if child.get() == :
order = collator.compare(
t.groupby(child[1]) or '', t.groupby(value) or '')
if order == 0:
c_target = Target(child, sort=t.sort, lang=t.lang)
insert_group(value, c_target)
break
elif order > 0:
group = create_group(t.groupby(value))
group.append(value)
child.addprevious(group)
break
else:
group = create_group(t.groupby(value))
group.append(value)
t.tree.append(group)
else:
insert_group(value, t)
elif t.sort and t.sort(value) is not None:
insert_sort(value, t)
elif t.location == :
for child in t.tree:
value.append(child)
value.text = t.tree.text
t.tree.text = None
t.tree.append(value)
elif t.location == :
value.tail = t.tree.tail
t.tree.tail = None
target_parent_descendants = (
[n.getparent() for n in t.parent.iterdescendants() if n == t.tree])
try:
parent = target_parent_descendants[0]
parent.insert(parent.index(t.tree), value)
value.append(t.tree)
except IndexError as e:
logger.error()
raise e
elif t.location == :
value.tail = t.tree.text
t.tree.text = None
t.tree.insert(0, value)
else:
t.tree.append(value) | Insert value into the target tree 't' with correct grouping. |
2,594 | def start(self):
if self._already_running():
message = 'pid file %s already exists, is the daemon already running?\n'
sys.stderr.write(message % self.pid_file)
return 0
self.set_gid()
self.set_uid()
self.setup_logging()
self.daemonize()
try:
self.run()
except Exception:
self.logger.exception()
return 1
return 0 | Start the daemon |
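A hedged sketch of the usual pattern for such a daemon base class: subclass it, override run(), and call start(); the constructor argument below is an assumption.

import time

class MyDaemon(Daemon):                  # Daemon is the class that defines start() above
    def run(self):
        while True:
            time.sleep(60)               # placeholder for real work

MyDaemon(pid_file="/tmp/mydaemon.pid").start()   # pid_file kwarg is assumed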
2,595 | def retrieve(self, id) :
_, _, task = self.http_client.get("/tasks/{id}".format(id=id))
return task | Retrieve a single task
Returns a single task available to the user, identified by the unique task ID provided.
If the specified task does not exist, this query will return an error.
:calls: ``get /tasks/{id}``
:param int id: Unique identifier of a Task.
:return: Dictionary that supports attribute-style access and represents the Task resource.
:rtype: dict |
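A hedged usage sketch (client construction and the task id are illustrative):

task = client.tasks.retrieve(12345)   # `client.tasks` exposes the service above; id is illustrative
print(task)                           # dict supporting attribute-style access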
2,596 | def generic_html(self, result, errors):
h1 = htmlize(type(result))
out = []
result = pre_process_json(result)
if not hasattr(result, ):
header = "<tr><th>Value</th></tr>"
if type(result) is list:
result = htmlize_list(result)
else:
result = htmlize(result)
out = ["<tr><td>" + result + "</td></tr>"]
elif hasattr(result, ):
out = ["<tr><td>" + result + "</td></tr>"]
else:
header = "<tr><th>Key</th><th>Value</th></tr>"
for key, value in result.items():
v = htmlize(value)
row = "<tr><td>{0}</td><td>{1}</td></tr>".format(key, v)
out.append(row)
env = Environment(loader=PackageLoader())
template = env.get_template()
rendered = template.render({: h1, : header, : out})
return {: rendered, : } | Try to display any object in sensible HTML. |
2,597 | def _flush(self):
if self._recording:
raise Exception("Cannot flush data queue while recording!")
if self._saving_cache:
logging.warn("Flush when using cache means unsaved data will be lost and not returned!")
self._cmds_q.put(("reset_data_segment",))
else:
data = self._extract_q(0)
return data | Returns a list of all current data |
2,598 | async def sort(self, request, reverse=False):
return sorted(
self.collection, key=lambda o: getattr(o, self.columns_sort, 0), reverse=reverse) | Sort collection. |
2,599 | def execute(self, query_string, params=None):
cr = self.connection.cursor()
logger.info("SQL: %s (%s)", query_string, params)
self.last_query = (query_string, params)
t0 = time.time()
cr.execute(query_string, params or self.core.empty_params)
ms = (time.time() - t0) * 1000
logger.info("RUNTIME: %.2f ms", ms)
self._update_cursor_stats(cr)
return cr | Executes a query. Returns the resulting cursor.
:query_string: the parameterized query string
:params: can be either a tuple or a dictionary, and must match the parameterization style of the
query
:return: a cursor object |
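A hedged usage sketch of a parameterized call; the table, columns, and the '%s' paramstyle are assumptions about the underlying driver, not part of the wrapper above:

import datetime

# `db` is an instance of the wrapper class defining execute() above.
cur = db.execute(
    "SELECT id, name FROM users WHERE created_at >= %s AND active = %s",
    (datetime.date(2024, 1, 1), True),
)
rows = cur.fetchall()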