Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
28,600 |
def mi(x, y, bins_x=None, bins_y=None, bins_xy=None, method='nearest-neighbors', units='bits'):
# NOTE: the default literals above are assumptions; the extract only preserved the option names ('nearest-neighbors'/'gaussian'/'bin', 'bits'/'nats')
try:
if isinstance(x, zip):
x = list(x)
if isinstance(y, zip):
y = list(y)
except:
pass
try:
if len(x.shape) == 1:
x = np.expand_dims(x, 1)
if len(y.shape) == 1:
y = np.expand_dims(y, 1)
except:
pass
HX = entropy(data=x, bins=bins_x, method=method, units=units)
HY = entropy(data=y, bins=bins_y, method=method, units=units)
HXY = entropy(data=np.concatenate([x, y], axis=1), bins=bins_xy, method=method, units=units)
return HX + HY - HXY
|
compute and return the mutual information between x and y
inputs:
-------
x, y: numpy arrays of shape samples x dimension
method: 'nearest-neighbors', 'gaussian', or 'bin'
units: 'bits' or 'nats'
output:
-------
mi: float
Notes:
------
if you are trying to mix several symbols together as in mi(x, (y0,y1,...)), try
info[p] = _info.mi(x, info.combine_symbols(y0, y1, ...) )
|
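A minimal usage sketch for mi() above, not from the original source: it assumes numpy and the module's entropy() helper are importable alongside mi(), and that the chosen estimator handles continuous samples.
import numpy as np

rng = np.random.RandomState(0)
x = rng.normal(size=(1000, 1))
y = x + 0.1 * rng.normal(size=(1000, 1))   # strongly dependent on x
z = rng.normal(size=(1000, 1))             # independent of x

print(mi(x, y, method='nearest-neighbors', units='bits'))  # clearly positive: x and y share information
print(mi(x, z, method='nearest-neighbors', units='bits'))  # close to zero for independent inputs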
28,601 |
def get_mapping_client(self, max_concurrency=64, auto_batch=None):
if auto_batch is None:
auto_batch = self.auto_batch
return MappingClient(connection_pool=self.connection_pool,
max_concurrency=max_concurrency,
auto_batch=auto_batch)
|
Returns a thread-unsafe mapping client. This client works
similarly to a redis pipeline and returns eventual result objects.
It needs to be joined on to work properly. Instead of using this
directly you should use the :meth:`map` context manager which
automatically joins.
Returns an instance of :class:`MappingClient`.
|
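A hypothetical sketch of the join/map pattern the docstring describes; 'cluster' and the keys are made-up names, and the promise objects are assumed to expose a .value attribute once resolved.
results = {}
with cluster.map() as client:               # context-manager variant: joins automatically on exit
    for key in ('user:1', 'user:2'):
        results[key] = client.get(key)      # eventual (promise-like) result, resolved after the join
values = {key: promise.value for key, promise in results.items()}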
28,602 |
def unzip(self, payload):
zip_with_rel_path = payload.pop()
url = "{url_base}/resource/{pid}/functions/unzip/{path}/".format(
url_base=self.hs.url_base,
path=zip_with_rel_path,
pid=self.pid)
r = self.hs._request("POST", url, None, payload)  # HTTP verb not preserved in the extract; POST assumed for this endpoint
return r
|
Unzips a file
:param payload:
zip_with_rel_path: string
remove_original_zip: boolean
:return: (object)
unzipped_path: string
|
28,603 |
def get_preview_kwargs(self, **kwargs):
if not self.pass_through_kwarg:
return {}
obj = self.get_object()
return {
self.pass_through_kwarg: getattr(obj, self.pass_through_attr)
}
|
Gets the url keyword arguments to pass to the
`preview_view` callable. If the `pass_through_kwarg`
attribute is set the value of `pass_through_attr` will
be looked up on the object.
So if you are previewing an item Obj<id=2> and
::
self.pass_through_kwarg = 'object_id'
self.pass_through_attr = 'pk'
This will return
::
{ 'object_id' : 2 }
|
28,604 |
def _build_cmdargs(argv):
parser = _build_arg_parser()
namespace = parser.parse_args(argv[1:])
cmdargs = vars(namespace)
return cmdargs
|
Build the command line arguments dict used for:
- displaying usage
- vint.linting.env.build_environment
This method takes an argv parameter to keep the function pure.
|
28,605 |
def fetch_host_ip_and_country(host: str) -> Tuple:
ip = fetch_host_ip(host)
if not host:
return '', ''  # empty-string placeholders reconstructed for the (ip, country) tuple
country = fetch_country_by_ip(ip)
return ip, country
|
Fetch ip and country by host
|
28,606 |
def install(ctx, services, delete_after_install=False):
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
home = ctx.obj["HOME"]
services_path = os.path.join(home, SERVICES)
installed_all_plugins = True
for service in services:
try:
plugin_utils.install_plugin(service, SERVICE, services_path, register_service)
except exceptions.PluginAlreadyInstalled as exc:
click.echo(exc)
installed_all_plugins = False
if not installed_all_plugins:
raise ctx.exit(errno.EEXIST)
|
Install a honeypot service from the online library, local path or zipfile.
|
28,607 |
def path_to_node(tree, path):
if path is None:
return None
node = tree
for key in path:
node = child_by_key(node, key)
return node
|
Return the FST node located at the given path.
|
28,608 |
def parse_individual(self, individual):
scaled_ind = []
for i in range(len(self._params[])):
scaled_ind.append(self._params[][i] + (
individual[i] * self._params[][i]))
fullpars = list(self._params[])
for k in range(len(self._params[])):
for j in range(len(fullpars)):
if fullpars[j] == self._params[][k]:
fullpars[j] = scaled_ind[k]
return fullpars
|
Converts a deap individual into a full list of parameters.
Parameters
----------
individual: deap individual from optimization
Details vary according to type of optimization, but
parameters within deap individual are always between -1
and 1. This function converts them into the values used to
actually build the model
Returns
-------
fullpars: list
Full parameter list for model building.
|
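A standalone illustration (not from the source) of the scaling step the docstring describes: each deap gene in [-1, 1] is mapped to mean + gene * range; the variable names here are illustrative, not the library's actual parameter keys.
means = [10.0, 0.5]       # centre of each parameter's search window
ranges = [2.0, 0.25]      # half-width of each window
individual = [-0.5, 1.0]  # raw deap genes, each in [-1, 1]
scaled = [m + g * r for m, g, r in zip(means, individual, ranges)]
print(scaled)  # [9.0, 0.75]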
28,609 |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
C = self.COEFFS_SSLAB[imt]
mag = rup.mag
if mag >= 8.0:
mag = 8.0
G = 10 ** (0.301 - 0.01 * mag)
pga_rock = self._compute_mean(self.COEFFS_SSLAB[PGA()], G, mag,
rup.hypo_depth, dists.rrup, sites.vs30,
np.zeros_like(sites.vs30) + 600,
PGA())
pga_rock = 10 ** (pga_rock)
mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup,
sites.vs30, pga_rock, imt)
mean = np.log((10 ** mean) * 1e-2 / g)
if imt.period == 4.0:
mean /= 0.550
stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
return mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
28,610 |
def _read(self, source):
if isinstance(source, str) and is_config(source):
source_fp = StringIO(source)
elif isinstance(source, IOBase) or isinstance(source, StringIO):
source_fp = source
elif os.path.exists(source):
source_fp = open(source)
else:
return False
self._parser.read_file(source_fp)
self._parse_extra(source_fp)
return True
|
Reads and parses the config source
:param file/str source: Config source string, file name, or file pointer. If file name does not exist, it is
ignored.
:return: True if source was successfully read, otherwise False
|
28,611 |
def optimize_batch(self, batchsize=10, returns='best', paralell=True):  # default for returns assumed; the docstring only names 'all' and 'best'
if returns not in ('best', 'all'):
raise ValueError()
starts = [np.random.rand(self.m * 2) * 10 for i in range(batchsize)]
if paralell:
with Pool() as p:
results = p.map(self.optimize, starts)
else:
results = map(self.optimize, starts)
results = sorted(results, key=lambda x: x.stress)
return results if returns == 'all' else results[0]
|
Run multiple optimizations using different starting coordinates.
Args:
batchsize (`int`): Number of optimizations to run.
returns (`str`): If ``'all'``, return results of all optimizations,
ordered by stress, ascending. If ``'best'`` return the
projection with the lowest stress.
parallel (`bool`): If ``True``, run optimizations in parallel.
Examples:
.. doctest::
>>> import pandas as pd
>>> from pymds import DistanceMatrix
>>> dist = pd.DataFrame({
... 'a': [0.0, 1.0, 2.0],
... 'b': [1.0, 0.0, 3 ** 0.5],
... 'c': [2.0, 3 ** 0.5, 0.0]} , index=['a', 'b', 'c'])
>>> dm = DistanceMatrix(dist)
>>> batch = dm.optimize_batch(batchsize=3, returns='all')
>>> len(batch)
3
>>> type(batch[0])
<class 'pymds.mds.Projection'>
Returns:
`list` or :py:class:`pymds.Projection`:
`list`: Length batchsize, containing instances of
:py:class:`pymds.Projection`. Sorted by stress, ascending.
or
:py:class:`pymds.Projection`: Projection with the lowest
stress.
|
28,612 |
def _try_dump_cnt(self):
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
|
Dump counters every 60 seconds
|
28,613 |
def l1_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L1Regularizer', [tensor]):  # scope/name literals reconstructed from the TF-Slim convention; originals may differ
l1_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
return regularizer
|
Define a L1 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
|
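A hedged usage sketch for l1_regularizer(); it assumes the TF 1.x graph-style API used in the snippet above.
import tensorflow as tf

weights = tf.Variable(tf.ones([3, 3]), name='weights')
regularize = l1_regularizer(weight=0.01)
l1_loss = regularize(weights)  # tensor holding 0.01 * sum(|weights|), i.e. 0.09 for this all-ones variable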
28,614 |
def expand_defaults(schema, features):
schema_names = [x[] for x in schema]
for name, transform in six.iteritems(features):
if not in transform:
transform[] = name
used_schema_columns = []
for name, transform in six.iteritems(features):
if transform[] not in schema_names:
raise ValueError(
% (transform[], name))
used_schema_columns.append(transform[])
for col_schema in schema:
schema_name = col_schema[]
schema_type = col_schema[].lower()
if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
raise ValueError((
% .join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
if schema_name not in used_schema_columns:
if schema_type in constant.NUMERIC_SCHEMA:
features[schema_name] = {
: constant.DEFAULT_NUMERIC_TRANSFORM,
: schema_name}
elif schema_type == constant.STRING_SCHEMA:
features[schema_name] = {
: constant.DEFAULT_CATEGORICAL_TRANSFORM,
: schema_name}
else:
raise NotImplementedError( % schema_type)
|
Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the features file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type.
|
28,615 |
def get_groupby_statistic(data):
if data.name is not None and data.name in _VALUE_COUNTS_MEMO:
return _VALUE_COUNTS_MEMO[data.name]
value_counts_with_nan = data.value_counts(dropna=False)
value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]  # 'index' column name reconstructed; reset_index() names the former index column 'index'
distinct_count_with_nan = value_counts_with_nan.count()
if value_counts_without_nan.index.inferred_type == "mixed":
raise TypeError()
result = [value_counts_without_nan, distinct_count_with_nan]
if data.name is not None:
_VALUE_COUNTS_MEMO[data.name] = result
return result
|
Calculate value counts and distinct count of a variable (technically a Series).
The result is cached by column name in a global variable to avoid recomputing.
Parameters
----------
data : Series
The data type of the Series.
Returns
-------
list
value count and distinct count
|
28,616 |
def filter(func):
def expand_kv(kv):
return func(*kv)
def filter_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_filter(expand_kv, value.items()))
else:
return cls(_filter(func, value))
return transform(filter_values)
|
Filters out unwanted items using the specified function.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
|
28,617 |
def coarse_grain(self, user_sets):
(tpt_sets, Aindexes, Bindexes) = self._compute_coarse_sets(user_sets)
nnew = len(tpt_sets)
F_coarse = tptapi.coarsegrain(self._gross_flux, tpt_sets)
Fnet_coarse = tptapi.to_netflux(F_coarse)
pstat_coarse = np.zeros((nnew))
forward_committor_coarse = np.zeros((nnew))
backward_committor_coarse = np.zeros((nnew))
for i in range(0, nnew):
I = list(tpt_sets[i])
muI = self._mu[I]
pstat_coarse[i] = np.sum(muI)
partialI = muI / pstat_coarse[i]
forward_committor_coarse[i] = np.dot(partialI, self._qplus[I])
backward_committor_coarse[i] = np.dot(partialI, self._qminus[I])
res = ReactiveFlux(Aindexes, Bindexes, Fnet_coarse, mu=pstat_coarse,
qminus=backward_committor_coarse, qplus=forward_committor_coarse, gross_flux=F_coarse)
return (tpt_sets, res)
|
r"""Coarse-grains the flux onto user-defined sets.
Parameters
----------
user_sets : list of int-iterables
sets of states that shall be distinguished in the coarse-grained flux.
Returns
-------
(sets, tpt) : (list of int-iterables, tpt-object)
sets contains the sets tpt is computed on. The tpt states of the new
tpt object correspond to these sets of states in this order. Sets might
be identical, if the user has already provided a complete partition that
respects the boundary between A, B and the intermediates. If not, Sets
will have more members than provided by the user, containing the
"remainder" states and reflecting the splitting at the A and B
boundaries.
tpt contains a new tpt object for the coarse-grained flux. All its
quantities (gross_flux, net_flux, A, B, committor, backward_committor)
are coarse-grained to sets.
Notes
-----
All user-specified sets will be split (if necessary) to
preserve the boundary between A, B and the intermediate
states.
|
28,618 |
def serve_static(filename=None, prefix=, basedir=None):
if not basedir:
basedir = os.path.join(os.path.dirname(__file__), 'third_party')  # literal reconstructed from the docstring's "local third_party directory"
return send_from_directory(os.path.join(basedir, prefix), filename)
|
Handler for static files: serves filename from basedir/prefix.
If not specified then basedir defaults to the local third_party
directory.
|
28,619 |
def get_translation(self, lang, field):
key = self._get_translation_cache_key(lang, field)
trans = cache.get(key, )
if not trans:
trans_obj = self.get_translation_obj(lang, field)
trans = getattr(trans_obj, , )
cache.set(key, trans)
return trans
|
Return the translation string of a specific field in a Translatable
instance
@type lang: string
@param lang: a string with the name of the language
@type field: string
@param field: a string with the name of the field that we try to get
@rtype: string
@return: Returns a translation string
|
28,620 |
def est_propensity(self, lin='all', qua=None):  # default reconstructed from the docstring ("Defaults to the string 'all'")
lin_terms = parse_lin_terms(self.raw_data[], lin)
qua_terms = parse_qua_terms(self.raw_data[], qua)
self.propensity = Propensity(self.raw_data, lin_terms, qua_terms)
self.raw_data._dict[] = self.propensity[]
self._post_pscore_init()
|
Estimates the propensity scores given list of covariates to
include linearly or quadratically.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
Parameters
----------
lin: string or list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to the string 'all', which
uses the whole covariate matrix.
qua: list, optional
Tuples indicating which columns of the original
covariate matrix to multiply and include. E.g.,
[(1,1), (2,3)] indicates squaring the 2nd column
and including the product of the 3rd and 4th
columns. Default is to not include any
quadratic terms.
|
28,621 |
def expand_time(str_time, default_unit='s', multiplier=1):
# unit literals and the regex below are reconstructed from the multipliers (ms/s/m/h/d/w); the original source may differ slightly
parser = re.compile(r'(\d+)([a-zA-Z]*)')
parts = parser.findall(str_time)
result = 0.0
for value, unit in parts:
value = int(value)
unit = unit.lower()
if unit == '':
unit = default_unit
if unit == 'ms':
result += value * 0.001
continue
elif unit == 's':
result += value
continue
elif unit == 'm':
result += value * 60
continue
elif unit == 'h':
result += value * 60 * 60
continue
elif unit == 'd':
result += value * 60 * 60 * 24
continue
elif unit == 'w':
result += value * 60 * 60 * 24 * 7
continue
else:
raise ValueError(
"String contains unsupported unit %s: %s" % (unit, str_time))
return int(result * multiplier)
|
helper for above functions
|
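A usage sketch consistent with the reconstruction above (the unit letters were inferred from the multipliers and may differ from the original source):
print(expand_time('90'))                    # bare number uses the assumed default unit 's' -> 90
print(expand_time('1h30m'))                 # 3600 + 1800 = 5400
print(expand_time('500ms'))                 # 0.5 s, truncated to 0 by the final int()
print(expand_time('2m', multiplier=1000))   # 120 * 1000 = 120000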
28,622 |
def form_valid(self, form, formsets):
auto_tags, changed_tags, old_tags = tag_handler.get_tags_from_data(
form.data, self.get_tags(instance))
tag_handler.set_auto_tags_for_form(form, auto_tags)
with transaction.commit_on_success():
self.object = self.save_form(form)
self.save_formsets(form, formsets, auto_tags=auto_tags)
url = self.get_object_url()
self.log_action(self.object, CMSLog.SAVE, url=url)
msg = self.write_message()
if not new_object and changed_tags and old_tags:
tag_handler.update_changed_tags(changed_tags, old_tags)
return self.success_response(msg)
|
Response for valid form. In one transaction this will
save the current form and formsets, log the action
and message the user.
Returns the results of calling the `success_response` method.
|
28,623 |
def addRnaQuantMetadata(self, fields):
self._featureSetIds = fields["feature_set_ids"].split(',')  # comma separator reconstructed from the docstring
self._description = fields["description"]
self._name = fields["name"]
self._biosampleId = fields.get("biosample_id", "")
if fields["read_group_ids"] == "":
self._readGroupIds = []
else:
self._readGroupIds = fields["read_group_ids"].split(',')
if fields["programs"] == "":
self._programs = []
else:
self._programs = []
|
data elements are:
Id, annotations, description, name, readGroupId
where annotations is a comma separated list
|
28,624 |
def bind(nodemask):
mask = set_to_numa_nodemask(nodemask)
bitmask = libnuma.numa_allocate_nodemask()
libnuma.copy_nodemask_to_bitmask(byref(mask), bitmask)
libnuma.numa_bind(bitmask)
libnuma.numa_bitmask_free(bitmask)
|
Binds the current thread and its children to the nodes specified in nodemask.
They will only run on the CPUs of the specified nodes and only be able to allocate memory from them.
@param nodemask: node mask
@type nodemask: C{set}
|
28,625 |
def cover_update(self, photo, **kwds):
result = self._client.album.cover_update(self, photo, **kwds)
self._replace_fields(result.get_fields())
self._update_fields_with_objects()
|
Endpoint: /album/<album_id>/cover/<photo_id>/update.json
Update the cover photo of this album.
|
28,626 |
def transcription(ref, est, **kwargs):
namespace = 'pitch_contour'  # literal assumed from the docstring's pitch_contour conversion note
ref = coerce_annotation(ref, namespace)
est = coerce_annotation(est, namespace)
ref_intervals, ref_p = ref.to_interval_values()
est_intervals, est_p = est.to_interval_values()
ref_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in ref_p])  # dict keys assumed from the JAMS pitch_contour value schema
est_pitches = np.asarray([p['frequency'] * (-1)**(~p['voiced']) for p in est_p])
return mir_eval.transcription.evaluate(
ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs)
|
Note transcription evaluation
Parameters
----------
ref : jams.Annotation
Reference annotation object
est : jams.Annotation
Estimated annotation object
kwargs
Additional keyword arguments
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
See Also
--------
mir_eval.transcription.evaluate
Examples
--------
>>> # Load in the JAMS objects
>>> ref_jam = jams.load('reference.jams')
>>> est_jam = jams.load('estimated.jams')
>>> # Select the first relevant annotations. You can use any annotation
>>> # type that can be converted to pitch_contour (such as pitch_midi)
>>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]
>>> est_ann = est_jam.search(namespace='note_hz')[0]
>>> scores = jams.eval.transcription(ref_ann, est_ann)
|
28,627 |
def iso_register(iso_code):
def wrapper(cls):
registry.register(iso_code, cls)
return cls
return wrapper
|
Registers Calendar class as country or region in IsoRegistry.
Registered country must set class variables ``iso`` using this decorator.
>>> from workalendar.core import Calendar
>>> @iso_register('MC-MR')
>>> class MyRegion(Calendar):
>>> 'My Region'
Region calendar is then retrievable from registry:
>>> calendar = registry.get_calendar_class('MC-MR')
|
28,628 |
def get_success_url(self):
messages.success(self.request, self.success_message)
if self.object.is_topic_head and self.object.is_topic_tail:
return reverse(
,
kwargs={
: self.object.topic.forum.slug, : self.object.topic.forum.pk,
},
)
return reverse(
,
kwargs={
: self.object.topic.forum.slug,
: self.object.topic.forum.pk,
: self.object.topic.slug,
: self.object.topic.pk,
},
)
|
Returns the URL to redirect the user to upon valid form processing.
|
28,629 |
def replace_param_occurrences(string, params):
for k, v in params.items():
string = string.replace(k, str(v))
return string
|
replace occurrences of the tuning params with their current value
|
28,630 |
def IsFile(v):
try:
if v:
v = str(v)
return os.path.isfile(v)
else:
raise FileInvalid()
except TypeError:
raise FileInvalid()
|
Verify the file exists.
>>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
True
>>> with raises(FileInvalid, 'not a file'):
... IsFile()("random_filename_goes_here.py")
>>> with raises(FileInvalid, 'Not a file'):
... IsFile()(None)
|
28,631 |
def update_ca_bundle(
target=None,
source=None,
opts=None,
merge_files=None,
):
if opts is None:
opts = {}
if target is None:
target = get_ca_bundle(opts)
if target is None:
log.error()
return
if source is None:
source = opts.get(, )
log.debug(, source, target)
query(
source,
text=True,
decode=False,
headers=False,
status=False,
text_out=target
)
if merge_files is not None:
if isinstance(merge_files, six.string_types):
merge_files = [merge_files]
if not isinstance(merge_files, list):
log.error(
)
return
merge_content = ''
for cert_file in merge_files:
if os.path.exists(cert_file):
log.debug(
,
cert_file, target
)
try:
with salt.utils.files.fopen(cert_file, ) as fcf:
merge_content = .join((merge_content, fcf.read()))
except IOError as exc:
log.error(
,
cert_file, exc
)
if merge_content:
log.debug(, target)
try:
with salt.utils.files.fopen(target, ) as tfp:
tfp.write()
tfp.write(merge_content)
except IOError as exc:
log.error(
,
target, exc
)
|
Attempt to update the CA bundle file from a URL
If not specified, the local location on disk (``target``) will be
auto-detected, if possible. If it is not found, then a new location on disk
will be created and updated.
The default ``source`` is:
http://curl.haxx.se/ca/cacert.pem
This is based on the information at:
http://curl.haxx.se/docs/caextract.html
A string or list of strings representing files to be appended to the end of
the CA bundle file may also be passed through as ``merge_files``.
|
28,632 |
def union_q(token):
query = Q()
operation = 'and'  # token literals ('and'/'or'/'not') are assumptions; the original grammar may spell them differently
negation = False
for t in token:
if type(t) is ParseResults:
query &= union_q(t)
else:
if t in ('or', 'and'):
operation = t
elif t == 'not':
negation = True
else:
if negation:
t = ~t
if operation == 'or':
query |= t
else:
query &= t
return query
|
Appends all the Q() objects.
|
28,633 |
def _get_error(self, code, errors, indentation=0):
results = []
lines = None
if code is not None:
lines = [line.strip() for line in code.split('\n')]  # newline separators reconstructed
for error in errors.split('\n'):
error = error.strip()
if not error:
continue
linenr, error = self._parse_error(error)
if None in (linenr, lines):
results.append( % error)
else:
results.append( % (linenr, error))
if linenr > 0 and linenr < len(lines):
results.append( % lines[linenr - 1])
results = [' ' * indentation + r for r in results]
return '\n'.join(results)
|
Get error and show the faulty line + some context
Other GLIR implementations may omit this.
|
28,634 |
def new(self, string=None, *args, **kwargs):
if len(self):
hobj = _ChainedHashAlgorithm(self, *args, **kwargs)
else:
hobj = _NopHashAlgorithm(*args, **kwargs)
if string is not None:
hobj.update(string)
return hobj
|
Returns a `_ChainedHashAlgorithm` if the underlying tuple
(specifying the list of algorithms) is not empty, otherwise a
`_NopHashAlgorithm` instance is returned.
|
28,635 |
def extract_subjects(cert_pem):
primary_str, subject_info_xml = d1_common.cert.x509.extract_subjects(cert_pem)
equivalent_set = {
primary_str,
d1_common.const.SUBJECT_AUTHENTICATED,
d1_common.const.SUBJECT_PUBLIC,
}
if subject_info_xml is not None:
equivalent_set |= d1_common.cert.subject_info.extract_subjects(
subject_info_xml, primary_str
)
return primary_str, equivalent_set
|
Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- The primary subject string, extracted from the certificate DN.
- A set of equivalent identities, group memberships and inferred symbolic
subjects extracted from the SubjectInfo (if present.)
- All returned subjects are DataONE compliant serializations.
- A copy of the primary subject is always included in the set of equivalent
identities.
|
28,636 |
def playlist(self, playlist_id, *, include_songs=False):
playlist_info = next(
(
playlist
for playlist in self.playlists(include_songs=include_songs)
if playlist['id'] == playlist_id
),
None
)
return playlist_info
|
Get information about a playlist.
Parameters:
playlist_id (str): A playlist ID.
include_songs (bool, Optional): Include songs from
the playlist in the returned dict.
Default: ``False``
Returns:
dict: Playlist information.
|
28,637 |
def validatePrepare(self, prepare: Prepare, sender: str) -> bool:
key = (prepare.viewNo, prepare.ppSeqNo)
primaryStatus = self.isPrimaryForMsg(prepare)
ppReq = self.getPrePrepare(*key)
if self.isMsgFromPrimary(prepare, sender):
raise SuspiciousNode(sender, Suspicions.PR_FRM_PRIMARY, prepare)
if primaryStatus is False:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
if not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
if primaryStatus is True:
if self.prepares.hasPrepareFrom(prepare, sender):
raise SuspiciousNode(
sender, Suspicions.DUPLICATE_PR_SENT, prepare)
elif not ppReq:
raise SuspiciousNode(
sender, Suspicions.UNKNOWN_PR_SENT, prepare)
if primaryStatus is None and not ppReq:
self.enqueue_prepare(prepare, sender)
self._setup_last_ordered_for_non_master()
return False
if prepare.digest != ppReq.digest:
raise SuspiciousNode(sender, Suspicions.PR_DIGEST_WRONG, prepare)
elif prepare.stateRootHash != ppReq.stateRootHash:
raise SuspiciousNode(sender, Suspicions.PR_STATE_WRONG,
prepare)
elif prepare.txnRootHash != ppReq.txnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_TXN_WRONG,
prepare)
elif prepare.auditTxnRootHash != ppReq.auditTxnRootHash:
raise SuspiciousNode(sender, Suspicions.PR_AUDIT_TXN_ROOT_HASH_WRONG,
prepare)
try:
self.execute_hook(ReplicaHooks.VALIDATE_PR, prepare, ppReq)
except Exception as ex:
self.logger.warning(
.
format(self, ReplicaHooks.VALIDATE_PR, ex))
raise SuspiciousNode(sender, Suspicions.PR_PLUGIN_EXCEPTION,
prepare)
self._bls_bft_replica.validate_prepare(prepare, sender)
return True
|
Return whether the PREPARE specified is valid.
:param prepare: the PREPARE to validate
:param sender: the name of the node that sent the PREPARE
:return: True if PREPARE is valid, False otherwise
|
28,638 |
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
author_date=None, commit_date=None):
if parent_commits is None:
try:
parent_commits = [repo.head.commit]
except ValueError:
parent_commits = []
else:
for p in parent_commits:
if not isinstance(p, cls):
raise ValueError("Parent commit must be of type %s" % (p, cls))
cr = repo.config_reader()
env = os.environ
committer = committer or Actor.committer(cr)
author = author or Actor.author(cr)
unix_time = int(time())
is_dst = daylight and localtime().tm_isdst > 0
offset = altzone if is_dst else timezone
author_date_str = env.get(cls.env_author_date, '')
if author_date:
author_time, author_offset = parse_date(author_date)
elif author_date_str:
author_time, author_offset = parse_date(author_date_str)
else:
author_time, author_offset = unix_time, offset
committer_date_str = env.get(cls.env_committer_date, '')
if commit_date:
committer_time, committer_offset = parse_date(commit_date)
elif committer_date_str:
committer_time, committer_offset = parse_date(committer_date_str)
else:
committer_time, committer_offset = unix_time, offset
enc_section, enc_option = cls.conf_encoding.split('.')  # separator assumed; conf_encoding is a dotted config key
conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
if isinstance(tree, str):
tree = repo.tree(tree)
new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
author, author_time, author_offset,
committer, committer_time, committer_offset,
message, parent_commits, conf_encoding)
stream = BytesIO()
new_commit._serialize(stream)
streamlen = stream.tell()
stream.seek(0)
istream = repo.odb.store(IStream(cls.type, streamlen, stream))
new_commit.binsha = istream.binsha
if head:
import git.refs
try:
repo.head.set_commit(new_commit, logmsg=message)
except ValueError:
master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
repo.head.set_reference(master, logmsg= % master)
return new_commit
|
Commit the given tree, creating a commit object.
:param repo: Repo object the commit should be part of
:param tree: Tree object or hex or bin sha
the tree of the new commit
:param message: Commit message. It may be an empty string if no message is provided.
It will be converted to a string in any case.
:param parent_commits:
Optional Commit objects to use as parents for the new commit.
If empty list, the commit will have no parents at all and become
a root commit.
If None , the current head commit will be the parent of the
new commit object
:param head:
If True, the HEAD will be advanced to the new commit automatically.
Else the HEAD will remain pointing on the previous commit. This could
lead to undesired results when diffing files.
:param author: The name of the author, optional. If unset, the repository
configuration is used to obtain this value.
:param committer: The name of the committer, optional. If unset, the
repository configuration is used to obtain this value.
:param author_date: The timestamp for the author field
:param commit_date: The timestamp for the committer field
:return: Commit object representing the new commit
:note:
Additional information about the committer and Author are taken from the
environment or from the git configuration, see git-commit-tree for
more information
|
28,639 |
def generate_cot_body(context):
try:
cot = {
: get_cot_artifacts(context),
: 1,
: context.claim_task[],
: context.task,
: context.claim_task[][],
: context.claim_task[],
: context.config[],
: context.config[],
: get_cot_environment(context),
}
except (KeyError, ) as exc:
raise ScriptWorkerException("Can't generate chain of trust! {}".format(str(exc)))
return cot
|
Generate the chain of trust dictionary.
This is the unsigned and unformatted chain of trust artifact contents.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: the unsigned and unformatted chain of trust artifact contents.
Raises:
ScriptWorkerException: on error.
|
28,640 |
def _one_to_many_query(cls, query_obj, search4, model_attrib):
model = model_attrib.parent.class_
already_joined_tables = [mapper.class_ for mapper in query_obj._join_entities]
if isinstance(search4, (str, int, Iterable)) and model not in already_joined_tables:
query_obj = query_obj.join(model)
if isinstance(search4, str):
query_obj = query_obj.filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.filter(model_attrib.in_(search4))
return query_obj
|
extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
|
28,641 |
def getContactTypes(self):
yield VIPPersonContactType()
yield EmailContactType(self.store)
yield PostalContactType()
yield PhoneNumberContactType()
yield NotesContactType()
for getContactTypes in self._gatherPluginMethods():
for contactType in getContactTypes():
self._checkContactType(contactType)
yield contactType
|
Return an iterator of L{IContactType} providers available to this
organizer's store.
|
28,642 |
def get_params(self, deep=True):
params = {:self.coef_, :self.intercept_}
if deep:
for key, value in self.B.items():
params[+str(key)] = value
return params
|
Get parameters for the estimator.
Args:
deep (boolean, optional) : If True, will return the parameters for this estimator and contained subobjects that are estimators.
Returns:
params : mapping of string to any contained subobjects that are estimators.
|
28,643 |
def list_gewesten(self, sort=1):
def creator():
res = crab_gateway_request(self.client, , sort)
tmp = {}
for r in res.GewestItem:
if r.GewestId not in tmp:
tmp[r.GewestId] = {}
tmp[r.GewestId][r.TaalCodeGewestNaam] = r.GewestNaam
return[
Gewest(
k,
v
)for k, v in tmp.items()
]
if self.caches[].is_configured:
key = % sort
gewesten = self.caches[].get_or_create(key, creator)
else:
gewesten = creator()
for g in gewesten:
g.set_gateway(self)
return gewesten
|
List all `gewesten` in Belgium.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Gewest`.
|
28,644 |
def hook_wrapper_23(stdin, stdout, prompt):
try:
res = ensure_str(readline_hook(prompt))
if res and not isinstance(res, str):
raise TypeError, u
except KeyboardInterrupt:
return 0
except EOFError:
res = u
except:
print >>sys.stderr, u
traceback.print_exc()
res = u
n = len(res)
p = Console.PyMem_Malloc(n + 1)
_strncpy(cast(p, c_char_p), res, n + 1)
return p
|
Wrap a Python readline so it behaves like GNU readline.
|
28,645 |
def _contained_parameters(expression):
if isinstance(expression, BinaryExp):
return _contained_parameters(expression.op1) | _contained_parameters(expression.op2)
elif isinstance(expression, Function):
return _contained_parameters(expression.expression)
elif isinstance(expression, Parameter):
return {expression}
else:
return set()
|
Determine which parameters are contained in this expression.
:param Expression expression: expression involving parameters
:return: set of parameters contained in this expression
:rtype: set
|
28,646 |
def compress(self, input_path):
self.log([u"Compressing into this container", input_path])
if self.file_path is None:
self.log_exc(u"The container path has not been set", None, True, TypeError)
if self.actual_container is None:
self.log_exc(u"The actual container object has not been set", None, True, TypeError)
if not gf.directory_exists(input_path):
self.log_exc(u"The input path is not an existing directory", None, True, ValueError)
gf.ensure_parent_directory(input_path)
self.actual_container.compress(input_path)
|
Compress the contents of the given directory.
:param string input_path: path of the input directory
:raises: TypeError: if the container path has not been set
:raises: ValueError: if ``input_path`` is not an existing directory
:raises: OSError: if an error occurred compressing the given container
(e.g., empty file, damaged file, etc.)
|
28,647 |
def predict_density(self, Xnew, Ynew):
pred_f_mean, pred_f_var = self._build_predict(Xnew)
return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew)
|
Compute the (log) density of the data Ynew at the points Xnew
Note that this computes the log density of the data individually,
ignoring correlations between them. The result is a matrix the same
shape as Ynew containing the log densities.
|
28,648 |
def num(value):
if re_hex_num.match(value):
return int(value, base=16)
else:
return int(value)
|
Convert a value from one of several bases to an int.
|
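An illustration of num(); re_hex_num is defined elsewhere in the source module, so the pattern below is only an assumption for the sketch.
import re
re_hex_num = re.compile(r'^0x[0-9a-fA-F]+$')  # assumed shape of the module's pattern

print(num('0x1a'))  # 26
print(num('42'))    # 42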
28,649 |
def get_requirements_transform_cfme(config):
def requirement_transform(requirement):
requirement = copy.deepcopy(requirement)
if "id" in requirement:
del requirement["id"]
return requirement
return requirement_transform
|
Return requirement transformation function for CFME.
|
28,650 |
def iter(self, match="*", count=1000):
for field, value in self._client.hscan_iter(
self.key_prefix, match=match, count=count):
yield self._decode(field)
|
:see: :meth:`RedisMap.iter`
|
28,651 |
def is_inbound_presence_filter(cb):
try:
handlers = get_magic_attr(cb)
except AttributeError:
return False
hs = HandlerSpec(
(_apply_inbound_presence_filter, ())
)
return hs in handlers
|
Return true if `cb` has been decorated with
:func:`inbound_presence_filter`.
|
28,652 |
def process_module(self, node):
with node.stream() as stream:
self.append_stream(self.linter.current_name, stream, node.file_encoding)
|
process a module
the module's content is accessible via the stream object
stream must implement the readlines method
|
28,653 |
def _delete_network(self, request, network):
try:
api.neutron.network_delete(request, network.id)
LOG.debug(
, network.id)
msg = _(
) % network.name
redirect = self.get_failure_url()
messages.info(request, msg)
raise exceptions.Http302(redirect)
except Exception as e:
LOG.info(,
{: network.id, : e})
msg = _() % network.name
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
|
Delete the created network when subnet creation failed.
|
28,654 |
def enable_compression(self,
force: Optional[Union[bool, ContentCoding]]=None
) -> None:
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
warnings.warn("Using boolean for force is deprecated
DeprecationWarning)
elif force is not None:
assert isinstance(force, ContentCoding), ("force should one of "
"None, bool or "
"ContentEncoding")
self._compression = True
self._compression_force = force
|
Enables response compression encoding.
|
28,655 |
def load_config(config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None):
if profile is None:
profile = os.getenv("DWAVE_PROFILE")
if config_file == False:
section = {}
elif config_file == True:
section = load_profile_from_files(None, profile)
else:
if config_file is None:
config_file = os.getenv("DWAVE_CONFIG_FILE")
filenames = None
if config_file:
if isinstance(config_file, six.string_types):
filenames = [config_file]
else:
filenames = config_file
section = load_profile_from_files(filenames, profile)
section['client'] = client or os.getenv("DWAVE_API_CLIENT", section.get('client'))
section['endpoint'] = endpoint or os.getenv("DWAVE_API_ENDPOINT", section.get('endpoint'))
section['token'] = token or os.getenv("DWAVE_API_TOKEN", section.get('token'))
section['solver'] = solver or os.getenv("DWAVE_API_SOLVER", section.get('solver'))
section['proxy'] = proxy or os.getenv("DWAVE_API_PROXY", section.get('proxy'))
return section
|
Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags (tested for "truthiness"), and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are `qpu`
for :class:`dwave.cloud.qpu.Client` and `sw` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
:term:`solver` features, as a JSON-encoded dictionary of feature constraints,
the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for
semantics of supported feature constraints.
If undefined, the client uses a solver definition from environment variables,
a configuration file, or falls back to the first available online solver.
For backward compatibility, solver name in string format is accepted and
converted to ``{"name": <solver name>}``.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
Returns:
dict:
Mapping of configuration keys to values for the profile
(section), as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
keys.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> import dwave.cloud as dc
>>> dc.config.load_config()
{'client': u'qpu',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> See which configuration file was loaded
>>> dc.config.get_configfile_paths()
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
|
28,656 |
def percbend(x, y, beta=.2):
from scipy.stats import t
X = np.column_stack((x, y))
nx = X.shape[0]
M = np.tile(np.median(X, axis=0), nx).reshape(X.shape)
W = np.sort(np.abs(X - M), axis=0)
m = int((1 - beta) * nx)
omega = W[m - 1, :]
P = (X - M) / omega
P[np.isinf(P)] = 0
P[np.isnan(P)] = 0
a = np.zeros((2, nx))
for c in [0, 1]:
psi = P[:, c]
i1 = np.where(psi < -1)[0].size
i2 = np.where(psi > 1)[0].size
s = X[:, c].copy()
s[np.where(psi < -1)[0]] = 0
s[np.where(psi > 1)[0]] = 0
pbos = (np.sum(s) + omega[c] * (i2 - i1)) / (s.size - i1 - i2)
a[c] = (X[:, c] - pbos) / omega[c]
a[a <= -1] = -1
a[a >= 1] = 1
a, b = a
r = (a * b).sum() / np.sqrt((a**2).sum() * (b**2).sum())
tval = r * np.sqrt((nx - 2) / (1 - r**2))
pval = 2 * t.sf(abs(tval), nx - 2)
return r, pval
|
Percentage bend correlation (Wilcox 1994).
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
beta : float
Bending constant for omega (0 <= beta <= 0.5).
Returns
-------
r : float
Percentage bend correlation coefficient.
pval : float
Two-tailed p-value.
Notes
-----
Code inspired by Matlab code from Cyril Pernet and Guillaume Rousselet.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Pernet CR, Wilcox R, Rousselet GA. Robust Correlation Analyses:
False Positive and Power Validation Using a New Open Source Matlab
Toolbox. Frontiers in Psychology. 2012;3:606.
doi:10.3389/fpsyg.2012.00606.
|
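A quick usage sketch for percbend() on synthetic correlated data (not taken from the original package's documentation):
import numpy as np

rng = np.random.RandomState(42)
x = rng.normal(size=100)
y = 0.8 * x + rng.normal(scale=0.5, size=100)
r, pval = percbend(x, y, beta=.2)
print(round(r, 3), pval < 0.05)  # strong positive correlation, significant p-value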
28,657 |
def _add_gateway_node(self, gw_type, routing_node_gateway, network=None):
if self.level != :
raise ModificationAborted(
.format(self))
if self.related_element_type == :
return self._add_gateway_node_on_tunnel(routing_node_gateway)
routing_node = list(gateway_by_type(self, type=gw_type, on_network=network))
_networks = [netwk for netwk in self if netwk.ip == network] if network is \
not None else list(self)
gateway_element_type = routing_node_gateway.routing_node_element
modified = False
for network in _networks:
if getattr(network, , None):
network.data.setdefault(, []).append(
routing_node_gateway)
modified = True
break
this_network_node = network.routing_node_element
if routing_node and any(netwk for _intf, netwk, gw in routing_node
if netwk.routing_node_element == this_network_node and
gateway_element_type == gw.routing_node_element):
for gw in network:
if gw.routing_node_element == gateway_element_type:
existing_dests = [node.routing_node_element for node in gw]
for destination in routing_node_gateway.destinations:
is_valid_destination = False
if destination not in existing_dests:
dest_ipv4, dest_ipv6 = _which_ip_protocol(destination)
if len(network.ip.split()) > 1:
if dest_ipv6:
is_valid_destination = True
else:
if dest_ipv4:
is_valid_destination = True
if is_valid_destination:
gw.data.setdefault(, []).append(
{: , : destination.href,
: destination.name})
modified = True
else:
network.data.setdefault(, []).append(
routing_node_gateway)
modified = True
if modified:
self.update()
return modified
|
Add a gateway node to existing routing tree. Gateways are only added if
they do not already exist. If they do exist, check the destinations of
the existing gateway and add destinations that are not already there.
A current limitation is that if a gateway doesn't exist and the
destinations specified do not have IP addresses that are valid, they
are still added (i.e. IPv4 gateway with IPv6 destination is considered
invalid).
:param Routing self: the routing node, should be the interface routing node
:param str gw_type: type of gateway, i.e. netlink, ospfv2_area, etc
:param RoutingNodeGateway route_node_gateway: gateway element
:param str network: network to bind to. If none, all networks
:return: Whether a change was made or not
:rtype: bool
|
28,658 |
def _make_passage_kwargs(urn, reference):
kwargs = {}
if urn is not None:
if reference is not None:
kwargs["urn"] = URN("{}:{}".format(urn.upTo(URN.VERSION), reference))
else:
kwargs["urn"] = urn
return kwargs
|
Little helper used by CapitainsCtsPassage to comply with its parent's arguments
:param urn: URN String
:param reference: Reference String
:return: Dictionary of arguments with URN based on identifier and reference
|
28,659 |
def stick_perm(presenter, egg, dist_dict, strategy):
np.random.seed()
egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg)
regg = order_stick(presenter, egg, dist_dict, strategy)
regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg)
regg_pres = list(regg_pres)
egg_pres = list(egg_pres)
idx = [egg_pres.index(r) for r in regg_pres]
weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict)
orders = idx
return weights, orders
|
Computes weights for one reordering using the stick-breaking method
|
28,660 |
def Luv_to_LCHuv(cobj, *args, **kwargs):
lch_l = cobj.luv_l
lch_c = math.sqrt(math.pow(cobj.luv_u, 2.0) + math.pow(cobj.luv_v, 2.0))
lch_h = math.atan2(float(cobj.luv_v), float(cobj.luv_u))
if lch_h > 0:
lch_h = (lch_h / math.pi) * 180
else:
lch_h = 360 - (math.fabs(lch_h) / math.pi) * 180
return LCHuvColor(
lch_l, lch_c, lch_h, observer=cobj.observer, illuminant=cobj.illuminant)
|
Convert from CIE Luv to LCH(uv).
|
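A plain-math check of the chroma/hue arithmetic used above; the Luv values are illustrative and no colormath objects are involved.
import math

luv_l, luv_u, luv_v = 50.0, -20.0, 30.0
lch_c = math.sqrt(luv_u ** 2 + luv_v ** 2)      # ~36.06
lch_h = math.degrees(math.atan2(luv_v, luv_u))  # ~123.69
if lch_h < 0:
    lch_h += 360.0                              # same wrap-around as the else branch above
print(round(lch_c, 2), round(lch_h, 2))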
28,661 |
def _setPrivate(self, private):
self.private = private
self.public = pow(self.generator, self.private, self.modulus)
|
This is here to make testing easier
|
28,662 |
def from_file(path):
_name, ext = os.path.splitext(path)
ext = ext.lower()[1:]
seg = pydub.AudioSegment.from_file(path, ext)
return AudioSegment(seg, path)
|
Returns an AudioSegment object from the given file based on its file extension.
If the extension is wrong, this will throw some sort of error.
:param path: The path to the file, including the file extension.
:returns: An AudioSegment instance from the file.
|
28,663 |
def example_delete_topics(a, topics):
fs = a.delete_topics(topics, operation_timeout=30)
for topic, f in fs.items():
try:
f.result()
print("Topic {} deleted".format(topic))
except Exception as e:
print("Failed to delete topic {}: {}".format(topic, e))
|
delete topics
|
28,664 |
def ConsultarDepositosAcopio(self, sep="||"):
"Retorna los depósitos de acopio pertenencientes al contribuyente"
ret = self.client.consultarDepositosAcopio(
auth={
: self.Token, : self.Sign,
: self.Cuit, },
)[]
self.__analizar_errores(ret)
array = ret.get(, [])
if sep is None:
return array
else:
return [("%s %%s %s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep, sep)) %
(it[], it[], it[], it[])
for it in array]
|
Returns the storage depots ('depósitos de acopio') belonging to the taxpayer
|
28,665 |
def _checkMemberName(name):
assert isinstance(name, str)
restrictionLevel = None
if not name.endswith('__'):
if name.startswith('__'):
restrictionLevel = 'private'  # level labels reconstructed from the docstring; original literals may differ
elif name.startswith('_'):
restrictionLevel = 'protected'
return restrictionLevel
|
See if a member name indicates that it should be private.
Private variables in Python (starting with a double underscore but
not ending in a double underscore) and bed lumps (variables that
are not really private but are by common convention treated as
protected because they begin with a single underscore) get Doxygen
tags labeling them appropriately.
|
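A behaviour sketch consistent with the reconstruction above; the returned labels ('private'/'protected') are inferred from the docstring rather than verified against the original source.
print(_checkMemberName('__secret'))   # 'private'   (double leading underscore, not a dunder)
print(_checkMemberName('_internal'))  # 'protected' (single leading underscore)
print(_checkMemberName('__magic__'))  # None        (dunder names are excluded)
print(_checkMemberName('public'))     # None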
28,666 |
def copy(self, target_parent, name=None, include_children=True, include_instances=True):
if self.category == Category.MODEL and target_parent.category == Category.MODEL:
copied_model = relocate_model(part=self, target_parent=target_parent, name=name,
include_children=include_children)
if include_instances:
instances_to_be_copied = list(self.instances())
parent_instances = list(target_parent.instances())
for parent_instance in parent_instances:
for instance in instances_to_be_copied:
instance.populate_descendants()
move_part_instance(part_instance=instance, target_parent=parent_instance,
part_model=self, name=instance.name, include_children=include_children)
return copied_model
elif self.category == Category.INSTANCE and target_parent.category == Category.INSTANCE:
copied_instance = relocate_instance(part=self, target_parent=target_parent, name=name,
include_children=include_children)
return copied_instance
else:
raise IllegalArgumentError()
|
Copy the `Part` to target parent, both of them having the same category.
.. versionadded:: 2.3
:param target_parent: `Part` object under which the desired `Part` is copied
:type target_parent: :class:`Part`
:param name: how the copied top-level `Part` should be called
:type name: basestring
:param include_children: True to copy also the descendants of `Part`.
:type include_children: bool
:param include_instances: True to copy also the instances of `Part` to ALL the instances of target_parent.
:type include_instances: bool
:returns: copied :class:`Part` model.
:raises IllegalArgumentError: if part and target_parent have different `Category`
:raises IllegalArgumentError: if part and target_parent are identical
Example
-------
>>> model_to_copy = client.model(name='Model to be copied')
>>> bike = client.model('Bike')
>>> model_to_copy.copy(target_parent=bike, name='Copied model',
>>> include_children=True,
>>> include_instances=True)
|
28,667 |
def _update_val(self, val, result_score_list):
self._update_pbar()
if val == 'Timeout':
self._update_pbar(pbar_msg=(
.format(self._pbar.n)))
result_score_list.append(-float('inf'))
else:
result_score_list.append(val)
return result_score_list
|
Update values in the list of result scores and self._pbar during pipeline evaluation.
Parameters
----------
val: float or "Timeout"
CV scores
result_score_list: list
A list of CV scores
Returns
-------
result_score_list: list
An updated list of CV scores
|
28,668 |
def keyed_ordering(cls):
if '__key__' not in cls.__dict__:
raise TypeError("keyed_ordering requires a __key__ method")
for name in ('__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__'):
if name in cls.__dict__:
continue
method = _keyed_ordering_impl(name, cls)
setattr(cls, name, method)
return cls
|
Class decorator to generate all six rich comparison methods, based on a
``__key__`` method.
Many simple classes are wrappers for very simple data, and want to defer
comparisons to that data. Rich comparison is very flexible and powerful,
but makes this simple case tedious to set up. There's the standard
library's ``total_ordering`` decorator, but it still requires you to write
essentially the same method twice, and doesn't correctly handle
``NotImplemented`` before 3.4. It also doesn't automatically generate
``__ne__`` from ``__eq__``, which is a common gotcha.
With this decorator, comparisons will be done on the return value of
``__key__``, in much the same way as the ``key`` argument to ``sorted``.
For example, if you have a class representing a span of time:
.. code-block:: python
@keyed_ordering
class TimeSpan(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __key__(self):
return (self.start, self.end)
This is equivalent to the following, assuming 3.4's ``total_ordering``:
.. code-block:: python
@total_ordering
class TimeSpan(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
if not isinstance(other, TimeSpan):
return NotImplemented
return (self.start, self.end) == (other.start, other.end)
def __ne__(self, other):
if not isinstance(other, TimeSpan):
return NotImplemented
return (self.start, self.end) != (other.start, other.end)
def __lt__(self, other):
if not isinstance(other, TimeSpan):
return NotImplemented
return (self.start, self.end) < (other.start, other.end)
The ``NotImplemented`` check is based on the class being decorated, so
subclasses can still be correctly compared.
You may also implement some of the rich comparison methods in the decorated
class, in which case they'll be left alone.
|
28,669 |
def discover(timeout=1, retries=1):
locations = []
group = ('239.255.255.250', 1900)  # standard SSDP multicast group; address literal reconstructed
service =
message = .join([,
,
,
,
, , ]).format(group=group, st=service)
socket.setdefaulttimeout(timeout)
for _ in range(retries):
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
sock.sendto(message.encode(), group)
while True:
try:
response = sock.recv(2048).decode()
for line in response.split():
if line.startswith():
location = line.split()[1].strip()
if not location in locations:
locations.append(location)
except socket.timeout:
break
devices = [RaumfeldDevice(location) for location in locations]
return sorted([device for device in devices
if device.model_description == ],
key=lambda device: device.friendly_name)
|
Discover Raumfeld devices in the network
:param timeout: The timeout in seconds
:param retries: How often the search should be retried
:returns: A list of raumfeld devices, sorted by name
|
28,670 |
def get_term_freq_mat(self):
freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()), dtype=int)
for cat_i in range(self.get_num_categories()):
freq_mat[:, cat_i] = self._X[self._y == cat_i, :].sum(axis=0)
return freq_mat
|
Returns
-------
np.array with columns as categories and rows as terms
|
28,671 |
def set_bot_permissions(calendar_id, userid, level):
url = _make_set_permissions_url(
calendar_id, userid, level)
return _process_resp(url,
get_bot_resource(url),
_is_permission_set
)
|
:param calendar_id: an integer representing calendar ID
:param userid: a string representing the user's UW NetID
:param level: a string representing the permission level
:return: True if request is successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
|
28,672 |
def _is_current_user(self, some_user):
current_user = self.remote_store.get_current_user()
return current_user.id == some_user.id
|
Is the specified user the current user?
:param some_user: RemoteUser user we want to check against the current user
:return: boolean: True if the current user is the passed in user
|
28,673 |
def scale(val, src, dst):
if val < src[0]:
return dst[0]
if val > src[1]:
return dst[1]
return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
|
Scale value from src range to dst range.
If value outside bounds, it is clipped and set to
the low or high bound of dst.
Ex:
scale(0, (0.0, 99.0), (-1.0, 1.0)) == -1.0
scale(-5, (0.0, 99.0), (-1.0, 1.0)) == -1.0
|
28,674 |
def xml2object(xml, verbose=False):
msg += str(e)
raise Exception, msg
return xml_object
|
Generate XML object model from XML file or XML text
This is the inverse operation to the __str__ representation
(up to whitespace).
Input xml can be either an
* xml file
* open xml file object
Return XML_document instance.
|
28,675 |
def modified_lu(q):
q = q.assemble()
m, b = q.shape[0], q.shape[1]
S = np.zeros(b)
q_work = np.copy(q)
for i in range(b):
S[i] = -1 * np.sign(q_work[i, i])
q_work[i, i] -= S[i]
q_work[(i + 1):m, i] /= q_work[i, i]
q_work[(i + 1):m, (i + 1):b] -= np.outer(q_work[(i + 1):m, i],
q_work[i, (i + 1):b])
L = np.tril(q_work)
for i in range(b):
L[i, i] = 1
U = np.triu(q_work)[:b, :]
return ray.get(core.numpy_to_dist.remote(ray.put(L))), U, S
|
Perform a modified LU decomposition of a matrix.
This takes a matrix q with orthonormal columns, returns l, u, s such that
q - s = l * u.
Args:
q: A two dimensional orthonormal matrix q.
Returns:
A tuple of a lower triangular matrix l, an upper triangular matrix u,
and a vector representing a diagonal matrix s such that
q - s = l * u.
|
28,676 |
def save(store, *args, **kwargs):
if len(args) == 0 and len(kwargs) == 0:
raise ValueError()
if len(args) == 1 and len(kwargs) == 0:
save_array(store, args[0])
else:
save_group(store, *args, **kwargs)
|
Convenience function to save an array or group of arrays to the local file system.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
args : ndarray
NumPy arrays with data to save.
kwargs
NumPy arrays with data to save.
Examples
--------
Save an array to a directory on the file system (uses a :class:`DirectoryStore`)::
>>> import zarr
>>> import numpy as np
>>> arr = np.arange(10000)
>>> zarr.save('data/example.zarr', arr)
>>> zarr.load('data/example.zarr')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save an array to a Zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', arr)
>>> zarr.load('data/example.zip')
array([ 0, 1, 2, ..., 9997, 9998, 9999])
Save several arrays to a directory on the file system (uses a
:class:`DirectoryStore` and stores arrays in a group)::
>>> import zarr
>>> import numpy as np
>>> a1 = np.arange(10000)
>>> a2 = np.arange(10000, 0, -1)
>>> zarr.save('data/example.zarr', a1, a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: arr_0, arr_1>
>>> loader['arr_0']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['arr_1']
array([10000, 9999, 9998, ..., 3, 2, 1])
Save several arrays using named keyword arguments::
>>> zarr.save('data/example.zarr', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zarr')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
Store several arrays in a single zip file (uses a :class:`ZipStore`)::
>>> zarr.save('data/example.zip', foo=a1, bar=a2)
>>> loader = zarr.load('data/example.zip')
>>> loader
<LazyLoader: bar, foo>
>>> loader['foo']
array([ 0, 1, 2, ..., 9997, 9998, 9999])
>>> loader['bar']
array([10000, 9999, 9998, ..., 3, 2, 1])
See Also
--------
save_array, save_group
|
28,677 |
def format_argspec_plus(fn, grouped=True):
spec = callable(fn) and inspect.getargspec(fn) or fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
    elif spec[1]:
        self_arg = '%s[0]' % spec[1]
    else:
        self_arg = None
    apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
    defaulted_vals = spec[3] is not None and spec[0][-len(spec[3]):] or ()
    apply_kw = inspect.formatargspec(spec[0], spec[1], spec[2], defaulted_vals,
                                     formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
|
Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
|
28,678 |
def build_tables(
table_names_to_dataframes,
table_names_to_primary_keys={},
table_names_to_indices={}):
tables = []
for table_name, df in table_names_to_dataframes.items():
table_indices = table_names_to_indices.get(table_name, [])
primary_key = table_names_to_primary_keys.get(table_name)
table = DatabaseTable.from_dataframe(
name=table_name,
df=df,
indices=table_indices,
primary_key=primary_key)
tables.append(table)
return tables
|
Parameters
----------
table_names_to_dataframes : dict
Dictionary mapping each table name to a DataFrame
table_names_to_primary_keys : dict
Dictionary mapping each table to its primary key
table_names_to_indices : dict
Dictionary mapping each table to a set of indices
Returns list of DatabaseTable objects
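A hedged usage sketch (the DataFrame contents and key names are illustrative
only)::
>>> import pandas as pd
>>> users = pd.DataFrame({'id': [1, 2], 'name': ['alice', 'bob']})
>>> tables = build_tables(
...     {'users': users},
...     table_names_to_primary_keys={'users': 'id'},
...     table_names_to_indices={'users': [('name',)]})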
|
28,679 |
def teams(self, page=None, year=None, simple=False, keys=False):
if page is not None:
if year:
            if keys:
                # Endpoint paths were elided in the source; the strings below
                # follow The Blue Alliance API v3 layout as an assumption.
                return self._get('teams/%s/%s/keys' % (year, page))
            else:
                return [Team(raw) for raw in self._get('teams/%s/%s%s' % (year, page, '/simple' if simple else ''))]
        else:
            if keys:
                return self._get('teams/%s/keys' % page)
            else:
                return [Team(raw) for raw in self._get('teams/%s%s' % (page, '/simple' if simple else ''))]
else:
teams = []
target = 0
while True:
page_teams = self.teams(page=target, year=year, simple=simple, keys=keys)
if page_teams:
teams.extend(page_teams)
else:
break
target += 1
return teams
|
Get list of teams.
:param page: Page of teams to view. Each page contains 500 teams.
:param year: View teams from a specific year.
:param simple: Get only vital data.
:param keys: Set to true if you only want the teams' keys rather than full data on them.
:return: List of Team objects or string keys.
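A hedged usage sketch, assuming ``tba`` is an instance of the client class
that defines this method::
>>> keys_2017 = tba.teams(page=0, year=2017, keys=True)
>>> all_teams = tba.teams(year=2017, simple=True)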
|
28,680 |
def rmtree_p(self):
try:
self.rmtree()
except OSError:
_, e, _ = sys.exc_info()
if e.errno != errno.ENOENT:
raise
return self
|
Like :meth:`rmtree`, but does not raise an exception if the
directory does not exist.
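A minimal sketch, assuming ``p`` is a path object of this class that may or
may not point at an existing directory::
>>> p.rmtree_p()  # returns ``p``; no exception if the directory is absent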
|
28,681 |
def _remove_hidden_parts(projected_surface):
surface = np.copy(projected_surface)
surface[~_make_occlusion_mask(projected_surface)] = np.nan
return surface
|
Removes parts of a projected surface that are not visible.
Args:
projected_surface (surface): the surface to use
Returns:
surface: A projected surface.
|
28,682 |
def pbkdf2(password, salt, outlen, digesttype="sha1", iterations=2000):
dgst = DigestType(digesttype)
out = create_string_buffer(outlen)
if isinstance(password,chartype):
pwd = password.encode("utf-8")
else:
pwd = password
res = libcrypto.PKCS5_PBKDF2_HMAC(pwd, len(pwd), salt, len(salt),
iterations, dgst.digest, outlen, out)
if res <= 0:
raise LibCryptoError("error computing PBKDF2")
return out.raw
|
Interface to PKCS5_PBKDF2_HMAC function
Parameters:
@param password - password to derive key from
@param salt - random salt to use for key derivation
@param outlen - number of bytes to derive
@param digesttype - name of digest to use (default sha1)
@param iterations - number of iterations to use
@returns outlen bytes of key material derived from password and salt
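A hedged usage sketch (the salt and iteration count are illustrative, not
recommended values)::
>>> key = pbkdf2("secret", b"\x00" * 8, 32,
...              digesttype="sha256", iterations=10000)
>>> len(key)
32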
|
28,683 |
def hdel(self, key, field, *fields):
    return self.execute(b'HDEL', key, field, *fields)
|
Delete one or more hash fields.
|
28,684 |
def _get_operation_input_field_values(self, metadata, file_input):
    # The metadata key names were elided in the source; those below follow
    # the Google Genomics Pipelines v1 operation format as an assumption.
    input_args = metadata['request']['ephemeralPipeline']['inputParameters']
    vals_dict = metadata['request']['pipelineArgs']['inputs']
    names = [
        arg['name'] for arg in input_args if ('localCopy' in arg) == file_input
    ]
    return {name: vals_dict[name] for name in names if name in vals_dict}
|
Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs
|
28,685 |
def wait(self, timeout=-1):
if self._process is None:
raise RuntimeError()
if timeout == -1:
timeout = self._timeout
if not self._child_exited.wait(timeout):
raise Timeout()
return self.returncode
|
Wait for the child to exit.
Wait for at most *timeout* seconds, or indefinitely if *timeout* is
None. Return the value of the :attr:`returncode` attribute.
|
28,686 |
def deploy(self, *lambdas):
    # All string literals in this function were elided in the source; the log
    # messages and the environment-variable names passed to yaep.env() are
    # assumptions, while the boto3 parameter names are standard.
    if not self.role:
        logger.error('A role must be configured before deploying.')
        raise ArgumentsError('No role configured.')
    logger.debug('Deploying lambda {}'.format(self.lambda_name))
    zfh = self.package()
    if self.lambda_name in self.get_function_names():
        logger.info('Updating existing function {}'.format(self.lambda_name))
        response = self.client.update_function_code(
            FunctionName=self.lambda_name,
            ZipFile=zfh.getvalue(),
            Publish=True
        )
    else:
        logger.info('Creating new function {}'.format(self.lambda_name))
        response = self.client.create_function(
            FunctionName=self.lambda_name,
            Runtime=yaep.env(
                'LAMBDA_RUNTIME',  # env var name assumed
                'python2.7'
            ),
            Role=self.role,
            Handler=yaep.env(
                'LAMBDA_HANDLER',  # env var name assumed
                'lambda_function.lambda_handler'
            ),
            Code={
                'ZipFile': zfh.getvalue(),
            },
            Description=yaep.env(
                'LAMBDA_DESCRIPTION',  # env var name assumed
                'Lambda function {}'.format(self.lambda_name)
            ),
            Timeout=yaep.env(
                'LAMBDA_TIMEOUT',  # env var name assumed
                3,
                convert_booleans=False,
                type_class=int
            ),
            MemorySize=yaep.env(
                'LAMBDA_MEMORY_SIZE',  # env var name assumed
                128,
                convert_booleans=False,
                type_class=int
            ),
            Publish=True
        )
    status_code = response.get(
        'ResponseMetadata', {}
    ).get('HTTPStatusCode')
    if status_code in [200, 201]:
        logger.info('Successfully deployed {} (version {})'.format(
            self.lambda_name,
            response.get('Version', 'unknown')
        ))
    else:
        logger.error('Error deploying {}: {}'.format(
            self.lambda_name,
            response
        ))
|
Deploys lambdas to AWS
|
28,687 |
def get_tags(self, name):
tags = list()
for tag in self._tags:
if tag[0] == name:
tags.append(tag[1])
return tags
|
Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str]
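A minimal sketch, assuming ``node`` is an instance whose ``_tags`` list holds
``(name, value)`` pairs (the data shown is hypothetical)::
>>> node._tags = [('author', 'alice'), ('author', 'bob'), ('status', 'draft')]
>>> node.get_tags('author')
['alice', 'bob']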
|
28,688 |
def run(cmd):
cmd = [pipes.quote(c) for c in cmd]
cmd = " ".join(cmd)
cmd += "; exit 0"
try:
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
output = e.output
output = output.decode()
output = output.strip()
return output
|
Run a shell command
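A hedged usage sketch (the command and its output are illustrative)::
>>> run(['echo', 'hello world'])
'hello world'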
|
28,689 |
def state_probability(self, direction, repertoire, purview):
purview_state = self.purview_state(direction)
index = tuple(node_state if node in purview else 0
for node, node_state in enumerate(purview_state))
return repertoire[index]
|
Compute the probability of the purview in its current state given
the repertoire.
Collapses the dimensions of the repertoire that correspond to the
purview nodes onto their state. All other dimensions are already
singular and thus receive 0 as the conditioning index.
Returns:
float: A single probability.
|
28,690 |
def add_auth_hook(self, event):
    self.log('Adding authentication hook for', event.authenticator_name)  # message text assumed; it was elided in the source
self.auth_hooks[event.authenticator_name] = event.event
|
Register an event hook upon reception of an add_auth_hook event.
|
28,691 |
def F_oneway(*lists):
a = len(lists)
means = [0] * a
vars = [0] * a
ns = [0] * a
alldata = []
    tmp = lists
    means = list(map(mean, tmp))
    vars = list(map(var, tmp))
    ns = list(map(len, lists))
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    bign = len(alldata)
    sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
    ssbn = 0
    for group in lists:  # renamed from ``list`` to avoid shadowing the built-in
        ssbn = ssbn + square_of_sums(group) / float(len(group))
    ssbn = ssbn - (square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = a - 1
dfwn = bign - a
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = fprob(dfbn, dfwn, f)
return f, prob
|
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
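A hedged usage sketch (the group values are illustrative)::
>>> g1 = [4.0, 5.0, 6.0, 5.5]
>>> g2 = [6.5, 7.0, 7.5, 6.8]
>>> g3 = [5.1, 5.3, 4.9, 5.0]
>>> f, p = F_oneway(g1, g2, g3)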
|
28,692 |
def get_args(self):
for table in self.tables + self.with_tables:
if type(table) is QueryTable:
self._where.args.update(table.query.get_args())
return self._where.args
|
Gets the args for the query which will be escaped when being executed by the
db. All inner queries are inspected and their args are combined with this
query's args.
:return: all args for this query as a dict
:rtype: dict
|
28,693 |
def get_assignable_gradebook_ids(self, gradebook_id):
    mgr = self._get_provider_manager('GRADING', local=True)  # service name assumed; it was elided in the source
lookup_session = mgr.get_gradebook_lookup_session(proxy=self._proxy)
gradebooks = lookup_session.get_gradebooks()
id_list = []
for gradebook in gradebooks:
id_list.append(gradebook.get_id())
return IdList(id_list)
|
Gets a list of gradebooks including and under the given gradebook node in which any grade system can be assigned.
arg: gradebook_id (osid.id.Id): the ``Id`` of the
``Gradebook``
return: (osid.id.IdList) - list of assignable gradebook ``Ids``
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
|
28,694 |
def format_subject(subject):
    # The regular expression and the 'Re: ' literal were elided in the
    # source; they are reconstructed here from the slicing logic below.
    subject_prefix_re = r'^Re\[(\d+)\]: '
    m = re.match(subject_prefix_re, subject, re.U)
    prefix = u""
    if subject.startswith('Re: '):
        prefix = u"[2]"
        subject = subject[4:]
    elif m is not None:
        try:
            num = int(m.group(1))
            prefix = u"[%d]" % (num + 1)
            subject = subject[6 + len(str(num)):]
        except:
            pass
    return ugettext(u"Re%(prefix)s: %(subject)s") % {
        'subject': subject,
        'prefix': prefix
    }
|
Prepends 'Re:' to the subject. To avoid multiple 'Re:'s
a counter is added.
NOTE: Currently unused. First step to fix Issue #48.
FIXME: Any hints how to make this i18n aware are very welcome.
|
28,695 |
def get_window_size(self, windowHandle='current'):
    command = Command.GET_WINDOW_SIZE
    if self.w3c:
        if windowHandle != 'current':
            warnings.warn("Only 'current' window is supported for W3C compatible browsers.")
        size = self.get_window_rect()
    else:
        size = self.execute(command, {'windowHandle': windowHandle})
    if size.get('value', None) is not None:
        size = size['value']
    return {k: size[k] for k in ('width', 'height')}
|
Gets the width and height of the current window.
:Usage:
::
driver.get_window_size()
|
28,696 |
def create_detector(self, detector):
resp = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX),
data=detector)
resp.raise_for_status()
return resp.json()
|
Creates a new detector.
Args:
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (created detector model).
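A hedged usage sketch, assuming ``client`` is an instance of the class that
defines this method; the detector fields shown are illustrative, not a
definitive schema::
>>> detector = {'name': 'high error rate',
...             'programText': "detect(when(data('errors') > 100)).publish()"}
>>> created = client.create_detector(detector)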
|
28,697 |
def _set_fcoe(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # The namespace, defining-module, and extension strings were elided in
        # the source; the values below follow the usual Brocade binding layout
        # and are assumptions.
        t = YANGDynClass(v, base=fcoe.fcoe, is_container='container', presence=False, yang_name="fcoe", rest_name="fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'FCoE profile configuration', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fcoe must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=fcoe.fcoe, is_container='container', yang_name="fcoe", rest_name="fcoe", yang_type='container', is_config=True)""",
        })
    self.__fcoe = t
    if hasattr(self, '_set'):
        self._set()
|
Setter method for fcoe, mapped from YANG variable /hardware/custom_profile/kap_custom_profile/fcoe (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe() directly.
|
28,698 |
def SetFlushInterval(self, flush_interval):
self._flush_interval = flush_interval
    logger.debug('Set flush interval to {0:d}'.format(flush_interval))  # message text assumed; it was elided in the source
|
Set the flush interval.
Args:
flush_interval (int): number of events to buffer before doing a bulk
insert.
|
28,699 |
def modelshort(self):
    try:
        name = SHORT_MODELNAMES[self.model]
        # The index-suffix format was elided in the source; '-{}' is assumed.
        if hasattr(self, 'index'):
            name += '-{}'.format(self.index)
        return name
    except KeyError:
        raise KeyError('Unknown model name: %s' % self.model)  # message assumed
|
Short version of model name
Dictionary defined in ``populations.py``::
SHORT_MODELNAMES = {'Planets':'pl',
'EBs':'eb',
'HEBs':'heb',
'BEBs':'beb',
'Blended Planets':'bpl',
'Specific BEB':'sbeb',
'Specific HEB':'sheb'}
|