id (int64, 11 to 59.9k) | original (string, 33 to 150k chars) | modified (string, 37 to 150k chars) |
---|---|---|
11,010 | def display_method(*, boolean=None, ordering=None, description=None, empty_value=None):
"""
Conveniently add attributes to a display method::
@admin.display_method(
boolean=True,
ordering='-publish_date',
description='Is Published?',
)
def is_published(self, obj):
return obj.publish_date is not None
This is equivalent to setting the attributes on the method directly::
def is_published(self, obj):
return obj.publish_date is not None
is_published.boolean = True
is_published.admin_order_field = '-publish_date'
is_published.short_description = 'Is Published?'
"""
def decorator(f):
if boolean is not None:
f.boolean = boolean
if ordering is not None:
f.admin_order_field = ordering
if description is not None:
f.short_description = description
if empty_value is not None:
f.empty_value_display = empty_value
return f
return decorator
| def display_method(*, boolean=None, ordering=None, description=None, empty_value=None):
"""
Conveniently add attributes to a display function::
@admin.display_method(
boolean=True,
ordering='-publish_date',
description='Is Published?',
)
def is_published(self, obj):
return obj.publish_date is not None
This is equivalent to setting the attributes on the method directly::
def is_published(self, obj):
return obj.publish_date is not None
is_published.boolean = True
is_published.admin_order_field = '-publish_date'
is_published.short_description = 'Is Published?'
"""
def decorator(f):
if boolean is not None:
f.boolean = boolean
if ordering is not None:
f.admin_order_field = ordering
if description is not None:
f.short_description = description
if empty_value is not None:
f.empty_value_display = empty_value
return f
return decorator
|
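The pair above is a keyword-only decorator factory that copies admin display options onto the decorated callable. A minimal, runnable sketch of the same pattern (the decorator body is repeated from the snippet above; the decorated `is_published` function is a hypothetical stand-in for an admin method):

```python
def display_method(*, boolean=None, ordering=None, description=None, empty_value=None):
    def decorator(f):
        # Copy each supplied option onto the wrapped callable as an attribute.
        if boolean is not None:
            f.boolean = boolean
        if ordering is not None:
            f.admin_order_field = ordering
        if description is not None:
            f.short_description = description
        if empty_value is not None:
            f.empty_value_display = empty_value
        return f
    return decorator


@display_method(boolean=True, ordering='-publish_date', description='Is Published?')
def is_published(self, obj):  # hypothetical admin method
    return obj.publish_date is not None


# The decorator only attaches attributes; the function body is untouched.
assert is_published.boolean is True
assert is_published.admin_order_field == '-publish_date'
assert is_published.short_description == 'Is Published?'
```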
34,700 | def test_pass_arguments_to_rasa_train(
stack_config_path, monkeypatch: MonkeyPatch
) -> None:
# Create parser
parser = argparse.ArgumentParser()
sub_parser = parser.add_subparsers()
interactive.add_subparser(sub_parser, [])
# Parse interactive command
args = parser.parse_args(["interactive", "--config", stack_config_path])
interactive._set_not_required_args(args)
# Mock actual training
mock = Mock(return_value=TrainingResult(code=0))
monkeypatch.setattr(rasa, "train", mock.method)
# If the `Namespace` object does not have all required fields this will throw
train.train(args)
# Assert `train` was actually called
mock.method.assert_called_once()
| def test_pass_arguments_to_rasa_train(
stack_config_path: Text, monkeypatch: MonkeyPatch
) -> None:
# Create parser
parser = argparse.ArgumentParser()
sub_parser = parser.add_subparsers()
interactive.add_subparser(sub_parser, [])
# Parse interactive command
args = parser.parse_args(["interactive", "--config", stack_config_path])
interactive._set_not_required_args(args)
# Mock actual training
mock = Mock(return_value=TrainingResult(code=0))
monkeypatch.setattr(rasa, "train", mock.method)
# If the `Namespace` object does not have all required fields this will throw
train.train(args)
# Assert `train` was actually called
mock.method.assert_called_once()
|
57,537 | def run(sockets): # pragma: no cover
pass
| def run(sockets):
pass # pragma: no cover
|
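The only change in the pair above is where the coverage pragma sits. Assuming coverage.py's default `# pragma: no cover` marker, placement matters: on the `def` line it excludes the whole function body from the report, while on a single statement it excludes just that line. A small sketch:

```python
def run_whole_function_excluded(sockets):  # pragma: no cover
    # The pragma on the def line excludes this entire body from coverage.
    pass


def run_single_line_excluded(sockets):
    pass  # pragma: no cover  (only this statement is excluded)
```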
42,910 | def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
mean_photon (float): Guarantees that the mean photon number in the pure Gaussian state
representing the graph satisfies :math:`\sum_i \sinh(r_{i})^2 =` ``mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
# if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol)
# raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
scale = find_scaling_adjacency_matrix(A, mean_photon)
A = scale * A
s, U = takagi(A, tol=atol)
vals = -np.arctanh(s)
return vals, U
| def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
mean_photon (float): Guarantees that the mean photon number in the pure Gaussian state
representing the graph satisfies :math:`\sum_i \sinh(r_{i})^2 =` ``mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
# if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol)
# raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
scale = find_scaling_adjacency_matrix(A, mean_photon)
A = scale * A
s, U = takagi(A, tol=atol)
vals = -np.arctanh(s)
return vals, U
|
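The docstring above links the returned squeezing parameters to the mean photon number through sum_i sinh(r_i)^2. A small numpy sketch of that identity, independent of the `takagi` and `find_scaling_adjacency_matrix` helpers (the singular values below are made up and assumed to lie in (0, 1)):

```python
import numpy as np

# Hypothetical Takagi singular values of the rescaled adjacency matrix.
s = np.array([0.3, 0.5, 0.7])

# Squeezing parameters as in graph_embed (up to sign, |r_i| = arctanh(s_i)).
r = np.arctanh(s)

# Mean photon number of the corresponding pure Gaussian state.
mean_photon = np.sum(np.sinh(r) ** 2)

# Closed form of the same quantity: sinh(arctanh(x))**2 == x**2 / (1 - x**2).
assert np.allclose(mean_photon, np.sum(s ** 2 / (1 - s ** 2)))
print(mean_photon)
```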
58,217 | def is_error_code(status_code):
"""Returns a boolean representing whether or not a status code is an error code.
Error status codes by default are 500-599.
You may also enable custom error codes::
from ddtrace import config
config.http_server.error_statuses = '401-404,419'
Ranges and singular error codes are permitted and can be separated using commas.
"""
try:
error_str = config.http_server.error_statuses
error_ranges = error_str.split(",")
for error_range in error_ranges:
values = error_range.split("-")
min_code = int(values[0])
if len(values) == 2:
max_code = int(values[1])
else:
max_code = min_code
if min_code > max_code:
tmp = min_code
min_code = max_code
max_code = tmp
if min_code <= int(status_code) <= max_code:
return True
except AttributeError:
if 500 <= int(status_code) <= 599:
return True
return False
| def is_error_code(status_code):
"""Returns a boolean representing whether or not a status code is an error code.
Error status codes by default are 500-599.
You may also enable custom error codes::
from ddtrace import config
config.http_server.error_statuses = '401-404,419'
Ranges and singular error codes are permitted and can be separated using commas.
"""
try:
error_str = config.http_server.error_statuses
error_ranges = error_str.split(",")
for error_range in error_ranges:
values = error_range.split("-")
values = [int(v) for v in values]
if min(values) <= int(status_code) <= max(values):
return True
except AttributeError:
if 500 <= int(status_code) <= 599:
return True
return False
|
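Both versions above accept a comma-separated mix of single codes and ranges such as '401-404,419'. A standalone sketch of that parsing, with the status string passed in directly instead of being read from `config.http_server` (the function name is hypothetical):

```python
def status_matches(status_code, error_statuses="500-599"):
    """Return True if status_code falls inside any listed code or range."""
    for part in error_statuses.split(","):
        bounds = [int(v) for v in part.split("-")]
        # A single code such as "419" yields one bound; min/max cover both that
        # case and reversed ranges such as "404-401".
        if min(bounds) <= int(status_code) <= max(bounds):
            return True
    return False


assert status_matches(503)                      # default 500-599 range
assert status_matches(419, "401-404,419")       # singular code
assert not status_matches(200, "401-404,419")
```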
21,989 | def read_version():
fn = os.path.join(os.path.dirname(__file__), "sydent", "__init__.py")
with open(fn) as fp:
f = fp.read()
return re.search(r"^__version__ = \"(.*)\"", f).group(1)
| def read_version():
fn = os.path.join(os.path.dirname(__file__), "sydent", "__init__.py")
with open(fn) as fp:
f = fp.read()
return re.search(r'^__version__ = "(.*)"', f).group(1)
|
51,652 | def module(*args):
module_cmd = eval(_cmd_template) # So we can monkeypatch for testing
if args[0] in module_change_commands:
# Do the module manipulation, then output the environment in JSON
# and read the JSON back in the parent process to update os.environ
# For python, we use the same python running the Spack process, because
# we can guarantee its existence. We have to do some LD_LIBRARY_PATH
# shenanigans to ensure python will run.
# LD_LIBRARY_PATH under which Spack ran
os.environ['SPACK_LD_LIBRARY_PATH'] = spack.main.spack_ld_library_path
# suppress output from module function
module_cmd += ' >/dev/null;'
# Capture the new LD_LIBRARY_PATH after `module` was run
module_cmd += 'export SPACK_NEW_LD_LIBRARY_PATH="$LD_LIBRARY_PATH";'
# Set LD_LIBRARY_PATH to value at Spack startup time to ensure that
# python executable finds its libraries
module_cmd += 'LD_LIBRARY_PATH="$SPACK_LD_LIBRARY_PATH" '
# Execute the python command
module_cmd += '%s -c "%s";' % (sys.executable, py_cmd)
# If LD_LIBRARY_PATH was set after `module`, dump the old value because
# we have since corrupted it to ensure python would run.
# dump SPACKIGNORE as a placeholder for parsing if LD_LIBRARY_PATH null
module_cmd += 'echo "${SPACK_NEW_LD_LIBRARY_PATH:-SPACKIGNORE}"'
module_p = subprocess.Popen(module_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable="/bin/bash")
# Cray modules spit out warnings that we cannot suppress.
# This hack skips to the last output (the environment)
env_out = str(module_p.communicate()[0].decode()).strip().split('\n')
# The environment dumped as json
env_json = env_out[-2]
# Either the uncorrupted $LD_LIBRARY_PATH or SPACKIGNORE
new_ld_library_path = env_out[-1]
# Update os.environ with new dict
env_dict = json.loads(env_json)
os.environ.clear()
os.environ.update(env_dict)
# Override restored LD_LIBRARY_PATH with pre-python value
if new_ld_library_path == 'SPACKIGNORE':
os.environ.pop('LD_LIBRARY_PATH', None)
else:
os.environ['LD_LIBRARY_PATH'] = new_ld_library_path
else:
# Simply execute commands that don't change state and return output
module_p = subprocess.Popen(module_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable="/bin/bash")
# Decode and str to return a string object in both python 2 and 3
return str(module_p.communicate()[0].decode())
| def module(*args):
module_cmd = eval(_cmd_template) # So we can monkeypatch for testing
if args[0] in module_change_commands:
# Do the module manipulation, then output the environment in JSON
# and read the JSON back in the parent process to update os.environ
# For python, we use the same python running the Spack process, because
# we can guarantee its existence. We have to do some LD_LIBRARY_PATH
# shenanigans to ensure python will run.
# LD_LIBRARY_PATH under which Spack ran
os.environ['SPACK_LD_LIBRARY_PATH'] = spack.main.spack_ld_library_path
# suppress output from module function
module_cmd += ' >/dev/null;'
# Capture the new LD_LIBRARY_PATH after `module` was run
module_cmd += 'export SPACK_NEW_LD_LIBRARY_PATH="$LD_LIBRARY_PATH";'
# Set LD_LIBRARY_PATH to value at Spack startup time to ensure that
# python executable finds its libraries
module_cmd += 'LD_LIBRARY_PATH="$SPACK_LD_LIBRARY_PATH" '
# Execute the python command
module_cmd += '%s -c "%s";' % (sys.executable, py_cmd)
# If LD_LIBRARY_PATH was set after `module`, dump the old value because
# we have since corrupted it to ensure python would run.
# dump SPACKIGNORE as a placeholder for parsing if LD_LIBRARY_PATH null
module_cmd += 'echo "${SPACK_NEW_LD_LIBRARY_PATH:-SPACKIGNORE}"'
module_p = subprocess.Popen(module_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable="/bin/bash")
# Cray modules spit out warnings that we cannot suppress.
# This hack skips to the last output (the environment)
env_out = str(module_p.communicate()[0].decode()).strip().split('\n')
# The environment dumped as json
env_json = env_out[-2]
# Either the uncorrupted $LD_LIBRARY_PATH or SPACKIGNORE
new_ld_library_path = env_out[-1]
# Update os.environ with new dict
env_dict = json.loads(env_json)
os.environ.clear()
os.environ.update(env_dict)
# Override restored LD_LIBRARY_PATH with pre-python value
if new_ld_library_path == 'SPACKIGNORE':
os.environ.pop('LD_LIBRARY_PATH', None)
else:
os.environ['LD_LIBRARY_PATH'] = new_ld_library_path
else:
# Simply execute commands that don't change state and return output
module_p = subprocess.Popen(module_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable="/bin/bash")
# Decode and str to return a string object in both python 2 and 3
return str(module_p.communicate()[0].decode())
|
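The key trick in the function above is to let a child shell change its environment, dump that environment as JSON from a child python, and load the JSON back into `os.environ` in the parent. A stripped-down sketch of that round trip, without the module system or the LD_LIBRARY_PATH bookkeeping (assumes a POSIX system with /bin/bash; the exported variable name is made up):

```python
import json
import os
import subprocess
import sys

# Child python command that prints the child's environment as JSON.
py_cmd = "import os, json; print(json.dumps(dict(os.environ)))"

# Child shell: mutate the environment, then dump it with the same interpreter
# that is running this script.
shell_cmd = 'export EXAMPLE_VAR="set-by-child"; %s -c "%s"' % (sys.executable, py_cmd)

proc = subprocess.Popen(shell_cmd, stdout=subprocess.PIPE, shell=True,
                        executable="/bin/bash")
env_json = proc.communicate()[0].decode().strip()

# Replace the parent's environment with the child's snapshot.
os.environ.clear()
os.environ.update(json.loads(env_json))
print(os.environ["EXAMPLE_VAR"])  # set-by-child
```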
13,276 | def test_thread_save_draw():
shared_strategy = st.recursive(st.integers(), lambda s: st.lists(s, max_size=3))
errors = []
@given(data=st.data())
def test(data):
try:
data.draw(shared_strategy)
except Exception as exc:
errors.append(exc)
threads = []
for _ in range(2):
threads.append(threading.Thread(target=test))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert not errors
| def test_drawing_from_recursive_strategy_is_thread_safe():
shared_strategy = st.recursive(st.integers(), lambda s: st.lists(s, max_size=3))
errors = []
@given(data=st.data())
def test(data):
try:
data.draw(shared_strategy)
except Exception as exc:
errors.append(exc)
threads = []
for _ in range(2):
threads.append(threading.Thread(target=test))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert not errors
|
57,976 | def run_op_command(device: Union[Panorama, Firewall], cmd: str, **kwargs) -> Element:
result: Element = device.op(cmd, **kwargs)
if "status" in result:
if result.attrib.get("status") != "success":
raise OpCommandError
return result
| def run_op_command(device: Union[Panorama, Firewall], cmd: str, **kwargs) -> Element:
result: Element = device.op(cmd, **kwargs)
if "status" in result and result.attrib.get("status") != "success":
raise OpCommandError
if result.attrib.get("status") != "success":
raise OpCommandError
return result
|
57,782 | def get_value_to_set(args):
value = args.get('value')
apply_if_empty = strtobool(args.get('applyIfEmpty', 'false'))
if value is None or (apply_if_empty and is_value_empty(value)):
value = args.get('defaultValue')
if isinstance(value, list):
for i, item in enumerate(value):
value[i] = encode_string_results(item)
return value
else:
return encode_string_results(value)
| def get_value_to_set(args):
value = args.get('value')
apply_if_empty = strtobool(args.get('applyIfEmpty', 'false'))
if value is None or (apply_if_empty and is_value_empty(value)):
value = args.get('defaultValue')
if isinstance(value, list):
for key, item in enumerate(value):
value[key] = encode_string_results(item)
return value
else:
return encode_string_results(value)
|
4,553 | def permuted_ols(tested_vars, target_vars, confounding_vars=None,
model_intercept=True, n_perm=10000, two_sided_test=True,
random_state=None, n_jobs=1, verbose=0):
"""Massively univariate group analysis with permuted OLS.
Tested variates are independently fitted to target variates descriptors
(e.g. brain imaging signal) according to a linear model solved with an
Ordinary Least Squares criterion.
Confounding variates may be included in the model.
Permutation testing is used to assess the significance of the relationship
between the tested variates and the target variates [Anderson,
Winkler]. A max-type procedure is used to obtain family-wise
corrected p-values.
The specific permutation scheme implemented here is the one of
[Freedman & Lane]. Its has been demonstrated in [Anderson] that this
scheme conveys more sensitivity than alternative schemes. This holds
for neuroimaging applications, as discussed in details in [Winkler].
Permutations are performed on parallel computing units. Each of them
performs a fraction of permutations on the whole dataset. Thus, the max
t-score amongst data descriptors can be computed directly, which avoids
storing all the computed t-scores.
The variates should be given C-contiguous. target_vars are fortran-ordered
automatically to speed-up computations.
Parameters
----------
tested_vars : array-like, shape=(n_samples, n_regressors)
Explanatory variates, fitted and tested independently from each others.
target_vars : array-like, shape=(n_samples, n_descriptors)
fMRI data, trying to be explained by explanatory and confounding
variates.
confounding_vars : array-like, shape=(n_samples, n_covars)
Confounding variates (covariates), fitted but not tested.
If None, no confounding variate is added to the model
(except maybe a constant column according to the value of
`model_intercept`)
model_intercept : bool,
If True, a constant column is added to the confounding variates
unless the tested variate is already the intercept.
n_perm : int,
Number of permutations to perform.
Permutations are costly but the more are performed, the more precision
one gets in the p-values estimation.
two_sided_test : boolean,
If True, performs an unsigned t-test. Both positive and negative
effects are considered; the null hypothesis is that the effect is zero.
If False, only positive effects are considered as relevant. The null
hypothesis is that the effect is zero or negative.
random_state : int or None,
Seed for random number generator, to have the same permutations
in each computing units.
n_jobs : int,
Number of parallel workers.
If 0 is provided, all CPUs are used.
A negative number indicates that all the CPUs except (abs(n_jobs) - 1)
ones will be used.
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
pvals : array-like, shape=(n_regressors, n_descriptors)
Negative log10 p-values associated with the significance test of the
n_regressors explanatory variates against the n_descriptors target
variates. Family-wise corrected p-values.
score_orig_data : numpy.ndarray, shape=(n_regressors, n_descriptors)
t-statistic associated with the significance test of the n_regressors
explanatory variates against the n_descriptors target variates.
The ranks of the scores into the h0 distribution correspond to the
p-values.
h0_fmax : array-like, shape=(n_perm, )
Distribution of the (max) t-statistic under the null hypothesis
(obtained from the permutations). Array is sorted.
References
----------
* Anderson, M. J. & Robinson, J. (2001).
Permutation tests for linear models.
Australian & New Zealand Journal of Statistics, 43(1), 75-88.
* Winkler, A. M. et al. (2014).
Permutation inference for the general linear model.
Neuroimage.
* Freedman, D. & Lane, D. (1983).
A nonstochastic interpretation of reported significance levels.
J. Bus. Econ. Stats., 1(4), 292-298
"""
# initialize the seed of the random generator
rng = check_random_state(random_state)
# check n_jobs (number of CPUs)
if n_jobs == 0: # invalid according to joblib's conventions
raise ValueError("'n_jobs == 0' is not a valid choice. "
"Please provide a positive number of CPUs, or -1 "
"for all CPUs, or a negative number (-i) for "
"'all but (i-1)' CPUs (joblib conventions).")
elif n_jobs < 0:
n_jobs = max(1, joblib.cpu_count() - int(n_jobs) + 1)
else:
n_jobs = min(n_jobs, joblib.cpu_count())
# make target_vars F-ordered to speed-up computation
if target_vars.ndim != 2:
raise ValueError("'target_vars' should be a 2D array. "
"An array with %d dimension%s was passed"
% (target_vars.ndim,
"s" if target_vars.ndim > 1 else ""))
target_vars = np.asfortranarray(target_vars) # efficient for chunking
n_descriptors = target_vars.shape[1]
if np.any(np.all(target_vars == 0, axis=0)):
warnings.warn("Some descriptors in 'target_vars' have zeros across all "
"samples. These descriptors will be ignored during null "
"distribution generation.")
# check explanatory variates dimensions
if tested_vars.ndim == 1:
tested_vars = np.atleast_2d(tested_vars).T
n_samples, n_regressors = tested_vars.shape
# check if explanatory variates is intercept (constant) or not
if (n_regressors == 1 and np.unique(tested_vars).size == 1):
intercept_test = True
else:
intercept_test = False
# optionally add intercept
if model_intercept and not intercept_test:
if confounding_vars is not None:
confounding_vars = np.hstack(
(confounding_vars, np.ones((n_samples, 1))))
else:
confounding_vars = np.ones((n_samples, 1))
### OLS regression on original data
if confounding_vars is not None:
# step 1: extract effect of covars from target vars
covars_orthonormalized = orthonormalize_matrix(confounding_vars)
if not covars_orthonormalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Confounding variates not C_CONTIGUOUS.')
covars_orthonormalized = np.ascontiguousarray(
covars_orthonormalized)
targetvars_normalized = normalize_matrix_on_axis(
target_vars).T # faster with F-ordered target_vars_chunk
if not targetvars_normalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
targetvars_normalized = np.ascontiguousarray(targetvars_normalized)
beta_targetvars_covars = np.dot(targetvars_normalized,
covars_orthonormalized)
targetvars_resid_covars = targetvars_normalized - np.dot(
beta_targetvars_covars, covars_orthonormalized.T)
targetvars_resid_covars = normalize_matrix_on_axis(
targetvars_resid_covars, axis=1)
# step 2: extract effect of covars from tested vars
testedvars_normalized = normalize_matrix_on_axis(tested_vars.T, axis=1)
beta_testedvars_covars = np.dot(testedvars_normalized,
covars_orthonormalized)
testedvars_resid_covars = testedvars_normalized - np.dot(
beta_testedvars_covars, covars_orthonormalized.T)
testedvars_resid_covars = normalize_matrix_on_axis(
testedvars_resid_covars, axis=1).T.copy()
else:
targetvars_resid_covars = normalize_matrix_on_axis(target_vars).T
testedvars_resid_covars = normalize_matrix_on_axis(tested_vars).copy()
covars_orthonormalized = None
# check arrays contiguousity (for the sake of code efficiency)
if not targetvars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
targetvars_resid_covars = np.ascontiguousarray(targetvars_resid_covars)
if not testedvars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Tested variates not C_CONTIGUOUS.')
testedvars_resid_covars = np.ascontiguousarray(testedvars_resid_covars)
# step 3: original regression (= regression on residuals + adjust t-score)
# compute t score for original data
scores_original_data = _t_score_with_covars_and_normalized_design(
testedvars_resid_covars, targetvars_resid_covars.T,
covars_orthonormalized)
if two_sided_test:
sign_scores_original_data = np.sign(scores_original_data)
scores_original_data = np.fabs(scores_original_data)
### Permutations
# parallel computing units perform a reduced number of permutations each
if n_perm > n_jobs:
n_perm_chunks = np.asarray([n_perm / n_jobs] * n_jobs, dtype=int)
n_perm_chunks[-1] += n_perm % n_jobs
elif n_perm > 0:
warnings.warn('The specified number of permutations is %d and '
'the number of jobs to be performed in parallel has '
'been set to %s. This is incompatible so only %d jobs will '
'be running. You may want to perform more permutations '
'in order to make the most of the available computing '
'resources.' % (n_perm, n_jobs, n_perm))
n_perm_chunks = np.ones(n_perm, dtype=int)
else: # 0 or negative number of permutations => original data scores only
if two_sided_test:
scores_original_data = (scores_original_data
* sign_scores_original_data)
return np.asarray([]), scores_original_data.T, np.asarray([])
# actual permutations, seeded from a random integer between 0 and maximum
# value represented by np.int32 (to have a large entropy).
ret = joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(
joblib.delayed(_permuted_ols_on_chunk)(
scores_original_data, testedvars_resid_covars,
targetvars_resid_covars.T, thread_id + 1, covars_orthonormalized,
n_perm=n_perm, n_perm_chunk=n_perm_chunk,
intercept_test=intercept_test, two_sided_test=two_sided_test,
random_state=rng.randint(1, np.iinfo(np.int32).max - 1),
verbose=verbose)
for thread_id,n_perm_chunk in enumerate(n_perm_chunks))
# reduce results
scores_as_ranks_parts, h0_fmax_parts = zip(*ret)
h0_fmax = np.hstack((h0_fmax_parts))
scores_as_ranks = np.zeros((n_regressors, n_descriptors))
for scores_as_ranks_part in scores_as_ranks_parts:
scores_as_ranks += scores_as_ranks_part
# convert ranks into p-values
pvals = (n_perm + 1 - scores_as_ranks) / float(1 + n_perm)
# put back sign on scores if it was removed in the case of a two-sided test
# (useful to distinguish between positive and negative effects)
if two_sided_test:
scores_original_data = scores_original_data * sign_scores_original_data
return - np.log10(pvals), scores_original_data.T, h0_fmax[0]
| def permuted_ols(tested_vars, target_vars, confounding_vars=None,
model_intercept=True, n_perm=10000, two_sided_test=True,
random_state=None, n_jobs=1, verbose=0):
"""Massively univariate group analysis with permuted OLS.
Tested variates are independently fitted to target variates descriptors
(e.g. brain imaging signal) according to a linear model solved with an
Ordinary Least Squares criterion.
Confounding variates may be included in the model.
Permutation testing is used to assess the significance of the relationship
between the tested variates and the target variates [Anderson,
Winkler]. A max-type procedure is used to obtain family-wise
corrected p-values.
The specific permutation scheme implemented here is the one of
[Freedman & Lane]. Its has been demonstrated in [Anderson] that this
scheme conveys more sensitivity than alternative schemes. This holds
for neuroimaging applications, as discussed in details in [Winkler].
Permutations are performed on parallel computing units. Each of them
performs a fraction of permutations on the whole dataset. Thus, the max
t-score amongst data descriptors can be computed directly, which avoids
storing all the computed t-scores.
The variates should be given C-contiguous. target_vars are fortran-ordered
automatically to speed-up computations.
Parameters
----------
tested_vars : array-like, shape=(n_samples, n_regressors)
Explanatory variates, fitted and tested independently from each others.
target_vars : array-like, shape=(n_samples, n_descriptors)
fMRI data, trying to be explained by explanatory and confounding
variates.
confounding_vars : array-like, shape=(n_samples, n_covars)
Confounding variates (covariates), fitted but not tested.
If None, no confounding variate is added to the model
(except maybe a constant column according to the value of
`model_intercept`)
model_intercept : bool,
If True, a constant column is added to the confounding variates
unless the tested variate is already the intercept.
n_perm : int,
Number of permutations to perform.
Permutations are costly but the more are performed, the more precision
one gets in the p-values estimation.
two_sided_test : boolean,
If True, performs an unsigned t-test. Both positive and negative
effects are considered; the null hypothesis is that the effect is zero.
If False, only positive effects are considered as relevant. The null
hypothesis is that the effect is zero or negative.
random_state : int or None,
Seed for random number generator, to have the same permutations
in each computing units.
n_jobs : int,
Number of parallel workers.
If 0 is provided, all CPUs are used.
A negative number indicates that all the CPUs except (abs(n_jobs) - 1)
ones will be used.
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
pvals : array-like, shape=(n_regressors, n_descriptors)
Negative log10 p-values associated with the significance test of the
n_regressors explanatory variates against the n_descriptors target
variates. Family-wise corrected p-values.
score_orig_data : numpy.ndarray, shape=(n_regressors, n_descriptors)
t-statistic associated with the significance test of the n_regressors
explanatory variates against the n_descriptors target variates.
The ranks of the scores into the h0 distribution correspond to the
p-values.
h0_fmax : array-like, shape=(n_perm, )
Distribution of the (max) t-statistic under the null hypothesis
(obtained from the permutations). Array is sorted.
References
----------
* Anderson, M. J. & Robinson, J. (2001).
Permutation tests for linear models.
Australian & New Zealand Journal of Statistics, 43(1), 75-88.
* Winkler, A. M. et al. (2014).
Permutation inference for the general linear model.
Neuroimage.
* Freedman, D. & Lane, D. (1983).
A nonstochastic interpretation of reported significance levels.
J. Bus. Econ. Stats., 1(4), 292-298
"""
# initialize the seed of the random generator
rng = check_random_state(random_state)
# check n_jobs (number of CPUs)
if n_jobs == 0: # invalid according to joblib's conventions
raise ValueError("'n_jobs == 0' is not a valid choice. "
"Please provide a positive number of CPUs, or -1 "
"for all CPUs, or a negative number (-i) for "
"'all but (i-1)' CPUs (joblib conventions).")
elif n_jobs < 0:
n_jobs = max(1, joblib.cpu_count() - int(n_jobs) + 1)
else:
n_jobs = min(n_jobs, joblib.cpu_count())
# make target_vars F-ordered to speed-up computation
if target_vars.ndim != 2:
raise ValueError("'target_vars' should be a 2D array. "
"An array with %d dimension%s was passed"
% (target_vars.ndim,
"s" if target_vars.ndim > 1 else ""))
target_vars = np.asfortranarray(target_vars) # efficient for chunking
n_descriptors = target_vars.shape[1]
if np.any(np.all(target_vars == 0, axis=0)):
warnings.warn("Some descriptors in 'target_vars' have zeros across all "
"samples. These descriptors will be ignored during null "
"distribution generation.")
# check explanatory variates dimensions
if tested_vars.ndim == 1:
tested_vars = np.atleast_2d(tested_vars).T
n_samples, n_regressors = tested_vars.shape
# check if explanatory variates is intercept (constant) or not
if (n_regressors == 1 and np.unique(tested_vars).size == 1):
intercept_test = True
else:
intercept_test = False
# optionally add intercept
if model_intercept and not intercept_test:
if confounding_vars is not None:
confounding_vars = np.hstack(
(confounding_vars, np.ones((n_samples, 1))))
else:
confounding_vars = np.ones((n_samples, 1))
### OLS regression on original data
if confounding_vars is not None:
# step 1: extract effect of covars from target vars
covars_orthonormalized = orthonormalize_matrix(confounding_vars)
if not covars_orthonormalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Confounding variates not C_CONTIGUOUS.')
covars_orthonormalized = np.ascontiguousarray(
covars_orthonormalized)
targetvars_normalized = normalize_matrix_on_axis(
target_vars).T # faster with F-ordered target_vars_chunk
if not targetvars_normalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
targetvars_normalized = np.ascontiguousarray(targetvars_normalized)
beta_targetvars_covars = np.dot(targetvars_normalized,
covars_orthonormalized)
targetvars_resid_covars = targetvars_normalized - np.dot(
beta_targetvars_covars, covars_orthonormalized.T)
targetvars_resid_covars = normalize_matrix_on_axis(
targetvars_resid_covars, axis=1)
# step 2: extract effect of covars from tested vars
testedvars_normalized = normalize_matrix_on_axis(tested_vars.T, axis=1)
beta_testedvars_covars = np.dot(testedvars_normalized,
covars_orthonormalized)
testedvars_resid_covars = testedvars_normalized - np.dot(
beta_testedvars_covars, covars_orthonormalized.T)
testedvars_resid_covars = normalize_matrix_on_axis(
testedvars_resid_covars, axis=1).T.copy()
else:
targetvars_resid_covars = normalize_matrix_on_axis(target_vars).T
testedvars_resid_covars = normalize_matrix_on_axis(tested_vars).copy()
covars_orthonormalized = None
# check arrays contiguousity (for the sake of code efficiency)
if not targetvars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
targetvars_resid_covars = np.ascontiguousarray(targetvars_resid_covars)
if not testedvars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Tested variates not C_CONTIGUOUS.')
testedvars_resid_covars = np.ascontiguousarray(testedvars_resid_covars)
# step 3: original regression (= regression on residuals + adjust t-score)
# compute t score for original data
scores_original_data = _t_score_with_covars_and_normalized_design(
testedvars_resid_covars, targetvars_resid_covars.T,
covars_orthonormalized)
if two_sided_test:
sign_scores_original_data = np.sign(scores_original_data)
scores_original_data = np.fabs(scores_original_data)
### Permutations
# parallel computing units perform a reduced number of permutations each
if n_perm > n_jobs:
n_perm_chunks = np.asarray([n_perm / n_jobs] * n_jobs, dtype=int)
n_perm_chunks[-1] += n_perm % n_jobs
elif n_perm > 0:
warnings.warn('The specified number of permutations is %d and '
'the number of jobs to be performed in parallel has '
'been set to %s. This is incompatible so only %d jobs will '
'be running. You may want to perform more permutations '
'in order to make the most of the available computing '
'resources.' % (n_perm, n_jobs, n_perm))
n_perm_chunks = np.ones(n_perm, dtype=int)
else: # 0 or negative number of permutations => original data scores only
if two_sided_test:
scores_original_data = (scores_original_data
* sign_scores_original_data)
return np.asarray([]), scores_original_data.T, np.asarray([])
# actual permutations, seeded from a random integer between 0 and maximum
# value represented by np.int32 (to have a large entropy).
ret = joblib.Parallel(n_jobs=n_jobs, verbose=verbose)(
joblib.delayed(_permuted_ols_on_chunk)(
scores_original_data, testedvars_resid_covars,
targetvars_resid_covars.T, thread_id + 1, covars_orthonormalized,
n_perm=n_perm, n_perm_chunk=n_perm_chunk,
intercept_test=intercept_test, two_sided_test=two_sided_test,
random_state=rng.randint(1, np.iinfo(np.int32).max - 1),
verbose=verbose)
for thread_id, n_perm_chunk in enumerate(n_perm_chunks))
# reduce results
scores_as_ranks_parts, h0_fmax_parts = zip(*ret)
h0_fmax = np.hstack((h0_fmax_parts))
scores_as_ranks = np.zeros((n_regressors, n_descriptors))
for scores_as_ranks_part in scores_as_ranks_parts:
scores_as_ranks += scores_as_ranks_part
# convert ranks into p-values
pvals = (n_perm + 1 - scores_as_ranks) / float(1 + n_perm)
# put back sign on scores if it was removed in the case of a two-sided test
# (useful to distinguish between positive and negative effects)
if two_sided_test:
scores_original_data = scores_original_data * sign_scores_original_data
return - np.log10(pvals), scores_original_data.T, h0_fmax[0]
|
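The docstring above describes a max-type permutation procedure: for each permutation the largest statistic across all descriptors is recorded, and family-wise corrected p-values come from ranking the observed statistics against that null distribution. A toy numpy sketch of the general idea for a single tested variate (an illustration of the scheme, not of nilearn's Freedman-Lane implementation; the data and planted effect are made up):

```python
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_descriptors, n_perm = 50, 20, 1000

x = rng.standard_normal(n_samples)                    # tested variate
Y = rng.standard_normal((n_samples, n_descriptors))   # target descriptors
Y[:, 0] += 0.8 * x                                    # plant one real effect


def t_scores(x, Y):
    # Pearson correlation of x with each descriptor, converted to a t statistic.
    r = np.corrcoef(x, Y, rowvar=False)[0, 1:]
    return r * np.sqrt((len(x) - 2) / (1 - r ** 2))


t_obs = np.abs(t_scores(x, Y))          # two-sided: work with |t|

# Null distribution of the maximum |t| across descriptors.
h0_max = np.empty(n_perm)
for p in range(n_perm):
    h0_max[p] = np.abs(t_scores(rng.permutation(x), Y)).max()

# Family-wise corrected p-values: rank of each observed |t| within the max-null.
pvals_fwe = (1 + np.sum(h0_max[None, :] >= t_obs[:, None], axis=1)) / (n_perm + 1)
print(pvals_fwe[:3])  # descriptor 0 should be small, the rest near 1
```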
2,291 | def _make_array_out(X_out, index, get_feature_names_out, *,
array_out="default"):
"""Construct array container based on global configuration.
Parameters
----------
X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out)
Output data to be wrapped.
index: array-like of shape (n_samples,)
Index of output data.
get_feature_names_out: callable
Returns the feature names out. If the callable returns None, then
the feature names will be ["X0", "X1", ...].
array_out : {"default", "pandas"}, default="default"
Specify the output array type. If "pandas", a pandas DataFrame is
returned. If "default", an array-like without feature names is
returned.
Return
------
array_out: {ndarray, sparse matrix, dataframe} of shape \
(n_samples, n_features_out)
Wrapped array with feature names.
"""
if array_out not in {'default', 'pandas'}:
raise ValueError("array_out must be 'default' or 'pandas'")
if array_out == "default":
return X_out
feature_names_out = get_feature_names_out()
if feature_names_out is None:
feature_names_out = [f'X{i}' for i in range(X_out.shape[1])]
# array_out == "pandas"
import pandas as pd
if sp_sparse.issparse(X_out):
make_dataframe = pd.DataFrame.sparse.from_spmatrix
else:
make_dataframe = pd.DataFrame
return make_dataframe(X_out, columns=feature_names_out, index=index)
| def _make_array_out(X_out, index, get_feature_names_out, *,
array_out="default"):
"""Construct array container based on the value of `array_out`.
Parameters
----------
X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out)
Output data to be wrapped.
index: array-like of shape (n_samples,)
Index of output data.
get_feature_names_out: callable
Returns the feature names out. If the callable returns None, then
the feature names will be ["X0", "X1", ...].
array_out : {"default", "pandas"}, default="default"
Specify the output array type. If "pandas", a pandas DataFrame is
returned. If "default", an array-like without feature names is
returned.
Return
------
array_out: {ndarray, sparse matrix, dataframe} of shape \
(n_samples, n_features_out)
Wrapped array with feature names.
"""
if array_out not in {'default', 'pandas'}:
raise ValueError("array_out must be 'default' or 'pandas'")
if array_out == "default":
return X_out
feature_names_out = get_feature_names_out()
if feature_names_out is None:
feature_names_out = [f'X{i}' for i in range(X_out.shape[1])]
# array_out == "pandas"
import pandas as pd
if sp_sparse.issparse(X_out):
make_dataframe = pd.DataFrame.sparse.from_spmatrix
else:
make_dataframe = pd.DataFrame
return make_dataframe(X_out, columns=feature_names_out, index=index)
|
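A short usage sketch of the wrapping step above, assuming numpy, scipy and pandas are available (`sp_sparse` stands for `scipy.sparse` here; the index and data are made up):

```python
import numpy as np
import pandas as pd
import scipy.sparse as sp_sparse

X_out = np.arange(6).reshape(3, 2)
index = [10, 11, 12]

# A feature-name callable returning None triggers the generated ["X0", "X1", ...] names.
feature_names_out = None
if feature_names_out is None:
    feature_names_out = [f'X{i}' for i in range(X_out.shape[1])]

make_dataframe = (pd.DataFrame.sparse.from_spmatrix
                  if sp_sparse.issparse(X_out) else pd.DataFrame)
df = make_dataframe(X_out, columns=feature_names_out, index=index)
print(df)
#     X0  X1
# 10   0   1
# 11   2   3
# 12   4   5
```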
44,578 | def merge_fields(a, b):
"""Merge ECS field sets with custom field sets."""
a = copy.deepcopy(a)
b = copy.deepcopy(b)
for key in b:
if key not in a:
a[key] = b[key]
continue
# merge field details
if 'normalize' in b[key]['field_details']:
a[key].setdefault('field_details', {})
a[key]['field_details'].setdefault('normalize', [])
a[key]['field_details']['normalize'].extend(b[key]['field_details'].pop('normalize'))
if 'multi_fields' in b[key]['field_details']:
a[key].setdefault('field_details', {})
a[key]['field_details'].setdefault('multi_fields', set())
a[key]['field_details']['multi_fields'] = dedup_and_merge_lists(
a[key]['field_details']['multi_fields'], b[key]['field_details']['multi_fields'])
# if we don't do this then the update call below will overwrite a's field_details, with the original
# contents of b, which undoes our merging the multi_fields
del b[key]['field_details']['multi_fields']
a[key]['field_details'].update(b[key]['field_details'])
# merge schema details
if 'schema_details' in b[key]:
asd = a[key]['schema_details']
bsd = b[key]['schema_details']
if 'reusable' in b[key]['schema_details']:
asd.setdefault('reusable', {})
if 'top_level' in bsd['reusable']:
asd['reusable']['top_level'] = bsd['reusable']['top_level']
else:
asd['reusable'].setdefault('top_level', True)
if 'order' in bsd['reusable']:
asd['reusable']['order'] = bsd['reusable']['order']
asd['reusable'].setdefault('expected', [])
asd['reusable']['expected'].extend(bsd['reusable']['expected'])
bsd.pop('reusable')
asd.update(bsd)
# merge nested fields
if 'fields' in b[key]:
a[key].setdefault('fields', {})
a[key]['fields'] = merge_fields(a[key]['fields'], b[key]['fields'])
return a
| def merge_fields(a, b):
"""Merge ECS field sets with custom field sets."""
a = copy.deepcopy(a)
b = copy.deepcopy(b)
for key in b:
if key not in a:
a[key] = b[key]
continue
# merge field details
if 'normalize' in b[key]['field_details']:
a[key].setdefault('field_details', {})
a[key]['field_details'].setdefault('normalize', [])
a[key]['field_details']['normalize'].extend(b[key]['field_details'].pop('normalize'))
if 'multi_fields' in b[key]['field_details']:
a[key].setdefault('field_details', {})
a[key]['field_details'].setdefault('multi_fields', [])
a[key]['field_details']['multi_fields'] = dedup_and_merge_lists(
a[key]['field_details']['multi_fields'], b[key]['field_details']['multi_fields'])
# if we don't do this then the update call below will overwrite a's field_details, with the original
# contents of b, which undoes our merging the multi_fields
del b[key]['field_details']['multi_fields']
a[key]['field_details'].update(b[key]['field_details'])
# merge schema details
if 'schema_details' in b[key]:
asd = a[key]['schema_details']
bsd = b[key]['schema_details']
if 'reusable' in b[key]['schema_details']:
asd.setdefault('reusable', {})
if 'top_level' in bsd['reusable']:
asd['reusable']['top_level'] = bsd['reusable']['top_level']
else:
asd['reusable'].setdefault('top_level', True)
if 'order' in bsd['reusable']:
asd['reusable']['order'] = bsd['reusable']['order']
asd['reusable'].setdefault('expected', [])
asd['reusable']['expected'].extend(bsd['reusable']['expected'])
bsd.pop('reusable')
asd.update(bsd)
# merge nested fields
if 'fields' in b[key]:
a[key].setdefault('fields', {})
a[key]['fields'] = merge_fields(a[key]['fields'], b[key]['fields'])
return a
|
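The only behavioural change between the two versions above is the default used for `multi_fields` (a `set()` before, a list after). A tiny sketch of the surrounding deep-merge pattern, with a hypothetical `dedup_and_merge_lists` stand-in and made-up field data:

```python
import copy


def dedup_and_merge_lists(a, b):
    # Hypothetical stand-in: concatenate while keeping order and dropping duplicates.
    return list(a) + [item for item in b if item not in a]


def merge_field_details(a, b):
    a = copy.deepcopy(a)
    b = copy.deepcopy(b)
    for key, b_entry in b.items():
        a_details = a.setdefault(key, {}).setdefault('field_details', {})
        b_details = b_entry['field_details']
        if 'multi_fields' in b_details:
            a_details.setdefault('multi_fields', [])
            a_details['multi_fields'] = dedup_and_merge_lists(
                a_details['multi_fields'], b_details.pop('multi_fields'))
        # update() runs last, after popping keys that were merged element-wise.
        a_details.update(b_details)
    return a


ecs = {'host': {'field_details': {'multi_fields': ['text']}}}
custom = {'host': {'field_details': {'multi_fields': ['text', 'keyword'], 'level': 'custom'}}}
print(merge_field_details(ecs, custom))
# {'host': {'field_details': {'multi_fields': ['text', 'keyword'], 'level': 'custom'}}}
```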
35,041 | def batch_matmul(
tensor_a,
tensor_b,
oshape=None,
out_dtype=None,
transpose_a=False,
transpose_b=True,
auto_scheduler_rewritten_layout="",
):
"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are
data in batch. Supports broadcasting for batch dimension.
The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed
and tensor_b transposed) by default.
Parameters
----------
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M]
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: Optional[str] = ""
The layout after auto-scheduler's layout rewrite pass.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(tensor_a.shape) == 3, "only support 3-dim batch_matmul"
if transpose_a:
XB, XK, XI = get_const_tuple(tensor_a.shape)
else:
XB, XI, XK = get_const_tuple(tensor_a.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "k", "j"]
)
auto_scheduler.remove_index_check(tensor_b)
else:
assert len(tensor_b.shape) == 3, "only support 3-dim batch_matmul"
if transpose_b:
YB, YJ, YK = get_const_tuple(tensor_b.shape)
else:
YB, YK, YJ = get_const_tuple(tensor_b.shape)
assert XK == YK or isinstance(YK, tvm.tir.expr.Var), "shapes of x and y is inconsistent"
k = te.reduce_axis((0, XK), name="k")
if oshape is None:
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = (
tvm.tir.Any()
if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var)
else te.max(XB, YB)
)
oshape = (batch, XI, YJ)
if out_dtype is None:
out_dtype = tensor_a.dtype
if (transpose_a, transpose_b) == (True, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TT"
elif (transpose_a, transpose_b) == (True, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TN"
elif (transpose_a, transpose_b) == (False, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NT"
else: # (transpose_a, transpose_b) == (False, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NN"
output = te.compute(
oshape,
compute_lambda,
name=compute_name,
tag="batch_matmul",
attrs={"layout_free_placeholders": [tensor_b]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
| def batch_matmul(
tensor_a,
tensor_b,
oshape=None,
out_dtype=None,
transpose_a=False,
transpose_b=True,
auto_scheduler_rewritten_layout="",
):
"""Computes batch matrix multiplication of `A` and `B` when `A` and `B` are
data in batch. Supports broadcasting for batch dimension.
The A & B can be transposed. For legacy reason, we use NT format(tensor_a non-transposed
and tensor_b transposed) by default.
Parameters
----------
tensor_a : tvm.te.Tensor
3-D with shape [batch, M, K] or [batch, K, M]
tensor_b : tvm.te.Tensor
3-D with shape [batch, K, N] or [batch, N, K]
oshape : List[Optional]
Explicit intended output shape of the computation. Can be useful in cases
with dynamic input shapes.
auto_scheduler_rewritten_layout: Optional[str] = ""
The layout after auto-scheduler's layout rewrite pass.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the weight tensor is in transposed format.
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
assert len(tensor_a.shape) == 3, "only support 3-dim batch_matmul"
if transpose_a:
XB, XK, XI = get_const_tuple(tensor_a.shape)
else:
XB, XI, XK = get_const_tuple(tensor_a.shape)
if auto_scheduler_rewritten_layout:
# Infer shape for the rewritten layout
YB, YK, YJ = auto_scheduler.get_shape_from_rewritten_layout(
auto_scheduler_rewritten_layout, ["b", "k", "j"]
)
auto_scheduler.remove_index_check(tensor_b)
else:
assert len(tensor_b.shape) == 3, "only support 3-dim batch_matmul"
if transpose_b:
YB, YJ, YK = get_const_tuple(tensor_b.shape)
else:
YB, YK, YJ = get_const_tuple(tensor_b.shape)
assert XK == YK or isinstance(YK, tvm.tir.expr.Var), "shapes of x and y are inconsistent"
k = te.reduce_axis((0, XK), name="k")
if oshape is None:
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
batch = (
tvm.tir.Any()
if isinstance(XB, tvm.tir.expr.Var) or isinstance(YB, tvm.tir.expr.Var)
else te.max(XB, YB)
)
oshape = (batch, XI, YJ)
if out_dtype is None:
out_dtype = tensor_a.dtype
if (transpose_a, transpose_b) == (True, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TT"
elif (transpose_a, transpose_b) == (True, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, k, i].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_TN"
elif (transpose_a, transpose_b) == (False, True):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, j, k].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NT"
else: # (transpose_a, transpose_b) == (False, False):
compute_lambda = lambda b, i, j: te.sum(
tensor_a[b if XB != 1 else 0, i, k].astype(out_dtype)
* tensor_b[b if YB != 1 else 0, k, j].astype(out_dtype),
axis=k,
)
compute_name = "T_batch_matmul_NN"
output = te.compute(
oshape,
compute_lambda,
name=compute_name,
tag="batch_matmul",
attrs={"layout_free_placeholders": [tensor_b]},
)
if auto_scheduler_rewritten_layout:
output = auto_scheduler.rewrite_compute_body(output, auto_scheduler_rewritten_layout)
return output
|
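All four transpose combinations above reduce to a batched matrix product once the operands are viewed as [batch, M, K] and [batch, K, N]. A numpy sketch checking the default NT convention (tensor_a non-transposed, tensor_b transposed) together with broadcasting over a batch dimension of 1 (shapes are made up):

```python
import numpy as np

rng = np.random.default_rng(0)
batch, M, K, N = 4, 3, 5, 2

A = rng.standard_normal((batch, M, K))  # tensor_a, non-transposed: [batch, M, K]
B = rng.standard_normal((1, N, K))      # tensor_b, transposed layout: [batch or 1, N, K]

# NT convention: out[b, i, j] = sum_k A[b, i, k] * B[b if batches match else 0, j, k]
out_nt = np.matmul(A, np.swapaxes(B, -1, -2))  # the batch dim of 1 broadcasts

reference = np.stack([A[b] @ B[0].T for b in range(batch)])
assert np.allclose(out_nt, reference)
print(out_nt.shape)  # (4, 3, 2)
```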
10,278 | def main():
friendly_names = {
'lvm': 'LVM2_member',
}
fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
# There is no "single command" to manipulate filesystems, so we map them all out and their options
module = AnsibleModule(
argument_spec=dict(
fstype=dict(required=True, aliases=['type'],
choices=list(fstypes)),
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
mount_dir=dict(default=False)
),
supports_check_mode=True,
)
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
mount_dir = module.params['mount_dir']
if fstype in friendly_names:
fstype = friendly_names[fstype]
changed = False
try:
klass = FILESYSTEMS[fstype]
except KeyError:
module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found." % dev)
dev = Device(module, dev)
cmd = module.get_bin_path('blkid', required=True)
rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
# If blkid isn't able to identify an existing filesystem, the device is considered empty,
# so an existing filesystem would be overwritten even if force isn't enabled.
fs = raw_fs.strip()
filesystem = klass(module)
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
if same_fs and not resizefs and not force:
module.exit_json(changed=False)
elif same_fs and resizefs:
if not filesystem.GROW:
module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
out = filesystem.grow(dev, mount_dir=mount_dir)
module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
filesystem.create(opts, dev)
changed = True
module.exit_json(changed=changed)
| def main():
friendly_names = {
'lvm': 'LVM2_member',
}
fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
# There is no "single command" to manipulate filesystems, so we map them all out and their options
module = AnsibleModule(
argument_spec=dict(
fstype=dict(required=True, aliases=['type'],
choices=list(fstypes)),
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
mount_dir=dict()
),
supports_check_mode=True,
)
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
mount_dir = module.params['mount_dir']
if fstype in friendly_names:
fstype = friendly_names[fstype]
changed = False
try:
klass = FILESYSTEMS[fstype]
except KeyError:
module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found." % dev)
dev = Device(module, dev)
cmd = module.get_bin_path('blkid', required=True)
rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
# If blkid isn't able to identify an existing filesystem, the device is considered empty,
# so an existing filesystem would be overwritten even if force isn't enabled.
fs = raw_fs.strip()
filesystem = klass(module)
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
if same_fs and not resizefs and not force:
module.exit_json(changed=False)
elif same_fs and resizefs:
if not filesystem.GROW:
module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
out = filesystem.grow(dev, mount_dir=mount_dir)
module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
filesystem.create(opts, dev)
changed = True
module.exit_json(changed=changed)
|
55,355 | def split_dataset(dataset, val_data_fraction, test_data_fraction=0):
"""
Splits a dataset of type tf.data.Dataset into a training, validation, and
optionally test dataset using given ratios. Fractions are rounded up to
two decimal places.
Inspired by: https://stackoverflow.com/a/59696126
Args:
dataset (tf.data.Dataset): the input dataset to split.
val_data_fraction (float): the fraction of the validation data
between 0 and 1.
test_data_fraction (float): the fraction of the test data between 0
and 1.
Returns:
(tf.data.Dataset, tf.data.Dataset, tf.data.Dataset): a tuple of
(training, validation, test).
"""
val_data_percent = round(val_data_fraction * 100)
if not 0 <= val_data_percent <= 100:
raise ValueError('val_data_fraction must be ∈ [0,1].')
test_data_percent = round(test_data_fraction * 100)
if not 0 <= test_data_percent <= 100:
raise ValueError('test_data_fraction must be ∈ [0,1].')
if val_data_percent + test_data_percent >= 100:
raise ValueError('sum of val_data_fraction and '
+ 'test_data_fraction must be ∈ [0,1].')
dataset = dataset.enumerate()
val_dataset = dataset.filter(lambda f, data: f % 100 <= val_data_percent)
train_dataset = dataset.filter(lambda f, data:
f % 100 > test_data_percent + val_data_percent)
test_dataset = dataset.filter(lambda f, data: f % 100 > val_data_percent and
f % 100 <= val_data_percent + test_data_percent)
# remove enumeration
train_dataset = train_dataset.map(lambda f, data: data)
val_dataset = val_dataset.map(lambda f, data: data)
test_dataset = test_dataset.map(lambda f, data: data)
return train_dataset, val_dataset, test_dataset
| def split_dataset(dataset, val_data_fraction, test_data_fraction=0):
"""
Splits a dataset of type tf.data.Dataset into a training, validation, and
optionally test dataset using given ratios. Fractions are rounded up to
two decimal places.
Inspired by: https://stackoverflow.com/a/59696126
Args:
dataset (tf.data.Dataset): the input dataset to split.
val_data_fraction (float): the fraction of the validation data
between 0 and 1.
test_data_fraction (float): the fraction of the test data between 0
and 1.
Returns:
(tf.data.Dataset, tf.data.Dataset, tf.data.Dataset): a tuple of
(training, validation, test).
"""
val_data_percent = round(val_data_fraction * 100)
if not 0 <= val_data_percent <= 100:
raise ValueError('val_data_fraction must be ∈ [0,1].')
test_data_percent = round(test_data_fraction * 100)
if not 0 <= test_data_percent <= 100:
raise ValueError('test_data_fraction must be ∈ [0,1].')
if val_data_percent + test_data_percent >= 100:
raise ValueError('sum of val_data_fraction and '
'test_data_fraction must be ∈ [0,1].')
dataset = dataset.enumerate()
val_dataset = dataset.filter(lambda f, data: f % 100 <= val_data_percent)
train_dataset = dataset.filter(lambda f, data:
f % 100 > test_data_percent + val_data_percent)
test_dataset = dataset.filter(lambda f, data: f % 100 > val_data_percent and
f % 100 <= val_data_percent + test_data_percent)
# remove enumeration
train_dataset = train_dataset.map(lambda f, data: data)
val_dataset = val_dataset.map(lambda f, data: data)
test_dataset = test_dataset.map(lambda f, data: data)
return train_dataset, val_dataset, test_dataset
|
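The split above enumerates elements and buckets them by `index % 100`, so the requested fractions are effectively resolved to whole percents. The same bucketing can be checked with plain Python (the percentages below are illustrative):

```python
val_percent, test_percent = 20, 10   # e.g. val_data_fraction=0.2, test_data_fraction=0.1

indices = range(100)
val = [i for i in indices if i % 100 <= val_percent]
test = [i for i in indices
        if val_percent < i % 100 <= val_percent + test_percent]
train = [i for i in indices if i % 100 > val_percent + test_percent]

print(len(val), len(test), len(train))  # 21 10 69
```

Note that the validation bucket picks up `val_percent + 1` residues because the filter uses `<=`, mirroring the filters in the code above.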
31,821 | def list_host_action(action):
if action == 'mute':
# If the end time is in the past no error is returned and the host is not muted. Results returned are the same.
data = {
"message": str(demisto.args()['message']),
"end": int(demisto.args()['end'])
}
res = http_request('POST', '/api/v1/host/' + str(demisto.args()['hostname']) + '/mute', DATA=data)
else:
res = http_request('POST', '/api/v1/host/' + str(demisto.args()['hostname']) + '/unmute')
if res.get('action'):
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': res,
'HumanReadable': tableToMarkdown("Action Result", res)})
else:
demisto.results("No hosts affected")
| def list_host_action(action):
if action == 'mute':
# If the end time is in the past no error is returned and the host is not muted. Results returned are the same.
data = {
"message": str(demisto.args()['message']),
"end": int(demisto.args().get('end'))
}
res = http_request('POST', '/api/v1/host/' + str(demisto.args()['hostname']) + '/mute', DATA=data)
else:
res = http_request('POST', '/api/v1/host/' + str(demisto.args()['hostname']) + '/unmute')
if res.get('action'):
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': res,
'HumanReadable': tableToMarkdown("Action Result", res)})
else:
demisto.results("No hosts affected")
|
24,642 | def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point object, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
        The threshold/error that determines if convergence has occurred
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
| def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point objects, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
        The threshold/error that determines if convergence has occurred
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
|
47,094 | def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
"""
Formats a user-agent string with basic info about a request.
"""
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_tf_available():
ua += f"; tensorflow/{_tf_version}"
if is_training_run_on_sagemaker():
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in define_sagemaker_information().items())
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
| def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
"""
Formats a user-agent string with basic info about a request.
"""
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_tf_available():
ua += f"; tensorflow/{_tf_version}"
if is_training_run_on_sagemaker():
ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
|
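A short usage sketch for http_user_agent; the import paths below are assumptions about where transformers exposes this helper, and the extra dict keys are made up purely for illustration.

try:
    from transformers.file_utils import http_user_agent   # older module layout
except ImportError:
    from transformers.utils.hub import http_user_agent    # newer module layout

print(http_user_agent())                                        # base agent string
print(http_user_agent({"file_type": "model", "framework": "pytorch"}))
print(http_user_agent("my-custom-agent/0.1"))                   # appended verbatim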
18,536 | def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
add_deps=True, output_location=os.getcwd(),
signing_key=None, force=False, make_relative=False,
unsigned=False, allow_root=False, rebuild_index=False):
if spec_yaml:
packages = set()
with open(spec_yaml, 'r') as fd:
yaml_text = fd.read()
tty.debug('createtarball read spec yaml:')
tty.debug(yaml_text)
s = Spec.from_yaml(yaml_text)
packages.add('/{0}'.format(s.dag_hash()))
elif packages:
packages = packages
elif env:
packages = env.concretized_user_specs
else:
tty.die("build cache file creation requires at least one" +
" installed package spec, an activate environment," +
" or else a path to a yaml file containing a spec" +
" to install")
pkgs = set(packages)
specs = set()
mirror = spack.mirror.MirrorCollection().lookup(output_location)
outdir = url_util.format(mirror.push_url)
msg = 'Buildcache files will be output to %s/build_cache' % outdir
tty.msg(msg)
matches = find_matching_specs(pkgs, env=env)
if matches:
tty.debug('Found at least one matching spec')
for match in matches:
tty.debug('examining match {0}'.format(match.format()))
if match.external or match.virtual:
tty.debug('skipping external or virtual spec %s' %
match.format())
else:
if add_spec:
tty.debug('adding matching spec %s' % match.format())
specs.add(match)
else:
tty.debug('skipping matching spec %s' % match.format())
if not add_deps:
continue
tty.debug('recursing dependencies')
for d, node in match.traverse(order='post',
depth=True,
deptype=('link', 'run')):
# skip root, since it's handled above
if d == 0:
continue
if node.external or node.virtual:
tty.debug('skipping external or virtual dependency %s' %
node.format())
else:
tty.debug('adding dependency %s' % node.format())
specs.add(node)
tty.debug('writing tarballs to %s/build_cache' % outdir)
for spec in specs:
tty.debug('creating binary cache file for package %s ' % spec.format())
try:
bindist.build_tarball(spec, outdir, force, make_relative,
unsigned, allow_root, signing_key,
rebuild_index)
except Exception as e:
tty.warn('%s' % e)
pass
| def _createtarball(env, spec_yaml=None, packages=None, add_spec=True,
add_deps=True, output_location=os.getcwd(),
signing_key=None, force=False, make_relative=False,
unsigned=False, allow_root=False, rebuild_index=False):
if spec_yaml:
packages = set()
with open(spec_yaml, 'r') as fd:
yaml_text = fd.read()
tty.debug('createtarball read spec yaml:')
tty.debug(yaml_text)
s = Spec.from_yaml(yaml_text)
packages.add('/{0}'.format(s.dag_hash()))
elif packages:
packages = packages
elif env:
packages = env.concretized_user_specs
else:
tty.die("build cache file creation requires at least one" +
" installed package spec, an activate environment," +
" or else a path to a yaml file containing a spec" +
" to install")
pkgs = set(packages)
specs = set()
mirror = spack.mirror.MirrorCollection().lookup(output_location)
outdir = url_util.format(mirror.push_url)
msg = 'Buildcache files will be output to %s/build_cache' % outdir
tty.msg(msg)
matches = find_matching_specs(pkgs, env=env)
if matches:
tty.debug('Found at least one matching spec')
for match in matches:
tty.debug('examining match {0}'.format(match.format()))
if match.external or match.virtual:
tty.debug('skipping external or virtual spec %s' %
match.format())
else:
if add_spec:
tty.debug('adding matching spec %s' % match.format())
specs.add(match)
else:
tty.debug('skipping matching spec %s' % match.format())
if not add_deps:
continue
tty.debug('recursing dependencies')
for d, node in match.traverse(order='post',
depth=True,
deptype=('link', 'run')):
# skip root, since it's handled above
if d == 0:
continue
if node.external or node.virtual:
tty.debug('skipping external or virtual dependency %s' %
node.format())
else:
tty.debug('adding dependency %s' % node.format())
specs.add(node)
tty.debug('writing tarballs to %s/build_cache' % outdir)
for spec in specs:
tty.debug('creating binary cache file for package %s ' % spec.format())
try:
bindist.build_tarball(spec, outdir, force, make_relative,
unsigned, allow_root, signing_key,
rebuild_index)
except Exception as e:
tty.warn(e)
|
53,079 | def getWxLang(lang):
import wx
locale = wx.Locale()
wxLang = locale.FindLanguageInfo(lang)
if not wxLang and '_' in lang:
wxLang = locale.FindLanguageInfo(lang.split('_')[0])
# #8064: Wx might know the language, but may not actually contain a translation database for that language.
# If we try to initialize this language, wx will show a warning dialog.
# #9089: some languages (such as Aragonese) do not have language info, causing language getter to fail.
# In this case, wxLang is already set to None.
# Therefore treat these situations like wx not knowing the language at all.
if wxLang and not locale.IsAvailable(wxLang.Language):
wxLang = None
return wxLang
| def getWxLang(lang):
import wx
locale = wx.Locale()
wxLang = locale.FindLanguageInfo(lang)
if not wxLang and '_' in lang:
wxLang = locale.FindLanguageInfo(lang.split('_')[0])
# #8064: Wx might know the language, but may not actually contain a translation database for that language.
# If we try to initialize this language, wx will show a warning dialog.
# #9089: some languages (such as Aragonese) do not have language info, causing language getter to fail.
# In this case, wxLang is already set to None.
# Therefore treat these situations like wx not knowing the language at all.
if wxLang and not locale.IsAvailable(wxLang.Language):
wxLang = None
return wxLang
|
15,617 | def get_salus_devices(hass: HomeAssistantType, data: dict) -> Dict[str, Any]:
"""Get a list of available Salus devices in user account."""
api = Api(
data[CONF_USERNAME],
data[CONF_PASSWORD],
)
return api.get_devices()
| def get_salus_devices(hass: HomeAssistant, data: dict) -> Dict[str, Any]:
"""Get a list of available Salus devices in user account."""
api = Api(
data[CONF_USERNAME],
data[CONF_PASSWORD],
)
return api.get_devices()
|
55,042 | def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
| def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian for the maximum-weighted cycle problem.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
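A usage sketch for net_flow_constraint on a small directed graph; the pennylane.qaoa.cycle import path is an assumption about where this helper is normally exposed.

import networkx as nx
from pennylane.qaoa.cycle import net_flow_constraint  # assumed import location

# Every directed edge of the complete graph gets one qubit/wire.
graph = nx.complete_graph(3).to_directed()
hamiltonian = net_flow_constraint(graph)
print(len(graph.edges))  # 6 candidate edges
print(hamiltonian)       # sum of per-node squared net-flow terms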
45,925 | def _draw_pixel(
image: torch.Tensor,
x: int,
y: int,
color: torch.Tensor,
):
r"""Draws a pixel into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
x: the x coordinate of the pixel.
y: the y coordinate of the pixel.
color: the color of the pixel with shape (3).
Return:
Nothing is returned
"""
image[:, y, x] = color
| def _draw_pixel(
image: torch.Tensor,
x: int,
y: int,
color: torch.Tensor,
):
r"""Draws a pixel into an image.
Args:
image: the input image to where to draw the lines with shape (C,H,W).
x: the x coordinate of the pixel.
y: the y coordinate of the pixel.
color: the color of the pixel with shape (3).
Return:
Nothing is returned.
"""
image[:, y, x] = color
|
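A tiny sketch of the (C, H, W) convention used by _draw_pixel, assuming the helper from this record is defined in the current module.

import torch

image = torch.zeros(3, 8, 8)              # RGB image, height 8, width 8
red = torch.tensor([1.0, 0.0, 0.0])
_draw_pixel(image, x=5, y=2, color=red)   # y selects the row, x the column
print(image[:, 2, 5])                     # tensor([1., 0., 0.])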
38,982 | def PrivateAttr(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
) -> Any:
"""
Indicates that attribute is only used internally and never mixed with regular fields.
Types or values of private attrs are not checked by pydantic and it's up to you to keep them relevant.
Private attrs are stored in model __slots__.
:param default: the attribute’s default value
:param default_factory: callable that will be called when a default value is needed for this attribute
If both `default` and `default_factory` are set, an error is raised.
"""
if default is not Undefined and default_factory is not None:
raise TypeError('cannot specify both default and default_factory')
return ModelPrivateAttr(
default,
default_factory=default_factory,
)
| def PrivateAttr(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
) -> Any:
"""
Indicates that attribute is only used internally and never mixed with regular fields.
Types or values of private attrs are not checked by pydantic and it's up to you to keep them relevant.
Private attrs are stored in model __slots__.
:param default: the attribute’s default value
:param default_factory: callable that will be called when a default value is needed for this attribute
If both `default` and `default_factory` are set, an error is raised.
"""
if default is not Undefined and default_factory is not None:
raise ValueError('cannot specify both default and default_factory')
return ModelPrivateAttr(
default,
default_factory=default_factory,
)
|
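A minimal pydantic sketch (v1-style API, matching the record) of PrivateAttr with default_factory; the model and field names are illustrative.

from datetime import datetime
from pydantic import BaseModel, PrivateAttr

class TimeAwareModel(BaseModel):
    name: str
    # private attribute: not a field, not validated, excluded from .dict()
    _processed_at: datetime = PrivateAttr(default_factory=datetime.utcnow)

m = TimeAwareModel(name="example")
print(m.dict())           # {'name': 'example'}
print(m._processed_at)    # value produced by the default_factory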
38,242 | def concat_enc_outs(
input: torch.LongTensor,
enc_out: torch.Tensor,
mask: torch.BoolTensor,
embedding_size: int,
padding_idx: int,
right_padded: bool = True,
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
Concatenate Encoder Outputs.
Does the whole "FiD" thing; each query/document pair is independently encoded in the
Encoder, so we need to concatenate all the outputs prior to sending to the decoder.
:param input:
[bsz, seqlen] original input to the encoder
:param enc_out:
[bsz * n_docs, seqlen] output representations from the encoder
:param mask:
encoder mask
:param embedding_size:
emb/hidden size of the enc representations
:param padding_idx:
pad token index; used for mask purposes.
:param right_padded:
whether the input is right padded
:return (new_out, new_mask):
return the encoder output and encoder mask, appropriately concatenated.
"""
bsz, n_docs = input.size(0), enc_out.size(0) // input.size(0)
split_enc_out = enc_out.split([n_docs] * bsz, dim=0)
split_mask = mask.split([n_docs] * bsz, dim=0)
concat_outs: List[torch.Tensor] = []
concat_lengths = []
for i in range(bsz):
mask_i = split_mask[i].view(-1)
out_i = split_enc_out[i].reshape(-1, embedding_size)[mask_i]
concat_outs.append(out_i)
concat_lengths.append(out_i.size(0))
new_out = enc_out.new(bsz, max(concat_lengths), embedding_size)
new_mask: torch.BoolTensor = mask.new(bsz, max(concat_lengths)) # type: ignore
new_out.fill_(padding_idx)
new_mask.fill_(False)
for i, (out_i, length_i) in enumerate(zip(concat_outs, concat_lengths)):
if right_padded:
new_out[i, :length_i] = out_i
new_mask[i, :length_i] = True
else:
new_out[i, new_out.size(1) - length_i :] = out_i
new_mask[i, new_out.size(1) - length_i :] = True
return new_out, new_mask
| def concat_enc_outs(
input: torch.LongTensor,
enc_out: torch.Tensor,
mask: torch.BoolTensor,
embedding_size: int,
padding_idx: int,
right_padded: bool = True,
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
Concatenate Encoder Outputs.
Does the whole "FiD" thing; each query/document pair is independently encoded in the
Encoder, so we need to concatenate all the outputs prior to sending to the decoder.
:param input:
[bsz, seqlen] original input to the encoder
:param enc_out:
[bsz * n_docs, seqlen] output representations from the encoder
:param mask:
encoder mask
:param embedding_size:
emb/hidden size of the enc representations
:param padding_idx:
pad token index; used for mask purposes.
:param right_padded:
whether the input is right padded (true) or left padded (false)
:return (new_out, new_mask):
return the encoder output and encoder mask, appropriately concatenated.
"""
bsz, n_docs = input.size(0), enc_out.size(0) // input.size(0)
split_enc_out = enc_out.split([n_docs] * bsz, dim=0)
split_mask = mask.split([n_docs] * bsz, dim=0)
concat_outs: List[torch.Tensor] = []
concat_lengths = []
for i in range(bsz):
mask_i = split_mask[i].view(-1)
out_i = split_enc_out[i].reshape(-1, embedding_size)[mask_i]
concat_outs.append(out_i)
concat_lengths.append(out_i.size(0))
new_out = enc_out.new(bsz, max(concat_lengths), embedding_size)
new_mask: torch.BoolTensor = mask.new(bsz, max(concat_lengths)) # type: ignore
new_out.fill_(padding_idx)
new_mask.fill_(False)
for i, (out_i, length_i) in enumerate(zip(concat_outs, concat_lengths)):
if right_padded:
new_out[i, :length_i] = out_i
new_mask[i, :length_i] = True
else:
new_out[i, new_out.size(1) - length_i :] = out_i
new_mask[i, new_out.size(1) - length_i :] = True
return new_out, new_mask
|
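A shape check for concat_enc_outs with synthetic tensors, assuming the function above is in scope; the sizes are arbitrary.

import torch

bsz, n_docs, seqlen, emb = 2, 3, 4, 8
inp = torch.ones(bsz, seqlen, dtype=torch.long)
enc_out = torch.randn(bsz * n_docs, seqlen, emb)
mask = torch.ones(bsz * n_docs, seqlen, dtype=torch.bool)
mask[0, -1] = False  # pretend one position of the first query's docs is padding

new_out, new_mask = concat_enc_outs(inp, enc_out, mask, emb, padding_idx=0)
print(new_out.shape)        # torch.Size([2, 12, 8]); at most n_docs * seqlen positions
print(new_mask.sum(dim=1))  # valid lengths per row: tensor([11, 12])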
23,849 | def _forced_schema_2(conanfile):
version = conanfile.conf.get("tools.cmake.cmaketoolchain.presets:max_schema_version",
check_type=int, default=4)
if version < 2:
raise ConanException("The minimun value for 'tools.cmake.cmaketoolchain.presets:"
"schema_version' is 2")
if version < 4:
return True
return False
| def _forced_schema_2(conanfile):
version = conanfile.conf.get("tools.cmake.cmaketoolchain.presets:max_schema_version",
check_type=int, default=4)
if version < 2:
raise ConanException("The minimum value for 'tools.cmake.cmaketoolchain.presets:"
"schema_version' is 2")
if version < 4:
return True
return False
|
37,314 | def _parse_common_args(backend, qobj_id, qobj_header, shots,
memory, max_credits, seed_simulator, init_qubits,
**run_config):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError: if the memory arg is True and the backend does not support
memory. Also if shots exceeds max_shots for the configured backend.
"""
# grab relevant info from backend if it exists
backend_config = None
if backend:
backend_config = backend.configuration()
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError("memory not supported by backend {}"
.format(backend_config.backend_name))
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, 'backend_name', None)
backend_version = getattr(backend_config, 'backend_version', None)
qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, 'max_shots', None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif isinstance(shots, int) and max_shots and max_shots < shots:
raise QiskitError(
'Number of shots specified: %s exceeds max_shots property of the '
'backend: %s.' % (shots, max_shots))
elif not isinstance(shots, int):
raise TypeError('The attribute \'shots\' must be of type int')
# create run configuration and populate
run_config_dict = dict(shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
**run_config)
return qobj_id, qobj_header, run_config_dict
| def _parse_common_args(backend, qobj_id, qobj_header, shots,
memory, max_credits, seed_simulator, init_qubits,
**run_config):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError: if the memory arg is True and the backend does not support
memory. Also if shots exceeds max_shots for the configured backend.
"""
# grab relevant info from backend if it exists
backend_config = None
if backend:
backend_config = backend.configuration()
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError("memory not supported by backend {}"
.format(backend_config.backend_name))
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, 'backend_name', None)
backend_version = getattr(backend_config, 'backend_version', None)
qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, 'max_shots', None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif isinstance(shots, int) and max_shots and max_shots < shots:
raise QiskitError(
'Number of shots specified: %s exceeds max_shots property of the '
'backend: %s.' % (shots, max_shots))
elif not isinstance(shots, int):
raise TypeError('The attribute "shots" must be of type int')
# create run configuration and populate
run_config_dict = dict(shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
**run_config)
return qobj_id, qobj_header, run_config_dict
|
23,244 | def verify_needs_extensions(app: "Sphinx", config: Config) -> None:
"""Check that the needed extensions have an acceptable version if they are loaded.
Warns if a needed extension is not loaded.
:raises VersionRequirementError: if the version of a needed extension is
unknown or older than the required version.
"""
if config.needs_extensions is None:
return
for extname, reqversion in config.needs_extensions.items():
extension = app.extensions.get(extname)
if extension is None:
logger.warning(__('The %s extension is required by needs_extensions settings, '
'but it is not loaded.'), extname)
continue
fulfilled = True
if extension.version == 'unknown version':
fulfilled = False
else:
try:
if Version(reqversion) > Version(extension.version):
fulfilled = False
except InvalidVersion:
if reqversion > extension.version:
fulfilled = False
if not fulfilled:
raise VersionRequirementError(__('This project needs the extension %s at least in '
'version %s and therefore cannot be built with '
'the loaded version (%s).') %
(extname, reqversion, extension.version))
| def verify_needs_extensions(app: "Sphinx", config: Config) -> None:
"""Check that the needed extensions have an acceptable version if they are loaded.
Warns if a needed extension is not loaded.
    :raises VersionRequirementError: if the version of an extension in :confval:`needs_extensions` is
unknown or older than the required version.
"""
if config.needs_extensions is None:
return
for extname, reqversion in config.needs_extensions.items():
extension = app.extensions.get(extname)
if extension is None:
logger.warning(__('The %s extension is required by needs_extensions settings, '
'but it is not loaded.'), extname)
continue
fulfilled = True
if extension.version == 'unknown version':
fulfilled = False
else:
try:
if Version(reqversion) > Version(extension.version):
fulfilled = False
except InvalidVersion:
if reqversion > extension.version:
fulfilled = False
if not fulfilled:
raise VersionRequirementError(__('This project needs the extension %s at least in '
'version %s and therefore cannot be built with '
'the loaded version (%s).') %
(extname, reqversion, extension.version))
|
42,909 | def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
        mean_photon (float): It guarantees that the mean photon number in the pure Gaussian state
representing the graph satisfies :math:`\sum_i sinh(r_{i})^2 ==` ``mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
# if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol)
# raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
scale = find_scaling_adjacency_matrix(A, mean_photon)
A = scale * A
s, U = takagi(A, tol=atol)
vals = -np.arctanh(s)
return vals, U
| def graph_embed(A, mean_photon=1.0, make_traceless=False, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
max_mean_photon (float): It guarantees that the mean photon number in the pure Gaussian state
representing the graph satisfies :math:`\sum_i sinh(r_{i})^2 ==` ``mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
# if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol)
# raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
scale = find_scaling_adjacency_matrix(A, mean_photon)
A = scale * A
s, U = takagi(A, tol=atol)
vals = -np.arctanh(s)
return vals, U
|
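A usage sketch for graph_embed on a 4-node ring graph; the strawberryfields.decompositions import path is an assumption about where this function normally lives.

import numpy as np
from strawberryfields.decompositions import graph_embed  # assumed import location

A = np.array([[0, 1, 0, 1],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [1, 0, 1, 0]], dtype=float)

sq, U = graph_embed(A)
print(sq)                                       # one squeezing parameter per mode
print(np.allclose(U @ U.conj().T, np.eye(4)))   # True: U is the interferometer unitary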
30,724 | def fetch_blobs():
"""Download one or more blobs from provided event_id
"""
event_id = demisto.getArg('event_id')
if demisto.getArg('timestamp'):
timestamp = dateutil.parser.parse(demisto.getArg('timestamp'))
now = dateutil.parser.parse(datetime.utcnow().isoformat())
diff = now.replace(tzinfo=timezone.utc) - timestamp.replace(tzinfo=timezone.utc)
# We need to wait three minutes from the time of the event since pcap
# are sent little later to make sure we record most of the triggered traffic
wait_delta = timedelta(minutes=3)
if diff < wait_delta:
ec = {'CTS.EventID': event_id}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
                'HumanReadable': '### CTS blob delayed\n'
+ 'The download has been delayed for '
+ str(wait_delta.seconds - diff.seconds)
+ ' seconds.',
'Contents': ec,
'ContentsFormat': formats['json']
}])
# Now wait to make sure the blob has been stored in the backend
time.sleep(int(wait_delta.seconds - diff.seconds))
result_blobs = http_request('GET', '/artifacts/blobs/%s' % event_id)
if 'blobs' in result_blobs and len(result_blobs['blobs']) > 0:
for blob in result_blobs['blobs']:
blob_id = blob['blob_id']
d = download(blob['url'])
return_results(fileResult(blob_id + '.pcap', base64.decodebytes(d.content)))
ec = {'CTS.HasBlob': True}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
'Contents': ec,
'ContentsFormat': formats['json']
}])
else:
ec = {'CTS.HasBlob': False}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
'Contents': ec,
'ContentsFormat': formats['json']
}])
| def fetch_blobs():
"""Download one or more blobs from provided event_id
"""
event_id = demisto.args().get('event_id')
if demisto.getArg('timestamp'):
timestamp = dateutil.parser.parse(demisto.getArg('timestamp'))
now = dateutil.parser.parse(datetime.utcnow().isoformat())
diff = now.replace(tzinfo=timezone.utc) - timestamp.replace(tzinfo=timezone.utc)
# We need to wait three minutes from the time of the event since pcap
# are sent little later to make sure we record most of the triggered traffic
wait_delta = timedelta(minutes=3)
if diff < wait_delta:
ec = {'CTS.EventID': event_id}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
                'HumanReadable': '### CTS blob delayed\n'
+ 'The download has been delayed for '
+ str(wait_delta.seconds - diff.seconds)
+ ' seconds.',
'Contents': ec,
'ContentsFormat': formats['json']
}])
# Now wait to make sure the blob has been stored in the backend
time.sleep(int(wait_delta.seconds - diff.seconds))
result_blobs = http_request('GET', '/artifacts/blobs/%s' % event_id)
if 'blobs' in result_blobs and len(result_blobs['blobs']) > 0:
for blob in result_blobs['blobs']:
blob_id = blob['blob_id']
d = download(blob['url'])
return_results(fileResult(blob_id + '.pcap', base64.decodebytes(d.content)))
ec = {'CTS.HasBlob': True}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
'Contents': ec,
'ContentsFormat': formats['json']
}])
else:
ec = {'CTS.HasBlob': False}
return_results([
{
'Type': entryTypes['note'],
'EntryContext': ec,
'Contents': ec,
'ContentsFormat': formats['json']
}])
|
20,388 | def wikimedia_replacement():
if util.config.wikimedia_tile_source_replacement == 'OSM-with-warning':
param.main.param.warning('Wikimedia tile source no longer available outside '
'wikimedia domain, switching to OpenStreetMap (OSM) tile '
'source. You can set wikimedia_tile_source_replacement '
'to your chosen replacement tile source URL in hv.config'
' to disable this warning. See release notes for HoloViews'
' 1.14.4 for more details')
return Tiles('https://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png', name="OSM")
else:
return Tiles(util.config.wikimedia_tile_source_replacement, name="Wikipedia")
| def wikimedia_replacement():
if util.config.wikimedia_tile_source_replacement == 'OSM-with-warning':
param.main.param.warning('Wikimedia tile source no longer available outside '
'wikimedia domain as of April 2021; switching to OpenStreetMap (OSM) tile '
'source. You can set wikimedia_tile_source_replacement '
'to your chosen replacement tile source URL in hv.config'
' to disable this warning. See release notes for HoloViews'
' 1.14.4 for more details')
return Tiles('https://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png', name="OSM")
else:
return Tiles(util.config.wikimedia_tile_source_replacement, name="Wikipedia")
|
10,752 | def literally(obj):
"""Forces numba to take *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bounded to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter its behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
| def literally(obj):
"""Forces numba to take *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bound to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter its behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
|
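A sketch of literally() forcing compile-time specialization inside an njit function; scaled_sum and its arguments are made up for illustration, and exact dispatcher behaviour may vary between numba releases.

import numpy as np
from numba import njit, literally

@njit
def scaled_sum(arr, factor):
    # literally() asks the dispatcher to recompile with `factor` as a Literal,
    # so each distinct value of `factor` gets its own specialization.
    return arr.sum() * literally(factor)

print(scaled_sum(np.arange(5.0), 2))  # 20.0
print(scaled_sum(np.arange(5.0), 3))  # 30.0 (a second specialization)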
56,638 | def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
| def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
for key, value in {'s': ' s', 'B': ' B', 'ninetwo': parts['ninetwo']}.items():
if parts[key]:
suffix += value
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
39,921 | def collect(alice: Alice,
handpicked_ursulas: Optional[Set[Ursula]] = None,
iterations: Optional[int] = None,
run_forever: bool = False
) -> None:
"""Collects grant success and failure rates."""
policies, i, success, fail = dict(), 0, 0, 0
while True:
print(f'Attempt {i+1} of {iterations}')
start = maya.now()
try:
policy = metric_grant(alice=alice, handpicked_ursulas=handpicked_ursulas)
except Exception as e:
fail += 1
print(f'GRANT FAIL\n{e}')
else:
success += 1
policies[policy.public_key.hex()] = policy # track
print(f"PEK:{policy.public_key.hex()}")
# timeit
end = maya.now()
delta = end - start
print(f"Completed in {(delta).total_seconds()} seconds.")
if i == iterations and not run_forever:
break # Exit
# score
elif i != iterations:
if fail > 0:
print(f'{fail}/{i+1} ({(fail/(i+1))*100}%) failure rate')
if success > 0:
print(f'{success}/{i+1} ({(success/(i+1))*100}%) success rate')
print(f'Waiting {SAMPLE_RATE} seconds until next sample. ')
time.sleep(SAMPLE_RATE)
i += 1
| def collect(alice: Alice,
handpicked_ursulas: Optional[Set[Ursula]] = None,
iterations: Optional[int] = None,
run_forever: bool = False
) -> None:
"""Collects grant success and failure rates."""
policies, i, success, fail = dict(), 0, 0, 0
while True:
print(f"Attempt {i+1} of {iterations if iterations is not None else 'infinite'}")
start = maya.now()
try:
policy = metric_grant(alice=alice, handpicked_ursulas=handpicked_ursulas)
except Exception as e:
fail += 1
print(f'GRANT FAIL\n{e}')
else:
success += 1
policies[policy.public_key.hex()] = policy # track
print(f"PEK:{policy.public_key.hex()}")
# timeit
end = maya.now()
delta = end - start
print(f"Completed in {(delta).total_seconds()} seconds.")
if i == iterations and not run_forever:
break # Exit
# score
elif i != iterations:
if fail > 0:
print(f'{fail}/{i+1} ({(fail/(i+1))*100}%) failure rate')
if success > 0:
print(f'{success}/{i+1} ({(success/(i+1))*100}%) success rate')
print(f'Waiting {SAMPLE_RATE} seconds until next sample. ')
time.sleep(SAMPLE_RATE)
i += 1
|
43,972 | def generate_hamiltonian(mol, cutoff=1.0e-12):
r"""Return a function that computes the qubit hamiltonian.
Args:
mol (Molecule): the molecule object
cutoff (float): cutoff value for discarding the negligible electronic integrals
Returns:
function: function that computes the the qubit hamiltonian
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> h = generate_hamiltonian(mol)(*args)
>>> h.terms[0]
tensor([ 0.29817879+0.j, 0.20813365+0.j, 0.20813365+0.j,
0.17860977+0.j, 0.04256036+0.j, -0.04256036+0.j,
-0.04256036+0.j, 0.04256036+0.j, -0.34724873+0.j,
0.13290293+0.j, -0.34724873+0.j, 0.17546329+0.j,
0.17546329+0.j, 0.13290293+0.j, 0.18470917+0.j], requires_grad=True)
"""
def hamiltonian(*args):
r"""Compute the qubit hamiltonian.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
Hamiltonian: the qubit Hamiltonian
"""
h_ferm = generate_fermionic_hamiltonian(mol, cutoff)(*args)
for n, t in enumerate(h_ferm[1]):
if len(t) == 0:
h = qml.Hamiltonian([h_ferm[0][n]], [qml.Identity(0)])
elif len(t) == 2:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for j, o_ in enumerate(o):
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
h = h + qml.Hamiltonian(np.array(op[0]) * h_ferm[0][n], op[1])
elif len(t) == 4:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for j, o_ in enumerate(o):
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
h = h + qml.Hamiltonian(np.array(op[0]) * h_ferm[0][n], op[1])
return h
return hamiltonian
| def generate_hamiltonian(mol, cutoff=1.0e-12):
r"""Return a function that computes the qubit hamiltonian.
Args:
mol (Molecule): the molecule object
cutoff (float): cutoff value for discarding the negligible electronic integrals
Returns:
function: function that computes the qubit hamiltonian
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> h = generate_hamiltonian(mol)(*args)
>>> h.terms[0]
tensor([ 0.29817879+0.j, 0.20813365+0.j, 0.20813365+0.j,
0.17860977+0.j, 0.04256036+0.j, -0.04256036+0.j,
-0.04256036+0.j, 0.04256036+0.j, -0.34724873+0.j,
0.13290293+0.j, -0.34724873+0.j, 0.17546329+0.j,
0.17546329+0.j, 0.13290293+0.j, 0.18470917+0.j], requires_grad=True)
"""
def hamiltonian(*args):
r"""Compute the qubit hamiltonian.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
Hamiltonian: the qubit Hamiltonian
"""
h_ferm = generate_fermionic_hamiltonian(mol, cutoff)(*args)
for n, t in enumerate(h_ferm[1]):
if len(t) == 0:
h = qml.Hamiltonian([h_ferm[0][n]], [qml.Identity(0)])
elif len(t) == 2:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for j, o_ in enumerate(o):
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
h = h + qml.Hamiltonian(np.array(op[0]) * h_ferm[0][n], op[1])
elif len(t) == 4:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for j, o_ in enumerate(o):
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
h = h + qml.Hamiltonian(np.array(op[0]) * h_ferm[0][n], op[1])
return h
return hamiltonian
|
47,687 | def _download_and_run(
venv_dir: Path,
package_or_url: str,
app: str,
app_args: List[str],
python: str,
pip_args: List[str],
venv_args: List[str],
use_cache: bool,
verbose: bool,
) -> None:
venv = Venv(venv_dir, python=python, verbose=verbose)
venv.create_venv(venv_args, pip_args)
if venv.pipx_metadata.main_package.package is not None:
package = venv.pipx_metadata.main_package.package
else:
package = package_name_from_spec(
package_or_url, python, pip_args=pip_args, verbose=verbose
)
venv.install_package(
package=package,
package_or_url=package_or_url,
pip_args=pip_args,
include_dependencies=False,
include_apps=True,
is_main_package=True,
)
command = _resolve_command(venv, package_or_url, app)
if not use_cache:
# Let future _remove_all_expired_venvs know to remove this
(venv_dir / VENV_EXPIRED_FILENAME).touch()
# These never return
if isinstance(command, EntryPoint):
venv.run_entry_point(command, app_args)
else:
venv.run_app(app, app_args)
| def _download_and_run(
venv_dir: Path,
package_or_url: str,
app: str,
app_args: List[str],
python: str,
pip_args: List[str],
venv_args: List[str],
use_cache: bool,
verbose: bool,
) -> None:
venv = Venv(venv_dir, python=python, verbose=verbose)
venv.create_venv(venv_args, pip_args)
if venv.pipx_metadata.main_package.package is not None:
package = venv.pipx_metadata.main_package.package
else:
package = package_name_from_spec(
package_or_url, python, pip_args=pip_args, verbose=verbose
)
venv.install_package(
package=package,
package_or_url=package_or_url,
pip_args=pip_args,
include_dependencies=False,
include_apps=True,
is_main_package=True,
)
command = _resolve_command(venv, package_or_url, app)
if not use_cache:
# Let future _remove_all_expired_venvs know to remove this
(venv_dir / VENV_EXPIRED_FILENAME).touch()
# These never return
if isinstance(command, EntryPoint):
venv.run_entry_point(command, app_args)
else:
venv.run_app(command, app_args)
|
13,266 | def test_module_with_mock_works():
ghostwriter.magic(unittest.mock)
| def test_module_with_mock_doesnt_break_ghostwriter():
ghostwriter.magic(unittest.mock)
|
35,757 | def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Original Implementation from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
Args:
boxes1 : (Tensor[N, 4] or Tensor[4]) first set of boxes
boxes2 : (Tensor[N, 4] or Tensor[4]) second set of boxes
reduction : (string, optional) Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps : (float, optional): small number to prevent division by zero. Default: 1e-7
Reference:
Complete Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
if (x2 < x1).all():
raise ValueError("x1 is larger than x2")
if (y2 < y1).all():
raise ValueError("y1 is larger than y2")
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
# Eqn. (10)
loss = 1 - iou + (distance / diag_len) + alpha * v
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
| def ciou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Original Implementation from
https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
Args:
boxes1 : (Tensor[N, 4] or Tensor[4]) first set of boxes
boxes2 : (Tensor[N, 4] or Tensor[4]) second set of boxes
reduction : (string, optional) Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps : (float): small number to prevent division by zero. Default: 1e-7
Reference:
Complete Intersection over Union Loss (Zhaohui Zheng et. al)
https://arxiv.org/abs/1911.08287
"""
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
if (x2 < x1).all():
raise ValueError("x1 is larger than x2")
if (y2 < y1).all():
raise ValueError("y1 is larger than y2")
# Intersection keypoints
xkis1 = torch.max(x1, x1g)
ykis1 = torch.max(y1, y1g)
xkis2 = torch.min(x2, x2g)
ykis2 = torch.min(y2, y2g)
intsct = torch.zeros_like(x1)
mask = (ykis2 > ykis1) & (xkis2 > xkis1)
intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
iou = intsct / union
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# width and height of boxes
w_pred = x2 - x1
h_pred = y2 - y1
w_gt = x2g - x1g
h_gt = y2g - y1g
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
with torch.no_grad():
alpha = v / (1 - iou + v + eps)
# Eqn. (10)
loss = 1 - iou + (distance / diag_len) + alpha * v
if reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
return loss
|
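A quick numerical check for ciou_loss, assuming the function above (and its math import) is in scope: identical boxes should give a loss near zero, well-separated boxes a loss above one.

import torch

boxes_a = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                        [5.0, 5.0, 15.0, 15.0]])
boxes_b = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                        [20.0, 20.0, 30.0, 30.0]])

print(ciou_loss(boxes_a, boxes_b))                    # per-box losses, shape [2]
print(ciou_loss(boxes_a, boxes_b, reduction="mean"))  # scalar average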
48,080 | def collect_NME(path):
""" Collects NME values in log file. """
NME = []
beginning = 'OrderedDict([(\'NME\', '
with open(path) as read_file:
content = [line.strip() for line in read_file]
for line in content:
if line.startswith(beginning):
NME.append(float(line.replace(beginning, '')[:-3]))
return NME
| def collect_NME(path):
""" Collects NME values in log file. """
NME = []
beginning = "OrderedDict([('NME', "
with open(path) as read_file:
content = [line.strip() for line in read_file]
for line in content:
if line.startswith(beginning):
NME.append(float(line.replace(beginning, '')[:-3]))
return NME
|
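A round-trip sketch for collect_NME, assuming the function above is in scope; the log lines below imitate the "OrderedDict([('NME', ...)])" format it expects.

import tempfile

log_lines = [
    "OrderedDict([('NME', 0.0452)])",
    "some unrelated progress line",
    "OrderedDict([('NME', 0.0391)])",
]
with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as f:
    f.write("\n".join(log_lines))
    path = f.name

print(collect_NME(path))  # [0.0452, 0.0391]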
26,721 | def create_dag_runs(dag, num_runs, session):
'''
    Create `num_runs` of dag runs for subsequent schedules
'''
from airflow.utils import timezone
from airflow.utils.state import State
try:
from airflow.utils.types import DagRunType
ID_PREFIX = f'{DagRunType.SCHEDULED}__'
except ImportError:
from airflow.models.dagrun import DagRun
ID_PREFIX = DagRun.ID_PREFIX
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs):
dag.create_dagrun(
run_id=ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
next_run_date = dag.following_schedule(next_run_date)
| def create_dag_runs(dag, num_runs, session):
'''
    Create `num_runs` of dag runs for subsequent schedules
'''
from airflow.utils import timezone
from airflow.utils.state import State
try:
from airflow.utils.types import DagRunType
ID_PREFIX = f'{DagRunType.SCHEDULED.value}__'
except ImportError:
from airflow.models.dagrun import DagRun
ID_PREFIX = DagRun.ID_PREFIX
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs):
dag.create_dagrun(
run_id=ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
next_run_date = dag.following_schedule(next_run_date)
|
25,131 | def test_open(dataset):
verify(zarr.open(dataset))
| def test_open(dataset):
verify(zarr.open(dataset, "r"))
|
52,721 | def set_log_level_debug(loggers=["vaex"]):
"""set log level to debug"""
for logger in loggers:
logging.getLogger(logger).setLevel(logging.DEBUG)
| def set_log_level_debug(loggers=["vaex"]):
"""set log level to debug"""
for logger in loggers:
        logging.getLogger(logger).setLevel(logging.DEBUG)
|
7,049 | def get_sym_dirs(symlink_dirs: str) -> Dict[str, Dict[str, Any]]:
"""Converts command line entered symlink dirs to a dictionary.
Args:
symlink_dirs (str): As entered by user on cli,
e.g. [log=$DIR, share=$DIR2].
Raises:
WorkflowFilesError: If directory to be symlinked is not in permitted
dirs: run, log, share, work, share/cycle
Returns:
dict: In the same form as would be returned by global config.
e.g. {'localhost': {'log': '$DIR',
'share': '$DIR2'
}
}
"""
# Ensures the same nested dict format which is returned by the glb cfg
symdict: Dict[str, Dict[str, Any]] = {'localhost': {'run': None}}
if symlink_dirs == 'None':
return symdict
symlist = symlink_dirs.replace(" ", "").strip(',').split(',')
for pair in symlist:
try:
key, val = pair.split("=")
except ValueError:
return symdict
if key in ['run', 'log', 'share', 'share/cycle', 'work']:
symdict['localhost'][key] = val
else:
raise WorkflowFilesError(
f"{key} not a valid entry for --symlink-dirs"
)
return symdict
| def get_sym_dirs(symlink_dirs: str) -> Dict[str, Dict[str, Any]]:
"""Converts command line entered symlink dirs to a dictionary.
Args:
symlink_dirs (str): As entered by user on cli,
e.g. [log=$DIR, share=$DIR2].
Raises:
WorkflowFilesError: If directory to be symlinked is not in permitted
dirs: run, log, share, work, share/cycle
Returns:
dict: In the same form as would be returned by global config.
e.g. {'localhost': {'log': '$DIR',
'share': '$DIR2'
}
}
"""
# Ensures the same nested dict format which is returned by the glb cfg
symdict: Dict[str, Dict[str, Any]] = {'localhost': {'run': None}}
if symlink_dirs == 'None':
return symdict
symlist = symlink_dirs.replace(" ", "").strip(',').split(',')
for pair in symlist:
try:
key, val = pair.split("=")
except ValueError:
return symdict
if key in ['run', 'log', 'share', 'share/cycle', 'work']:
symdict['localhost'][key] = val.strip() or None
else:
raise WorkflowFilesError(
f"{key} not a valid entry for --symlink-dirs"
)
return symdict
|
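An illustration of the mapping produced by get_sym_dirs, assuming the function above (with its WorkflowFilesError and typing imports) is in scope; the $FAST/$BIG values are placeholders.

print(get_sym_dirs('log=$FAST, share=$BIG'))
# {'localhost': {'run': None, 'log': '$FAST', 'share': '$BIG'}}

print(get_sym_dirs('None'))
# {'localhost': {'run': None}}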
42,008 | def test_interpolation_first_iteration() -> None:
zmap = {0 + 0j: 1, 1 + 1j: 4}
empties = [1 + 0j, 0 + 1j]
initial_zmap_len = len(zmap)
zmap, max_fractional_change = _run_iteration(zmap, empties) # type: ignore
# test loss after first iter
assert max_fractional_change == 1.0
# test if initial pass filled all values
assert len(zmap) == initial_zmap_len + len(empties)
| def test_interpolation_first_iteration() -> None:
zmap = {0 + 0j: 1, 1 + 1j: 4}
empties = [1 + 0j, 0 + 1j]
initial_zmap_len = len(zmap)
max_fractional_change = _run_iteration(zmap, empties) # type: ignore
# test loss after first iter
assert max_fractional_change == 1.0
# test if initial pass filled all values
assert len(zmap) == initial_zmap_len + len(empties)
|
31,071 | def install_simple_logging():
"""
This method implements logging module to print the message only with colors
This function is implemented to support backwards compatibility for functions that cannot yes support the full
`install_logging` method capabilities
"""
if not hasattr(logging, 'success'):
_add_logging_level('SUCCESS', 25)
coloredlogs.install(fmt='%(message)s',
level_styles={
'critical': {'bold': True, 'color': 'red'},
'debug': {'color': 'cyan'},
'error': {'color': 'red'},
'info': {},
'warning': {'color': 'yellow'},
'success': {'color': 'green'}})
| def install_simple_logging():
"""
This method implements logging module to print the message only with colors
This function is implemented to support backward compatibility for functions that cannot yet support the full
`install_logging` method capabilities
"""
if not hasattr(logging, 'success'):
_add_logging_level('SUCCESS', 25)
coloredlogs.install(fmt='%(message)s',
level_styles={
'critical': {'bold': True, 'color': 'red'},
'debug': {'color': 'cyan'},
'error': {'color': 'red'},
'info': {},
'warning': {'color': 'yellow'},
'success': {'color': 'green'}})
|
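`_add_logging_level` is referenced above but not shown in this row; the snippet below is a hedged sketch of the usual recipe such a helper follows (register the level name and expose a module-level convenience function), not the project's actual implementation.

import logging

def add_logging_level(name: str, number: int) -> None:
    logging.addLevelName(number, name)

    def log_for_level(message, *args, **kwargs):
        # Delegate to the root logger at the custom level.
        logging.log(number, message, *args, **kwargs)

    setattr(logging, name.lower(), log_for_level)

add_logging_level('SUCCESS', 25)
logging.basicConfig(level=25, format='%(levelname)s %(message)s')
logging.success('build finished')  # prints: SUCCESS build finished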
6,829 | def get_context(doc):
return {"doc": doc, "nowdate": nowdate, "frappeutils": frappe.utils}
| def get_context(doc):
return {"doc": doc, "nowdate": nowdate, "frappe": frappe._dict(utils=frappe.utils)}
|
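Context for the change above: `frappe._dict` is, roughly, a dict subclass with attribute access, so wrapping `utils` exposes only `frappe.utils` to the template context rather than the whole `frappe` module. A minimal stand-in for illustration (the real class lives in frappe):

class AttrDict(dict):
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as exc:
            raise AttributeError(key) from exc

fake_utils = AttrDict(nowdate=lambda: '2024-01-01')
context = {'doc': {'name': 'INV-0001'}, 'frappe': AttrDict(utils=fake_utils)}
print(context['frappe'].utils.nowdate())  # 2024-01-01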
9,057 | def test_command_from_callable_escaped_regex_pattern(mockbot):
# prepare callable
@plugin.commands('main .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.Command.from_callable(mockbot.settings, handler)
# does not match on ".main anything"
line = ':[email protected] PRIVMSG #sopel :.main anything'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex command are not allowed since Sopel 8.0'
# match on ".main .*"
line = ':[email protected] PRIVMSG #sopel :.main .*'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, (
'Command name must be escaped to get an exact match')
result = results[0]
assert result.group(0) == '.main .*'
assert result.group(1) == 'main .*'
| def test_command_from_callable_escaped_regex_pattern(mockbot):
# prepare callable
@plugin.commands('main .*')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
# create rule from a cleaned callable
rule = rules.Command.from_callable(mockbot.settings, handler)
# does not match on ".main anything"
line = ':[email protected] PRIVMSG #sopel :.main anything'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert not results, 'Regex commands are not allowed since Sopel 8.0'
# match on ".main .*"
line = ':[email protected] PRIVMSG #sopel :.main .*'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, (
'Command name must be escaped to get an exact match')
result = results[0]
assert result.group(0) == '.main .*'
assert result.group(1) == 'main .*'
|
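The behaviour this test pins down can be reproduced with the standard library alone: once the command name goes through `re.escape`, regex metacharacters in it lose their meaning. The sketch below is independent of Sopel's rule classes.

import re

name = 'main .*'
unescaped = re.compile(r'\.' + name)
escaped = re.compile(r'\.' + re.escape(name))

print(bool(unescaped.match('.main anything')))  # True  -- regex semantics leak through
print(bool(escaped.match('.main anything')))    # False -- only the literal text matches
print(bool(escaped.match('.main .*')))          # True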
7,254 | def create_image_fetcher():
try:
import pooch
except ImportError:
# Without pooch, fallback on the standard data directory
# which for now, includes a few limited data samples
return None, legacy_data_dir
# Pooch expects a `+` to exist in development versions.
# Since scikit-image doesn't follow that convention, we have to manually
# remove `.dev` with a `+` if it exists.
# This helps pooch understand that it should look in master
# to find the required files
pooch_version = __version__.replace('.dev', '+')
url = "https://github.com/alexdesiqueira/scikit-image/tree/goodbye_cameraman/skimage" # test url; checking new image
# Create a new friend to manage your sample data storage
image_fetcher = pooch.create(
# Pooch uses appdirs to select an appropriate directory for the cache
# on each platform.
# https://github.com/ActiveState/appdirs
# On linux this converges to
# '$HOME/.cache/scikit-image'
# With a version qualifier
path=pooch.os_cache("scikit-image"),
base_url=url,
version=pooch_version,
env="SKIMAGE_DATADIR",
registry=registry,
urls=registry_urls,
)
data_dir = osp.join(str(image_fetcher.abspath), 'data')
return image_fetcher, data_dir
| def create_image_fetcher():
try:
import pooch
except ImportError:
# Without pooch, fallback on the standard data directory
# which for now, includes a few limited data samples
return None, legacy_data_dir
# Pooch expects a `+` to exist in development versions.
# Since scikit-image doesn't follow that convention, we have to manually
# remove `.dev` with a `+` if it exists.
# This helps pooch understand that it should look in master
# to find the required files
pooch_version = __version__.replace('.dev', '+')
url = "https://github.com/alexdesiqueira/scikit-image/raw/goodbye_cameraman/skimage/" # test url; checking new image
# Create a new friend to manage your sample data storage
image_fetcher = pooch.create(
# Pooch uses appdirs to select an appropriate directory for the cache
# on each platform.
# https://github.com/ActiveState/appdirs
# On linux this converges to
# '$HOME/.cache/scikit-image'
# With a version qualifier
path=pooch.os_cache("scikit-image"),
base_url=url,
version=pooch_version,
env="SKIMAGE_DATADIR",
registry=registry,
urls=registry_urls,
)
data_dir = osp.join(str(image_fetcher.abspath), 'data')
return image_fetcher, data_dir
|
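The one non-obvious step above is the version munging; per the in-code comment, pooch wants development versions to carry a `+` so it resolves them against the development branch. A trivial illustration of the replacement (version strings below are made up):

dev_version = '0.19.0.dev0'
release_version = '0.19.0'

print(dev_version.replace('.dev', '+'))      # 0.19.0+0 -- flagged as a dev build
print(release_version.replace('.dev', '+'))  # 0.19.0   -- releases pass through unchanged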
375 | def make_initial_point_fn(
*,
model,
overrides: Optional[StartDict] = None,
jitter_rvs: Optional[Set[TensorVariable]] = None,
default_strategy: str = "moment",
return_transformed: bool = True,
) -> Callable:
"""Create seeded function that computes initial values for all free model variables.
Parameters
----------
jitter_rvs : set
The set (or list or tuple) of random variables for which a U(-1, +1) jitter should be
added to the initial value. Only available for variables that have a transform or real-valued support.
default_strategy : str
Which of { "moment", "prior" } to prefer if the initval setting for an RV is None.
overrides : dict
Initial value (strategies) to use instead of what's specified in `Model.initial_values`.
return_transformed : bool
If `True` the returned variables will correspond to transformed initial values.
"""
def find_rng_nodes(variables):
return [
node
for node in graph_inputs(variables)
if isinstance(
node,
(
at.random.var.RandomStateSharedVariable,
at.random.var.RandomGeneratorSharedVariable,
),
)
]
sdict_overrides = convert_str_to_rv_dict(model, overrides or {})
initval_srtats = {
**model.initial_values,
**sdict_overrides,
}
initial_values = make_initial_point_expression(
free_rvs=model.free_RVs,
rvs_to_values=model.rvs_to_values,
initval_strategies=initval_srtats,
jitter_rvs=jitter_rvs,
default_strategy=default_strategy,
return_transformed=return_transformed,
)
# Replace original rng shared variables so that we don't mess with them
# when calling the final seeded function
graph = FunctionGraph(outputs=initial_values, clone=False)
rng_nodes = find_rng_nodes(graph.outputs)
new_rng_nodes: List[Union[np.random.RandomState, np.random.Generator]] = []
for rng_node in rng_nodes:
rng_cls: type
if isinstance(rng_node, at.random.var.RandomStateSharedVariable):
rng_cls = np.random.RandomState
else:
rng_cls = np.random.Generator
new_rng_nodes.append(aesara.shared(rng_cls(np.random.PCG64())))
graph.replace_all(zip(rng_nodes, new_rng_nodes), import_missing=True)
func = compile_pymc(inputs=[], outputs=graph.outputs, mode=aesara.compile.mode.FAST_COMPILE)
varnames = []
for var in model.free_RVs:
transform = getattr(model.rvs_to_values[var].tag, "transform", None)
if transform is not None and return_transformed:
name = get_transformed_name(var.name, transform)
else:
name = var.name
varnames.append(name)
def make_seeded_function(func):
rngs = find_rng_nodes(func.maker.fgraph.outputs)
@functools.wraps(func)
def inner(seed, *args, **kwargs):
seeds = [
np.random.PCG64(sub_seed)
for sub_seed in np.random.SeedSequence(seed).spawn(len(rngs))
]
for rng, seed in zip(rngs, seeds):
if isinstance(rng, at.random.var.RandomStateSharedVariable):
new_rng = np.random.RandomState(seed)
else:
new_rng = np.random.Generator(seed)
rng.set_value(new_rng, True)
values = func(*args, **kwargs)
return dict(zip(varnames, values))
return inner
return make_seeded_function(func)
| def make_initial_point_fn(
*,
model,
overrides: Optional[StartDict] = None,
jitter_rvs: Optional[Set[TensorVariable]] = None,
default_strategy: str = "moment",
return_transformed: bool = True,
) -> Callable:
"""Create seeded function that computes initial values for all free model variables.
Parameters
----------
jitter_rvs : set
The set (or list or tuple) of random variables for which a U(-1, +1) jitter should be
added to the initial value. Only available for variables that have a transform or real-valued support.
default_strategy : str
Which of { "moment", "prior" } to prefer if the initval setting for an RV is None.
overrides : dict
Initial value (strategies) to use instead of what's specified in `Model.initial_values`.
return_transformed : bool
If `True` the returned variables will correspond to transformed initial values.
"""
def find_rng_nodes(variables):
return [
node
for node in graph_inputs(variables)
if isinstance(
node,
(
at.random.var.RandomStateSharedVariable,
at.random.var.RandomGeneratorSharedVariable,
),
)
]
sdict_overrides = convert_str_to_rv_dict(model, overrides or {})
initval_strats = {
**model.initial_values,
**sdict_overrides,
}
initial_values = make_initial_point_expression(
free_rvs=model.free_RVs,
rvs_to_values=model.rvs_to_values,
        initval_strategies=initval_strats,
jitter_rvs=jitter_rvs,
default_strategy=default_strategy,
return_transformed=return_transformed,
)
# Replace original rng shared variables so that we don't mess with them
# when calling the final seeded function
graph = FunctionGraph(outputs=initial_values, clone=False)
rng_nodes = find_rng_nodes(graph.outputs)
new_rng_nodes: List[Union[np.random.RandomState, np.random.Generator]] = []
for rng_node in rng_nodes:
rng_cls: type
if isinstance(rng_node, at.random.var.RandomStateSharedVariable):
rng_cls = np.random.RandomState
else:
rng_cls = np.random.Generator
new_rng_nodes.append(aesara.shared(rng_cls(np.random.PCG64())))
graph.replace_all(zip(rng_nodes, new_rng_nodes), import_missing=True)
func = compile_pymc(inputs=[], outputs=graph.outputs, mode=aesara.compile.mode.FAST_COMPILE)
varnames = []
for var in model.free_RVs:
transform = getattr(model.rvs_to_values[var].tag, "transform", None)
if transform is not None and return_transformed:
name = get_transformed_name(var.name, transform)
else:
name = var.name
varnames.append(name)
def make_seeded_function(func):
rngs = find_rng_nodes(func.maker.fgraph.outputs)
@functools.wraps(func)
def inner(seed, *args, **kwargs):
seeds = [
np.random.PCG64(sub_seed)
for sub_seed in np.random.SeedSequence(seed).spawn(len(rngs))
]
for rng, seed in zip(rngs, seeds):
if isinstance(rng, at.random.var.RandomStateSharedVariable):
new_rng = np.random.RandomState(seed)
else:
new_rng = np.random.Generator(seed)
rng.set_value(new_rng, True)
values = func(*args, **kwargs)
return dict(zip(varnames, values))
return inner
return make_seeded_function(func)
|
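The reseeding pattern inside `make_seeded_function` can be shown in isolation with plain NumPy: a single user-facing seed is spawned into independent child sequences, one per RNG, which makes repeated calls with the same seed reproducible.

import numpy as np

def spawn_rngs(seed: int, n: int):
    # One independent child SeedSequence per RNG.
    children = np.random.SeedSequence(seed).spawn(n)
    return [np.random.Generator(np.random.PCG64(child)) for child in children]

rngs_a = spawn_rngs(123, 2)
rngs_b = spawn_rngs(123, 2)
print(rngs_a[0].standard_normal() == rngs_b[0].standard_normal())  # True
print(rngs_a[1].standard_normal() == rngs_b[1].standard_normal())  # True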
50,078 | def countstat_current_noise(L, c_ops, wlist=None, rhoss=None, J_ops=None,
sparse=True, method='direct'):
"""
Compute the cross-current noise spectrum for a list of collapse operators
`c_ops` corresponding to monitored currents, given the system
Liouvillian `L`. The current collapse operators `c_ops` should be part
of the dissipative processes in `L`, but the `c_ops` given here does not
necessarily need to be all collapse operators contributing to dissipation
in the Liouvillian. Optionally, the steadystate density matrix `rhoss`
and the current operators `J_ops` correpsonding to the current collapse
operators `c_ops` can also be specified. If either of
`rhoss` and `J_ops` are omitted, they will be computed internally.
'wlist' is an optional list of frequencies at which to evaluate the noise
spectrum.
Parameters
----------
L : :class:`qutip.Qobj`
Qobj representing the system Liouvillian.
c_ops : array / list
List of current collapse operators.
rhoss : :class:`qutip.Qobj` (optional)
The steadystate density matrix corresponding the system Liouvillian
`L`.
wlist : array / list (optional)
List of frequencies at which to evaluate (if none are given, evaluates
at zero frequency)
J_ops : array / list (optional)
List of current superoperators.
sparse : bool [True]
Flag that indicates whether to use sparse or dense matrix methods when
computing the pseudo inverse. Default is false, as sparse solvers
can fail for small systems. For larger systems the sparse solvers
are recommended.
method : str, ['direct']
Method used to compute the noise. The default, 'direct' with
``sparse=True``, compute the noise directly. It is the recommended
method for larger systems. Otherwise, the pseudo inverse is computed
using the given method. Pseudo inverse supports 'splu' and 'spilu' for
sparse matrices and 'direct', 'scipy' and 'numpy' methods for
``sparse=False``.
.. note::
The algoryth is described in page 67 of "Electrons in nanostructures"
C. Flindt, PhD Thesis, available online:
https://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content
Returns
--------
I, S : tuple of arrays
The currents `I` corresponding to each current collapse operator
`c_ops` (or, equivalently, each current superopeator `J_ops`) and the
zero-frequency cross-current correlation `S`.
"""
if rhoss is None:
rhoss = steadystate(L, c_ops)
if J_ops is None:
J_ops = [sprepost(c, c.dag()) for c in c_ops]
if wlist is None:
wlist = [0.]
J_ops = [op.data for op in J_ops]
if not sparse or method != 'direct':
current, noise = _noise_pseudoinv(L, wlist, rhoss, J_ops,
sparse, method)
else:
rhoss_vec = operator_to_vector(rhoss).data
current, noise = _noise_direct(L, wlist, rhoss_vec, J_ops)
return current, noise
| def countstat_current_noise(L, c_ops, wlist=None, rhoss=None, J_ops=None,
sparse=True, method='direct'):
"""
Compute the cross-current noise spectrum for a list of collapse operators
`c_ops` corresponding to monitored currents, given the system
Liouvillian `L`. The current collapse operators `c_ops` should be part
of the dissipative processes in `L`, but the `c_ops` given here does not
necessarily need to be all collapse operators contributing to dissipation
in the Liouvillian. Optionally, the steadystate density matrix `rhoss`
    and the current operators `J_ops` corresponding to the current collapse
operators `c_ops` can also be specified. If either of
`rhoss` and `J_ops` are omitted, they will be computed internally.
'wlist' is an optional list of frequencies at which to evaluate the noise
spectrum.
Parameters
----------
L : :class:`qutip.Qobj`
Qobj representing the system Liouvillian.
c_ops : array / list
List of current collapse operators.
rhoss : :class:`qutip.Qobj` (optional)
        The steadystate density matrix corresponding to the system Liouvillian
`L`.
wlist : array / list (optional)
List of frequencies at which to evaluate (if none are given, evaluates
at zero frequency)
J_ops : array / list (optional)
List of current superoperators.
sparse : bool [True]
Flag that indicates whether to use sparse or dense matrix methods when
        computing the pseudo inverse. The default is ``True``; the sparse
        solvers are recommended for larger systems, although they can fail for
        small systems.
method : str, ['direct']
Method used to compute the noise. The default, 'direct' with
        ``sparse=True``, computes the noise directly. It is the recommended
method for larger systems. Otherwise, the pseudo inverse is computed
using the given method. Pseudo inverse supports 'splu' and 'spilu' for
sparse matrices and 'direct', 'scipy' and 'numpy' methods for
``sparse=False``.
.. note::
        The algorithm is described on page 67 of "Electrons in nanostructures"
C. Flindt, PhD Thesis, available online:
https://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content
Returns
--------
I, S : tuple of arrays
The currents `I` corresponding to each current collapse operator
        `c_ops` (or, equivalently, each current superoperator `J_ops`) and the
zero-frequency cross-current correlation `S`.
"""
if rhoss is None:
rhoss = steadystate(L, c_ops)
if J_ops is None:
J_ops = [sprepost(c, c.dag()) for c in c_ops]
if wlist is None:
wlist = [0.]
J_ops = [op.data for op in J_ops]
if sparse and method == 'direct':
rhoss_vec = operator_to_vector(rhoss).data
current, noise = _noise_direct(L, wlist, rhoss_vec, J_ops)
else:
current, noise = _noise_pseudoinv(L, wlist, rhoss, J_ops,
sparse, method)
return current, noise
|
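A hedged usage sketch, assuming the function above is importable from qutip as in the 4.x counting-statistics module: a weakly driven, damped cavity whose single decay channel is monitored at zero frequency.

import numpy as np
from qutip import destroy, liouvillian, countstat_current_noise

N = 5
a = destroy(N)
H = 0.5 * (a + a.dag())        # weak coherent drive
c_ops = [np.sqrt(0.2) * a]     # the monitored decay channel
L = liouvillian(H, c_ops)

current, noise = countstat_current_noise(L, c_ops)  # wlist defaults to [0.]
print(current, noise)  # steady-state current and its zero-frequency noise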
24,684 | def get_shipit_facts(issuewrapper, inmeta, module_indexer, core_team=[], botnames=[]):
""" Count shipits by maintainers/community/other """
# supershipit - maintainers with isolated commit access
# maintainers - people who maintain this file/module
# community - people who maintain file(s) in the same directory
# other - anyone else who comments with shipit/+1/LGTM
meta = inmeta.copy()
iw = issuewrapper
nmeta = {
u'shipit': False,
u'supershipit': False,
u'owner_pr': False,
u'shipit_ansible': False,
u'shipit_community': False,
u'shipit_count_other': False,
u'shipit_count_community': False,
u'shipit_count_maintainer': False,
u'shipit_count_ansible': False,
u'shipit_count_vtotal': False,
u'shipit_count_historical': False,
u'shipit_actors': None,
u'supershipit_actors': None,
u'community_usernames': [],
u'notify_community_shipit': False,
u'is_rebuild_merge': False,
}
if not iw.is_pullrequest():
return nmeta
# https://github.com/ansible/ansibullbot/issues/1147
meta[u'component_matches'] = [
x for x in meta.get(u'component_matches', [])
if not x[u'repo_filename'].startswith(u'changelogs/fragments/')
]
files = [f for f in iw.files if not f.startswith(u'changelogs/fragments/')]
# https://github.com/ansible/ansibullbot/issues/1238
meta[u'component_matches'] = [
x for x in meta.get(u'component_matches', [])
if not x[u'repo_filename'].startswith(u'test/sanity')
]
files = [f for f in files if not f.startswith(u'test/sanity') and f.endswith(u'ignore.txt')]
module_utils_files_owned = 0 # module_utils files for which submitter is maintainer
if meta[u'is_module_util']:
for f in files:
if f.startswith(u'lib/ansible/module_utils') and f in module_indexer.botmeta[u'files']:
maintainers = module_indexer.botmeta[u'files'][f].get(u'maintainers', [])
if maintainers and (iw.submitter in maintainers):
module_utils_files_owned += 1
modules_files_owned = 0
if not meta[u'is_new_module']:
for f in files:
if f.startswith(u'lib/ansible/modules') and iw.submitter in meta[u'component_maintainers']:
modules_files_owned += 1
nmeta[u'owner_pr'] = modules_files_owned + module_utils_files_owned == len(files)
# https://github.com/ansible/ansibullbot/issues/722
if iw.wip:
logging.debug(u'WIP PRs do not get shipits')
return nmeta
if meta[u'is_needs_revision'] or meta[u'is_needs_rebase']:
logging.debug(u'PRs with needs_revision or needs_rebase label do not get shipits')
return nmeta
supershipiteers_byfile = {}
supershipiteers_byuser = {}
for cm in meta.get('component_matches', []):
_ss = cm.get(u'supershipit', [])
supershipiteers_byfile[cm[u'repo_filename']] = _ss[:]
for ss in _ss:
if ss not in supershipiteers_byuser:
supershipiteers_byuser[ss] = []
supershipiteers_byuser[ss].append(cm[u'repo_filename'])
maintainers = meta.get(u'component_maintainers', [])
maintainers = \
ModuleIndexer.replace_ansible(
maintainers,
core_team,
bots=botnames
)
# community is the other maintainers in the same namespace
community = meta.get(u'component_namespace_maintainers', [])
community = [x for x in community if x != u'ansible' and
x not in core_team and
x != u'DEPRECATED']
# shipit tallies
ansible_shipits = 0
maintainer_shipits = 0
community_shipits = 0
other_shipits = 0
shipit_actors = []
shipit_actors_other = []
supershipiteers_voted = set()
rebuild_merge = False
shipits_historical = set()
for event in iw.history.history:
if event[u'event'] not in [u'commented', u'committed', u'review_approved', u'review_comment']:
continue
if event[u'actor'] in botnames:
continue
# commits reset the counters
if event[u'event'] == u'committed':
logging.info(event)
ansible_shipits = 0
maintainer_shipits = 0
community_shipits = 0
other_shipits = 0
shipit_actors = []
shipit_actors_other = []
supershipiteers_voted = set()
rebuild_merge = False
logging.info('commit detected, resetting shipit tallies')
continue
actor = event[u'actor']
body = event.get(u'body', u'')
body = body.strip()
if not is_approval(body):
continue
# historical shipits (keep track of all of them, even if reset)
shipits_historical.add(actor)
if actor in core_team and is_rebuild_merge(body):
rebuild_merge = True
logging.info(u'%s shipit [rebuild_merge]' % actor)
else:
logging.info(u'%s shipit' % actor)
# super shipits
if actor in supershipiteers_byuser:
supershipiteers_voted.add(actor)
# ansible shipits
if actor in core_team:
if actor not in shipit_actors:
ansible_shipits += 1
shipit_actors.append(actor)
continue
# maintainer shipits
if actor in maintainers:
if actor not in shipit_actors:
maintainer_shipits += 1
shipit_actors.append(actor)
continue
# community shipits
if actor in community:
if actor not in shipit_actors:
community_shipits += 1
shipit_actors.append(actor)
continue
# other shipits
if actor not in shipit_actors_other:
other_shipits += 1
shipit_actors_other.append(actor)
continue
# submitters should count if they are core team/maintainers/community
if iw.submitter in core_team:
if iw.submitter not in shipit_actors:
ansible_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
elif iw.submitter in maintainers:
if iw.submitter not in shipit_actors:
maintainer_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
elif iw.submitter in community:
if iw.submitter not in shipit_actors:
community_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
nmeta[u'shipit_count_other'] = other_shipits
nmeta[u'shipit_count_community'] = community_shipits
nmeta[u'shipit_count_maintainer'] = maintainer_shipits
nmeta[u'shipit_count_ansible'] = ansible_shipits
nmeta[u'shipit_actors'] = shipit_actors
nmeta[u'shipit_actors_other'] = shipit_actors_other
nmeta[u'community_usernames'] = sorted(community)
nmeta[u'shipit_count_historical'] = list(shipits_historical)
nmeta[u'shipit_count_htotal'] = len(list(shipits_historical))
total = community_shipits + maintainer_shipits + ansible_shipits
nmeta[u'shipit_count_vtotal'] = total + other_shipits
if rebuild_merge:
nmeta['is_rebuild_merge'] = True
# include shipits from other people to push over the edge
if total == 1 and other_shipits > 2:
total += other_shipits
if total > 1 or rebuild_merge:
nmeta[u'shipit'] = True
elif meta[u'is_new_module'] or \
(len(maintainers) == 1 and maintainer_shipits == 1):
# don't notify if there is no maintainer or if submitter is the only namespace maintainer
if set(community) - {iw.submitter}:
bpc = iw.history.get_boilerplate_comments()
bpc = [x[0] for x in bpc]
if u'community_shipit_notify' not in bpc:
nmeta[u'notify_community_shipit'] = True
logging.info(u'total shipits: %s' % total)
# supershipit ...
# if a supershipiteer for each file exists and has blessed the PR
# on the current commit, then override all shipit tallies and get this PR merged
if supershipiteers_voted:
nmeta[u'supershipit_actors'] = list(supershipiteers_voted)
cm_files = [x[u'repo_filename'] for x in meta[u'component_matches']]
ss_files = set()
for ssv in supershipiteers_voted:
for fn in supershipiteers_byuser[ssv]:
ss_files.add(fn)
if sorted(set(cm_files)) == sorted(set(ss_files)):
logging.info(u'supershipit enabled on %s' % iw.html_url)
nmeta[u'supershipit'] = True
nmeta[u'shipit'] = True
else:
for cm_file in sorted(cm_files):
if cm_file not in ss_files:
logging.info('%s is not governed by supershipit' % cm_file)
return nmeta
| def get_shipit_facts(issuewrapper, inmeta, module_indexer, core_team=[], botnames=[]):
""" Count shipits by maintainers/community/other """
# supershipit - maintainers with isolated commit access
# maintainers - people who maintain this file/module
# community - people who maintain file(s) in the same directory
# other - anyone else who comments with shipit/+1/LGTM
meta = inmeta.copy()
iw = issuewrapper
nmeta = {
u'shipit': False,
u'supershipit': False,
u'owner_pr': False,
u'shipit_ansible': False,
u'shipit_community': False,
u'shipit_count_other': False,
u'shipit_count_community': False,
u'shipit_count_maintainer': False,
u'shipit_count_ansible': False,
u'shipit_count_vtotal': False,
u'shipit_count_historical': False,
u'shipit_actors': None,
u'supershipit_actors': None,
u'community_usernames': [],
u'notify_community_shipit': False,
u'is_rebuild_merge': False,
}
if not iw.is_pullrequest():
return nmeta
# https://github.com/ansible/ansibullbot/issues/1147
meta[u'component_matches'] = [
x for x in meta.get(u'component_matches', [])
if not x[u'repo_filename'].startswith(u'changelogs/fragments/')
]
files = [f for f in iw.files if not f.startswith(u'changelogs/fragments/')]
# https://github.com/ansible/ansibullbot/issues/1238
meta[u'component_matches'] = [
x for x in meta.get(u'component_matches', [])
if not x[u'repo_filename'].startswith(u'test/sanity')
]
files = [f for f in files if not (f.startswith(u'test/sanity') and f.endswith(u'ignore.txt'))]
module_utils_files_owned = 0 # module_utils files for which submitter is maintainer
if meta[u'is_module_util']:
for f in files:
if f.startswith(u'lib/ansible/module_utils') and f in module_indexer.botmeta[u'files']:
maintainers = module_indexer.botmeta[u'files'][f].get(u'maintainers', [])
if maintainers and (iw.submitter in maintainers):
module_utils_files_owned += 1
modules_files_owned = 0
if not meta[u'is_new_module']:
for f in files:
if f.startswith(u'lib/ansible/modules') and iw.submitter in meta[u'component_maintainers']:
modules_files_owned += 1
nmeta[u'owner_pr'] = modules_files_owned + module_utils_files_owned == len(files)
# https://github.com/ansible/ansibullbot/issues/722
if iw.wip:
logging.debug(u'WIP PRs do not get shipits')
return nmeta
if meta[u'is_needs_revision'] or meta[u'is_needs_rebase']:
logging.debug(u'PRs with needs_revision or needs_rebase label do not get shipits')
return nmeta
supershipiteers_byfile = {}
supershipiteers_byuser = {}
for cm in meta.get('component_matches', []):
_ss = cm.get(u'supershipit', [])
supershipiteers_byfile[cm[u'repo_filename']] = _ss[:]
for ss in _ss:
if ss not in supershipiteers_byuser:
supershipiteers_byuser[ss] = []
supershipiteers_byuser[ss].append(cm[u'repo_filename'])
maintainers = meta.get(u'component_maintainers', [])
maintainers = \
ModuleIndexer.replace_ansible(
maintainers,
core_team,
bots=botnames
)
# community is the other maintainers in the same namespace
community = meta.get(u'component_namespace_maintainers', [])
community = [x for x in community if x != u'ansible' and
x not in core_team and
x != u'DEPRECATED']
# shipit tallies
ansible_shipits = 0
maintainer_shipits = 0
community_shipits = 0
other_shipits = 0
shipit_actors = []
shipit_actors_other = []
supershipiteers_voted = set()
rebuild_merge = False
shipits_historical = set()
for event in iw.history.history:
if event[u'event'] not in [u'commented', u'committed', u'review_approved', u'review_comment']:
continue
if event[u'actor'] in botnames:
continue
# commits reset the counters
if event[u'event'] == u'committed':
logging.info(event)
ansible_shipits = 0
maintainer_shipits = 0
community_shipits = 0
other_shipits = 0
shipit_actors = []
shipit_actors_other = []
supershipiteers_voted = set()
rebuild_merge = False
logging.info('commit detected, resetting shipit tallies')
continue
actor = event[u'actor']
body = event.get(u'body', u'')
body = body.strip()
if not is_approval(body):
continue
# historical shipits (keep track of all of them, even if reset)
shipits_historical.add(actor)
if actor in core_team and is_rebuild_merge(body):
rebuild_merge = True
logging.info(u'%s shipit [rebuild_merge]' % actor)
else:
logging.info(u'%s shipit' % actor)
# super shipits
if actor in supershipiteers_byuser:
supershipiteers_voted.add(actor)
# ansible shipits
if actor in core_team:
if actor not in shipit_actors:
ansible_shipits += 1
shipit_actors.append(actor)
continue
# maintainer shipits
if actor in maintainers:
if actor not in shipit_actors:
maintainer_shipits += 1
shipit_actors.append(actor)
continue
# community shipits
if actor in community:
if actor not in shipit_actors:
community_shipits += 1
shipit_actors.append(actor)
continue
# other shipits
if actor not in shipit_actors_other:
other_shipits += 1
shipit_actors_other.append(actor)
continue
# submitters should count if they are core team/maintainers/community
if iw.submitter in core_team:
if iw.submitter not in shipit_actors:
ansible_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
elif iw.submitter in maintainers:
if iw.submitter not in shipit_actors:
maintainer_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
elif iw.submitter in community:
if iw.submitter not in shipit_actors:
community_shipits += 1
shipit_actors.append(iw.submitter)
shipits_historical.add(iw.submitter)
nmeta[u'shipit_count_other'] = other_shipits
nmeta[u'shipit_count_community'] = community_shipits
nmeta[u'shipit_count_maintainer'] = maintainer_shipits
nmeta[u'shipit_count_ansible'] = ansible_shipits
nmeta[u'shipit_actors'] = shipit_actors
nmeta[u'shipit_actors_other'] = shipit_actors_other
nmeta[u'community_usernames'] = sorted(community)
nmeta[u'shipit_count_historical'] = list(shipits_historical)
nmeta[u'shipit_count_htotal'] = len(list(shipits_historical))
total = community_shipits + maintainer_shipits + ansible_shipits
nmeta[u'shipit_count_vtotal'] = total + other_shipits
if rebuild_merge:
nmeta['is_rebuild_merge'] = True
# include shipits from other people to push over the edge
if total == 1 and other_shipits > 2:
total += other_shipits
if total > 1 or rebuild_merge:
nmeta[u'shipit'] = True
elif meta[u'is_new_module'] or \
(len(maintainers) == 1 and maintainer_shipits == 1):
# don't notify if there is no maintainer or if submitter is the only namespace maintainer
if set(community) - {iw.submitter}:
bpc = iw.history.get_boilerplate_comments()
bpc = [x[0] for x in bpc]
if u'community_shipit_notify' not in bpc:
nmeta[u'notify_community_shipit'] = True
logging.info(u'total shipits: %s' % total)
# supershipit ...
# if a supershipiteer for each file exists and has blessed the PR
# on the current commit, then override all shipit tallies and get this PR merged
if supershipiteers_voted:
nmeta[u'supershipit_actors'] = list(supershipiteers_voted)
cm_files = [x[u'repo_filename'] for x in meta[u'component_matches']]
ss_files = set()
for ssv in supershipiteers_voted:
for fn in supershipiteers_byuser[ssv]:
ss_files.add(fn)
if sorted(set(cm_files)) == sorted(set(ss_files)):
logging.info(u'supershipit enabled on %s' % iw.html_url)
nmeta[u'supershipit'] = True
nmeta[u'shipit'] = True
else:
for cm_file in sorted(cm_files):
if cm_file not in ss_files:
logging.info('%s is not governed by supershipit' % cm_file)
return nmeta
|
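The behavioural part of this diff is the added parentheses in the sanity-test filter: `not a and b` parses as `(not a) and b`, so the old expression also dropped every file that is not an ignore.txt. A small demonstration:

files = ['lib/ansible/modules/foo.py', 'test/sanity/ignore.txt', 'README.md']

old = [f for f in files if not f.startswith('test/sanity') and f.endswith('ignore.txt')]
new = [f for f in files if not (f.startswith('test/sanity') and f.endswith('ignore.txt'))]

print(old)  # [] -- everything that is not an ignore.txt got filtered out too
print(new)  # ['lib/ansible/modules/foo.py', 'README.md']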
11,691 | def gen_suffix_username():
return str(uuid.uuid4()).split("-", 1)[0]
| def suffix_taken_username():
return str(uuid.uuid4()).split("-", 1)[0]
|
31,374 | def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs_objects = list()
for item in raw_response:
item.update({'collection': collection})
outputs_objects.append(item)
outputs = {CONTEXT_KEY: outputs_objects}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
| def pipeline_query_command(client: Client, collection: str, pipeline: str, limit: str = '50', offset: str = '0',
**kwargs) -> Tuple[str, dict, list]:
limit = int(limit)
offset = int(offset)
try:
json_pipeline = validate_json_objects(json.loads(pipeline))
raw_response = client.pipeline_query(
collection=collection,
pipeline=json_pipeline,
)
except JSONDecodeError:
raise DemistoException('The `pipeline` argument is not a valid json.')
if raw_response:
raw_response = raw_response if len(raw_response) <= limit else raw_response[offset:(offset + limit)]
readable_outputs = tableToMarkdown(
f'Total of {len(raw_response)} entries were found in MongoDB collection: `{collection}` '
f'with pipeline: {pipeline}:',
t=[entry.get('_id') for entry in raw_response],
headers=['_id'],
)
outputs_objects = list()
for item in raw_response:
item.update({'collection': collection})
outputs_objects.append(item)
outputs = {CONTEXT_KEY: outputs_objects}
return readable_outputs, outputs, raw_response
else:
return 'MongoDB: No results found', {}, raw_response
|
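The pagination above only slices when more than `limit` documents come back, so `offset` is ignored for small result sets; a standalone illustration of that offset/limit behaviour:

def paginate(rows, limit, offset):
    return rows if len(rows) <= limit else rows[offset:offset + limit]

rows = list(range(10))
print(paginate(rows, limit=50, offset=5))  # [0, 1, ..., 9] -- fewer than limit, offset ignored
print(paginate(rows, limit=3, offset=5))   # [5, 6, 7]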
44,202 | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
| def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
        T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those such that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
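A plain-NumPy sanity check of the first-level factorization step used above: for a positive-semidefinite matricized tensor, summing the outer products of the factors reproduces the original four-index tensor (synthetic data, not real electron integrals).

import numpy as np

n, tol = 2, 1e-8
rng = np.random.default_rng(0)
A = rng.standard_normal((n * n, n * n))
matrix = A @ A.T                      # symmetric PSD stand-in for the matricized [11|22]
two = matrix.reshape(n, n, n, n)

eigvals, eigvecs = np.linalg.eigh(matrix)
keep = np.abs(eigvals) > tol
vectors = eigvecs[:, keep] * np.sqrt(np.abs(eigvals[keep]))
factors = np.array([vectors[:, r].reshape(n, n) for r in range(vectors.shape[1])])

reconstructed = np.einsum('rij,rkl->ijkl', factors, factors)
print(np.allclose(reconstructed, two))  # True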
52,531 | def _upgrade_existing_database(
cur,
current_version,
applied_delta_files,
upgraded,
database_engine,
config,
databases,
is_empty=False,
):
"""Upgrades an existing physical database.
Delta files can either be SQL stored in *.sql files, or python modules
in *.py.
There can be multiple delta files per version. Synapse will keep track of
which delta files have been applied, and will apply any that haven't been
even if there has been no version bump. This is useful for development
where orthogonal schema changes may happen on separate branches.
Different delta files for the same version *must* be orthogonal and give
the same result when applied in any order. No guarantees are made on the
order of execution of these scripts.
This is a no-op of current_version == SCHEMA_VERSION.
Example directory structure:
schema/
delta/
11/
foo.sql
...
12/
foo.sql
bar.py
...
full_schemas/
...
In the example, if current_version is 11, then foo.sql will be run if and
only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
some arbitrary order.
Note: we apply the delta files from the specified data stores as well as
those in the top-level schema. We apply all delta files across data stores
for a version before applying those in the next version.
Args:
cur (Cursor)
current_version (int): The current version of the schema.
applied_delta_files (list): A list of deltas that have already been
applied.
upgraded (bool): Whether the current version was generated by having
applied deltas or from full schema file. If `True` the function
will never apply delta files for the given `current_version`, since
the current_version wasn't generated by applying those delta files.
database_engine (DatabaseEngine)
config (synapse.config.homeserver.HomeServerConfig|None):
None if we are initialising a blank database, otherwise the application
config
databases (list[str]): The names of the databases to instantiate
on the given physical database.
is_empty (bool): Is this a blank database? I.e. do we need to run the
upgrade portions of the delta scripts.
"""
if is_empty:
assert not applied_delta_files
else:
assert config
if current_version > SCHEMA_VERSION:
raise ValueError(
"Cannot use this database as it is too "
+ "new for the server to understand"
)
# some of the deltas assume that config.server_name is set correctly, so now
# is a good time to run the sanity check.
if not is_empty and "main" in databases:
from synapse.storage.databases.main import check_database_before_upgrade
check_database_before_upgrade(cur, database_engine, config)
start_ver = current_version
if not upgraded:
start_ver += 1
logger.debug("applied_delta_files: %s", applied_delta_files)
if isinstance(database_engine, PostgresEngine):
specific_engine_extension = ".postgres"
else:
specific_engine_extension = ".sqlite"
specific_engine_extensions = (".sqlite", ".postgres")
for v in range(start_ver, SCHEMA_VERSION + 1):
logger.info("Upgrading schema to v%d", v)
# We need to search both the global and per data store schema
# directories for schema updates.
# First we find the directories to search in
delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
directories = [delta_dir]
for data_store in databases:
directories.append(
os.path.join(
dir_path, "databases", data_store, "schema", "delta", str(v)
)
)
# Used to check if we have any duplicate file names
file_name_counter = Counter()
# Now find which directories have anything of interest.
directory_entries = []
for directory in directories:
logger.debug("Looking for schema deltas in %s", directory)
try:
file_names = os.listdir(directory)
directory_entries.extend(
_DirectoryListing(file_name, os.path.join(directory, file_name))
for file_name in file_names
)
for file_name in file_names:
file_name_counter[file_name] += 1
except FileNotFoundError:
# Data stores can have empty entries for a given version delta.
pass
except OSError:
raise UpgradeDatabaseException(
"Could not open delta dir for version %d: %s" % (v, directory)
)
duplicates = {
file_name for file_name, count in file_name_counter.items() if count > 1
}
if duplicates:
# We don't support using the same file name in the same delta version.
raise PrepareDatabaseException(
"Found multiple delta files with the same name in v%d: %s"
% (v, duplicates,)
)
# We sort to ensure that we apply the delta files in a consistent
# order (to avoid bugs caused by inconsistent directory listing order)
directory_entries.sort()
for entry in directory_entries:
file_name = entry.file_name
relative_path = os.path.join(str(v), file_name)
absolute_path = entry.absolute_path
logger.debug("Found file: %s (%s)", relative_path, absolute_path)
if relative_path in applied_delta_files:
continue
root_name, ext = os.path.splitext(file_name)
if ext == ".py":
# This is a python upgrade module. We need to import into some
# package and then execute its `run_upgrade` function.
module_name = "synapse.storage.v%d_%s" % (v, root_name)
with open(absolute_path) as python_file:
module = imp.load_source(module_name, absolute_path, python_file)
logger.info("Running script %s", relative_path)
module.run_create(cur, database_engine)
if not is_empty:
module.run_upgrade(cur, database_engine, config=config)
elif ext == ".pyc" or file_name == "__pycache__":
# Sometimes .pyc files turn up anyway even though we've
# disabled their generation; e.g. from distribution package
# installers. Silently skip it
continue
elif ext == ".sql":
# A plain old .sql file, just read and execute it
logger.info("Applying schema %s", relative_path)
executescript(cur, absolute_path)
elif ext == specific_engine_extension and root_name.endswith(".sql"):
# A .sql file specific to our engine; just read and execute it
logger.info("Applying engine-specific schema %s", relative_path)
executescript(cur, absolute_path)
elif ext in specific_engine_extensions and root_name.endswith(".sql"):
# A .sql file for a different engine; skip it.
continue
else:
# Not a valid delta file.
logger.warning(
"Found directory entry that did not end in .py or .sql: %s",
relative_path,
)
continue
# Mark as done.
cur.execute(
database_engine.convert_param_style(
"INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)"
),
(v, relative_path),
)
cur.execute("DELETE FROM schema_version")
cur.execute(
database_engine.convert_param_style(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)"
),
(v, True),
)
| def _upgrade_existing_database(
cur,
current_version,
applied_delta_files,
upgraded,
database_engine,
config,
databases,
is_empty=False,
):
"""Upgrades an existing physical database.
Delta files can either be SQL stored in *.sql files, or python modules
in *.py.
There can be multiple delta files per version. Synapse will keep track of
which delta files have been applied, and will apply any that haven't been
even if there has been no version bump. This is useful for development
where orthogonal schema changes may happen on separate branches.
Different delta files for the same version *must* be orthogonal and give
the same result when applied in any order. No guarantees are made on the
order of execution of these scripts.
    This is a no-op if current_version == SCHEMA_VERSION.
Example directory structure:
schema/
delta/
11/
foo.sql
...
12/
foo.sql
bar.py
...
full_schemas/
...
In the example, if current_version is 11, then foo.sql will be run if and
only if `upgraded` is True. Then `foo.sql` and `bar.py` would be run in
some arbitrary order.
Note: we apply the delta files from the specified data stores as well as
those in the top-level schema. We apply all delta files across data stores
for a version before applying those in the next version.
Args:
cur (Cursor)
current_version (int): The current version of the schema.
applied_delta_files (list): A list of deltas that have already been
applied.
upgraded (bool): Whether the current version was generated by having
applied deltas or from full schema file. If `True` the function
will never apply delta files for the given `current_version`, since
the current_version wasn't generated by applying those delta files.
database_engine (DatabaseEngine)
config (synapse.config.homeserver.HomeServerConfig|None):
None if we are initialising a blank database, otherwise the application
config
databases (list[str]): The names of the databases to instantiate
on the given physical database.
is_empty (bool): Is this a blank database? I.e. do we need to run the
upgrade portions of the delta scripts.
"""
if is_empty:
assert not applied_delta_files
else:
assert config
if current_version > SCHEMA_VERSION:
raise ValueError(
"Cannot use this database as it is too "
+ "new for the server to understand"
)
# some of the deltas assume that config.server_name is set correctly, so now
# is a good time to run the sanity check.
if not is_empty and "main" in databases:
from synapse.storage.databases.main import check_database_before_upgrade
check_database_before_upgrade(cur, database_engine, config)
start_ver = current_version
if not upgraded:
start_ver += 1
logger.debug("applied_delta_files: %s", applied_delta_files)
if isinstance(database_engine, PostgresEngine):
specific_engine_extension = ".postgres"
else:
specific_engine_extension = ".sqlite"
specific_engine_extensions = (".sqlite", ".postgres")
for v in range(start_ver, SCHEMA_VERSION + 1):
logger.info("Upgrading schema to v%d", v)
# We need to search both the global and per data store schema
# directories for schema updates.
# First we find the directories to search in
delta_dir = os.path.join(dir_path, "schema", "delta", str(v))
directories = [delta_dir]
for database in databases:
directories.append(
os.path.join(
dir_path, "databases", database, "schema", "delta", str(v)
)
)
# Used to check if we have any duplicate file names
file_name_counter = Counter()
# Now find which directories have anything of interest.
directory_entries = []
for directory in directories:
logger.debug("Looking for schema deltas in %s", directory)
try:
file_names = os.listdir(directory)
directory_entries.extend(
_DirectoryListing(file_name, os.path.join(directory, file_name))
for file_name in file_names
)
for file_name in file_names:
file_name_counter[file_name] += 1
except FileNotFoundError:
# Data stores can have empty entries for a given version delta.
pass
except OSError:
raise UpgradeDatabaseException(
"Could not open delta dir for version %d: %s" % (v, directory)
)
duplicates = {
file_name for file_name, count in file_name_counter.items() if count > 1
}
if duplicates:
# We don't support using the same file name in the same delta version.
raise PrepareDatabaseException(
"Found multiple delta files with the same name in v%d: %s"
% (v, duplicates,)
)
# We sort to ensure that we apply the delta files in a consistent
# order (to avoid bugs caused by inconsistent directory listing order)
directory_entries.sort()
for entry in directory_entries:
file_name = entry.file_name
relative_path = os.path.join(str(v), file_name)
absolute_path = entry.absolute_path
logger.debug("Found file: %s (%s)", relative_path, absolute_path)
if relative_path in applied_delta_files:
continue
root_name, ext = os.path.splitext(file_name)
if ext == ".py":
# This is a python upgrade module. We need to import into some
# package and then execute its `run_upgrade` function.
module_name = "synapse.storage.v%d_%s" % (v, root_name)
with open(absolute_path) as python_file:
module = imp.load_source(module_name, absolute_path, python_file)
logger.info("Running script %s", relative_path)
module.run_create(cur, database_engine)
if not is_empty:
module.run_upgrade(cur, database_engine, config=config)
elif ext == ".pyc" or file_name == "__pycache__":
# Sometimes .pyc files turn up anyway even though we've
# disabled their generation; e.g. from distribution package
# installers. Silently skip it
continue
elif ext == ".sql":
# A plain old .sql file, just read and execute it
logger.info("Applying schema %s", relative_path)
executescript(cur, absolute_path)
elif ext == specific_engine_extension and root_name.endswith(".sql"):
# A .sql file specific to our engine; just read and execute it
logger.info("Applying engine-specific schema %s", relative_path)
executescript(cur, absolute_path)
elif ext in specific_engine_extensions and root_name.endswith(".sql"):
# A .sql file for a different engine; skip it.
continue
else:
# Not a valid delta file.
logger.warning(
"Found directory entry that did not end in .py or .sql: %s",
relative_path,
)
continue
# Mark as done.
cur.execute(
database_engine.convert_param_style(
"INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)"
),
(v, relative_path),
)
cur.execute("DELETE FROM schema_version")
cur.execute(
database_engine.convert_param_style(
"INSERT INTO schema_version (version, upgraded) VALUES (?,?)"
),
(v, True),
)
|
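The duplicate-delta guard above can be illustrated on its own: the same file name appearing under one schema version in more than one directory is rejected, because the apply order across directories is not guaranteed. The directory names below are made up.

from collections import Counter

directory_listings = {
    'schema/delta/58': ['01foo.sql', '02bar.py'],
    'databases/main/schema/delta/58': ['02bar.py'],  # clashes with the global dir
}

counter = Counter(
    name for names in directory_listings.values() for name in names
)
duplicates = {name for name, count in counter.items() if count > 1}
print(duplicates)  # {'02bar.py'}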
932 | def test_Mod():
assert Mod(x, 1).func is Mod
assert pi % pi is S.Zero
assert Mod(5, 3) == 2
assert Mod(-5, 3) == 1
assert Mod(5, -3) == -1
assert Mod(-5, -3) == -2
assert type(Mod(3.2, 2, evaluate=False)) == Mod
assert 5 % x == Mod(5, x)
assert x % 5 == Mod(x, 5)
assert x % y == Mod(x, y)
assert (x % y).subs({x: 5, y: 3}) == 2
assert Mod(nan, 1) is nan
assert Mod(1, nan) is nan
assert Mod(nan, nan) is nan
assert Mod(0, x) == 0
with raises(ZeroDivisionError):
Mod(x, 0)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, positive=True)
assert (x**m % x).func is Mod
assert (k**(-m) % k).func is Mod
assert k**m % k == 0
assert (-2*k)**m % k == 0
# Float handling
point3 = Float(3.3) % 1
assert (x - 3.3) % 1 == Mod(1.*x + 1 - point3, 1)
assert Mod(-3.3, 1) == 1 - point3
assert Mod(0.7, 1) == Float(0.7)
e = Mod(1.3, 1)
assert comp(e, .3) and e.is_Float
e = Mod(1.3, .7)
assert comp(e, .6) and e.is_Float
e = Mod(1.3, Rational(7, 10))
assert comp(e, .6) and e.is_Float
e = Mod(Rational(13, 10), 0.7)
assert comp(e, .6) and e.is_Float
e = Mod(Rational(13, 10), Rational(7, 10))
assert comp(e, .6) and e.is_Rational
# check that sign is right
r2 = sqrt(2)
r3 = sqrt(3)
for i in [-r3, -r2, r2, r3]:
for j in [-r3, -r2, r2, r3]:
assert verify_numerically(i % j, i.n() % j.n())
for _x in range(4):
for _y in range(9):
reps = [(x, _x), (y, _y)]
assert Mod(3*x + y, 9).subs(reps) == (3*_x + _y) % 9
# denesting
t = Symbol('t', real=True)
assert Mod(Mod(x, t), t) == Mod(x, t)
assert Mod(-Mod(x, t), t) == Mod(-x, t)
assert Mod(Mod(x, 2*t), t) == Mod(x, t)
assert Mod(-Mod(x, 2*t), t) == Mod(-x, t)
assert Mod(Mod(x, t), 2*t) == Mod(x, t)
assert Mod(-Mod(x, t), -2*t) == -Mod(x, t)
for i in [-4, -2, 2, 4]:
for j in [-4, -2, 2, 4]:
for k in range(4):
assert Mod(Mod(x, i), j).subs({x: k}) == (k % i) % j
assert Mod(-Mod(x, i), j).subs({x: k}) == -(k % i) % j
# known difference
assert Mod(5*sqrt(2), sqrt(5)) == 5*sqrt(2) - 3*sqrt(5)
p = symbols('p', positive=True)
assert Mod(2, p + 3) == 2
assert Mod(-2, p + 3) == p + 1
assert Mod(2, -p - 3) == -p - 1
assert Mod(-2, -p - 3) == -2
assert Mod(p + 5, p + 3) == 2
assert Mod(-p - 5, p + 3) == p + 1
assert Mod(p + 5, -p - 3) == -p - 1
assert Mod(-p - 5, -p - 3) == -2
assert Mod(p + 1, p - 1).func is Mod
# handling sums
assert (x + 3) % 1 == Mod(x, 1)
assert (x + 3.0) % 1 == Mod(1.*x, 1)
assert (x - S(33)/10) % 1 == Mod(x + S(7)/10, 1)
a = Mod(.6*x + y, .3*y)
b = Mod(0.1*y + 0.6*x, 0.3*y)
# Test that a, b are equal, with 1e-14 accuracy in coefficients
eps = 1e-14
assert abs((a.args[0] - b.args[0]).subs({x: 1, y: 1})) < eps
assert abs((a.args[1] - b.args[1]).subs({x: 1, y: 1})) < eps
assert (x + 1) % x == 1 % x
assert (x + y) % x == y % x
assert (x + y + 2) % x == (y + 2) % x
assert (a + 3*x + 1) % (2*x) == Mod(a + x + 1, 2*x)
assert (12*x + 18*y) % (3*x) == 3*Mod(6*y, x)
# gcd extraction
assert (-3*x) % (-2*y) == -Mod(3*x, 2*y)
assert (.6*pi) % (.3*x*pi) == 0.3*pi*Mod(2, x)
assert (.6*pi) % (.31*x*pi) == pi*Mod(0.6, 0.31*x)
assert (6*pi) % (.3*x*pi) == 0.3*pi*Mod(20, x)
assert (6*pi) % (.31*x*pi) == pi*Mod(6, 0.31*x)
assert (6*pi) % (.42*x*pi) == pi*Mod(6, 0.42*x)
assert (12*x) % (2*y) == 2*Mod(6*x, y)
assert (12*x) % (3*5*y) == 3*Mod(4*x, 5*y)
assert (12*x) % (15*x*y) == 3*x*Mod(4, 5*y)
assert (-2*pi) % (3*pi) == pi
assert (2*x + 2) % (x + 1) == 0
assert (x*(x + 1)) % (x + 1) == (x + 1)*Mod(x, 1)
assert Mod(5.0*x, 0.1*y) == 0.1*Mod(50*x, y)
i = Symbol('i', integer=True)
assert (3*i*x) % (2*i*y) == i*Mod(3*x, 2*y)
assert Mod(4*i, 4) == 0
# issue 8677
n = Symbol('n', integer=True, positive=True)
assert factorial(n) % n == 0
assert factorial(n + 2) % n == 0
assert (factorial(n + 4) % (n + 5)).func is Mod
# Wilson's theorem
assert factorial(18042, evaluate=False) % 18043 == 18042
p = Symbol('n', prime=True)
assert factorial(p - 1) % p == p - 1
assert factorial(p - 1) % -p == -1
assert (factorial(3, evaluate=False) % 4).doit() == 2
n = Symbol('n', composite=True, odd=True)
assert factorial(n - 1) % n == 0
# symbolic with known parity
n = Symbol('n', even=True)
assert Mod(n, 2) == 0
n = Symbol('n', odd=True)
assert Mod(n, 2) == 1
# issue 10963
assert (x**6000%400).args[1] == 400
#issue 13543
assert Mod(Mod(x + 1, 2) + 1, 2) == Mod(x, 2)
assert Mod(Mod(x + 2, 4)*(x + 4), 4) == Mod(x*(x + 2), 4)
assert Mod(Mod(x + 2, 4)*4, 4) == 0
# issue 15493
i, j = symbols('i j', integer=True, positive=True)
assert Mod(3*i, 2) == Mod(i, 2)
assert Mod(8*i/j, 4) == 4*Mod(2*i/j, 1)
assert Mod(8*i, 4) == 0
# rewrite
assert Mod(x, y).rewrite(floor) == x - y*floor(x/y)
assert ((x - Mod(x, y))/y).rewrite(floor) == floor(x/y)
# issue 21373
from sympy.functions.elementary.trigonometric import sinh
from sympy.functions.elementary.piecewise import Piecewise
x_r, y_r = symbols('x_r y_r', real=True)
assert (Piecewise((x_r, y_r > x_r), (y_r, True)) / z) % 1
expr = exp(sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) / z))
expr.subs({1: 1.0})
sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) * z ** -1.0).is_zero
| def test_Mod():
assert Mod(x, 1).func is Mod
assert pi % pi is S.Zero
assert Mod(5, 3) == 2
assert Mod(-5, 3) == 1
assert Mod(5, -3) == -1
assert Mod(-5, -3) == -2
assert type(Mod(3.2, 2, evaluate=False)) == Mod
assert 5 % x == Mod(5, x)
assert x % 5 == Mod(x, 5)
assert x % y == Mod(x, y)
assert (x % y).subs({x: 5, y: 3}) == 2
assert Mod(nan, 1) is nan
assert Mod(1, nan) is nan
assert Mod(nan, nan) is nan
assert Mod(0, x) == 0
with raises(ZeroDivisionError):
Mod(x, 0)
k = Symbol('k', integer=True)
m = Symbol('m', integer=True, positive=True)
assert (x**m % x).func is Mod
assert (k**(-m) % k).func is Mod
assert k**m % k == 0
assert (-2*k)**m % k == 0
# Float handling
point3 = Float(3.3) % 1
assert (x - 3.3) % 1 == Mod(1.*x + 1 - point3, 1)
assert Mod(-3.3, 1) == 1 - point3
assert Mod(0.7, 1) == Float(0.7)
e = Mod(1.3, 1)
assert comp(e, .3) and e.is_Float
e = Mod(1.3, .7)
assert comp(e, .6) and e.is_Float
e = Mod(1.3, Rational(7, 10))
assert comp(e, .6) and e.is_Float
e = Mod(Rational(13, 10), 0.7)
assert comp(e, .6) and e.is_Float
e = Mod(Rational(13, 10), Rational(7, 10))
assert comp(e, .6) and e.is_Rational
# check that sign is right
r2 = sqrt(2)
r3 = sqrt(3)
for i in [-r3, -r2, r2, r3]:
for j in [-r3, -r2, r2, r3]:
assert verify_numerically(i % j, i.n() % j.n())
for _x in range(4):
for _y in range(9):
reps = [(x, _x), (y, _y)]
assert Mod(3*x + y, 9).subs(reps) == (3*_x + _y) % 9
# denesting
t = Symbol('t', real=True)
assert Mod(Mod(x, t), t) == Mod(x, t)
assert Mod(-Mod(x, t), t) == Mod(-x, t)
assert Mod(Mod(x, 2*t), t) == Mod(x, t)
assert Mod(-Mod(x, 2*t), t) == Mod(-x, t)
assert Mod(Mod(x, t), 2*t) == Mod(x, t)
assert Mod(-Mod(x, t), -2*t) == -Mod(x, t)
for i in [-4, -2, 2, 4]:
for j in [-4, -2, 2, 4]:
for k in range(4):
assert Mod(Mod(x, i), j).subs({x: k}) == (k % i) % j
assert Mod(-Mod(x, i), j).subs({x: k}) == -(k % i) % j
# known difference
assert Mod(5*sqrt(2), sqrt(5)) == 5*sqrt(2) - 3*sqrt(5)
p = symbols('p', positive=True)
assert Mod(2, p + 3) == 2
assert Mod(-2, p + 3) == p + 1
assert Mod(2, -p - 3) == -p - 1
assert Mod(-2, -p - 3) == -2
assert Mod(p + 5, p + 3) == 2
assert Mod(-p - 5, p + 3) == p + 1
assert Mod(p + 5, -p - 3) == -p - 1
assert Mod(-p - 5, -p - 3) == -2
assert Mod(p + 1, p - 1).func is Mod
# handling sums
assert (x + 3) % 1 == Mod(x, 1)
assert (x + 3.0) % 1 == Mod(1.*x, 1)
assert (x - S(33)/10) % 1 == Mod(x + S(7)/10, 1)
a = Mod(.6*x + y, .3*y)
b = Mod(0.1*y + 0.6*x, 0.3*y)
# Test that a, b are equal, with 1e-14 accuracy in coefficients
eps = 1e-14
assert abs((a.args[0] - b.args[0]).subs({x: 1, y: 1})) < eps
assert abs((a.args[1] - b.args[1]).subs({x: 1, y: 1})) < eps
assert (x + 1) % x == 1 % x
assert (x + y) % x == y % x
assert (x + y + 2) % x == (y + 2) % x
assert (a + 3*x + 1) % (2*x) == Mod(a + x + 1, 2*x)
assert (12*x + 18*y) % (3*x) == 3*Mod(6*y, x)
# gcd extraction
assert (-3*x) % (-2*y) == -Mod(3*x, 2*y)
assert (.6*pi) % (.3*x*pi) == 0.3*pi*Mod(2, x)
assert (.6*pi) % (.31*x*pi) == pi*Mod(0.6, 0.31*x)
assert (6*pi) % (.3*x*pi) == 0.3*pi*Mod(20, x)
assert (6*pi) % (.31*x*pi) == pi*Mod(6, 0.31*x)
assert (6*pi) % (.42*x*pi) == pi*Mod(6, 0.42*x)
assert (12*x) % (2*y) == 2*Mod(6*x, y)
assert (12*x) % (3*5*y) == 3*Mod(4*x, 5*y)
assert (12*x) % (15*x*y) == 3*x*Mod(4, 5*y)
assert (-2*pi) % (3*pi) == pi
assert (2*x + 2) % (x + 1) == 0
assert (x*(x + 1)) % (x + 1) == (x + 1)*Mod(x, 1)
assert Mod(5.0*x, 0.1*y) == 0.1*Mod(50*x, y)
i = Symbol('i', integer=True)
assert (3*i*x) % (2*i*y) == i*Mod(3*x, 2*y)
assert Mod(4*i, 4) == 0
# issue 8677
n = Symbol('n', integer=True, positive=True)
assert factorial(n) % n == 0
assert factorial(n + 2) % n == 0
assert (factorial(n + 4) % (n + 5)).func is Mod
# Wilson's theorem
assert factorial(18042, evaluate=False) % 18043 == 18042
p = Symbol('n', prime=True)
assert factorial(p - 1) % p == p - 1
assert factorial(p - 1) % -p == -1
assert (factorial(3, evaluate=False) % 4).doit() == 2
n = Symbol('n', composite=True, odd=True)
assert factorial(n - 1) % n == 0
# symbolic with known parity
n = Symbol('n', even=True)
assert Mod(n, 2) == 0
n = Symbol('n', odd=True)
assert Mod(n, 2) == 1
# issue 10963
assert (x**6000%400).args[1] == 400
#issue 13543
assert Mod(Mod(x + 1, 2) + 1, 2) == Mod(x, 2)
assert Mod(Mod(x + 2, 4)*(x + 4), 4) == Mod(x*(x + 2), 4)
assert Mod(Mod(x + 2, 4)*4, 4) == 0
# issue 15493
i, j = symbols('i j', integer=True, positive=True)
assert Mod(3*i, 2) == Mod(i, 2)
assert Mod(8*i/j, 4) == 4*Mod(2*i/j, 1)
assert Mod(8*i, 4) == 0
# rewrite
assert Mod(x, y).rewrite(floor) == x - y*floor(x/y)
assert ((x - Mod(x, y))/y).rewrite(floor) == floor(x/y)
# issue 21373
from sympy.functions.elementary.trigonometric import sinh
from sympy.functions.elementary.piecewise import Piecewise
x_r, y_r = symbols('x_r y_r', real=True)
assert unchanged(Mod, Piecewise((x_r, y_r > x_r), (y_r, True)) / z, 1)
expr = exp(sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) / z))
expr.subs({1: 1.0})
sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) * z ** -1.0).is_zero
|
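The modified version above swaps a bare expression statement for an `unchanged(Mod, ..., 1)` assertion, which checks that building the expression leaves it symbolically unevaluated. A minimal sketch of what such a helper verifies, using a locally defined stand-in because the helper's real import path is not shown in the row:

from sympy import Mod, Symbol

def unchanged_local(func, *args):
    # "Unchanged" means applying func to args returns an unevaluated
    # application whose arguments are exactly the ones passed in.
    result = func(*args)
    return isinstance(result, func) and result.args == args

x = Symbol('x')
assert unchanged_local(Mod, x, 2)      # Mod(x, 2) stays a symbolic Mod
assert not unchanged_local(Mod, 5, 3)  # Mod(5, 3) evaluates to the integer 2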
13,522 | def test_parse_longname():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[pass]
DLC=8
Var=Password unsigned 16,16 /ln:"Access Level : Password"
''',
).encode('utf-8'),
)
matrix = canmatrix.sym.load(f)
for frame in matrix.frames:
for s in frame.signals:
ln = s.attributes.get('LongName')
if ln:
assert ln == 'Access Level : Password'
| def test_parse_longname_with_colon():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[pass]
DLC=8
Var=Password unsigned 16,16 /ln:"Access Level : Password"
''',
).encode('utf-8'),
)
matrix = canmatrix.sym.load(f)
for frame in matrix.frames:
for s in frame.signals:
ln = s.attributes.get('LongName')
if ln:
assert ln == 'Access Level : Password'
|
59,539 | def call(
target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None,
name: Optional[str] = None,
channels: Optional[List[chans.Channel]] = None,
value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None,
**kw_params: ParameterValueType,
):
"""Call the subroutine within the currently active builder context with arbitrary
parameters which will be assigned to the target program.
.. note::
        If the ``target`` program is instance of schedule or quantum circuit,
it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction.
Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction
is added and ``target`` is separately registered to the references.
Examples:
1. Call with substantial program.
.. code-block:: python
from qiskit import circuit, pulse, schedule, transpile
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
qc = circuit.QuantumCircuit(2)
qc.cx(0, 1)
qc_transpiled = transpile(qc, optimization_level=3)
sched = schedule(qc_transpiled, backend)
with pulse.build(backend) as pulse_prog:
pulse.call(sched)
pulse.call(qc)
    This function can optionally take a parameter dictionary with the parameterized target program.
.. code-block:: python
from qiskit import circuit, pulse
amp = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0))
with pulse.build() as main_prog:
pulse.call(subroutine, amp=0.1)
pulse.call(subroutine, amp=0.3)
If there is any parameter name collision, you can distinguish them by specifying
each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be
updated with the same value.
.. code-block:: python
from qiskit import circuit, pulse
amp1 = circuit.Parameter('amp')
amp2 = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0))
pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1))
with pulse.build() as main_prog:
pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2})
2. Call with unassigned program.
.. code-block:: python
            from qiskit import pulse
with pulse.build() as main_prog:
ref_key = "my_subroutine"
pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)])
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0))
main_prog.assign_reference(ref_key=ref_key, schedule=subroutine)
When you call without actual program, you can assign the program afterwards
through the :meth:`ScheduleBlock.assign_reference` method.
Args:
target: Target circuit or pulse schedule to call. If this program is not
provided, both ``name`` and ``channels`` should be provided instead.
name: Name of subroutine if defined.
channels: Optional. Channels associated to the subroutine.
value_dict: Optional. Local scoped parameters assigned to the subroutine.
If this dictionary is provided, the ``target`` program is copied and
            then stored in the main built schedule with the parameters assigned.
This dictionary is keyed on the :class:`~.Parameter` object,
thus parameter name collision can be avoided.
This option is valid only when the subroutine is called with ``target``.
kw_params: Alternative way to provide local scoped parameters.
Since this is keyed on the string parameter name,
the parameters having the same name are all updated together.
If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter`
object instead.
Raises:
exceptions.PulseError: If the input ``target`` type is not supported.
exceptions.PulseError: Target program is empty and name and channels are not both provided.
exceptions.PulseError: Subroutine is called by name and channels but
local scoped parameters are also provided.
"""
if target is None:
if value_dict is not None or any(kw_params):
raise exceptions.PulseError(
"Parameters are provided without target program. "
"These parameters cannot be assigned."
)
if name is None or channels is None:
raise exceptions.PulseError(
"Subroutine name and channels are not both provided. "
"Please call subroutine with target program, or both name and channels."
)
_active_builder().append_reference(reference_key=name, channels=channels)
else:
if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)):
raise exceptions.PulseError(
f'Target of type "{target.__class__.__name__}" is not supported.'
)
_active_builder().call_subroutine(
subroutine=target, name=name, value_dict=value_dict, **kw_params
)
| def call(
target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None,
name: Optional[str] = None,
channels: Optional[List[chans.Channel]] = None,
value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None,
**kw_params: ParameterValueType,
):
"""Call the subroutine within the currently active builder context with arbitrary
parameters which will be assigned to the target program.
.. note::
        If the ``target`` program is a schedule or a quantum circuit instance, then
it will be assigned as a :class:`~qiskit.pulse.instructions.Call` instruction.
If the ``target`` program is not given and only its ``name`` is given, then
a :class:`~qiskit.pulse.instructions.Reference` instruction is added and
the ``target`` program is separately registered to the references at a later stage.
Examples:
1. Call with substantial program.
.. code-block:: python
from qiskit import circuit, pulse, schedule, transpile
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
qc = circuit.QuantumCircuit(2)
qc.cx(0, 1)
qc_transpiled = transpile(qc, optimization_level=3)
sched = schedule(qc_transpiled, backend)
with pulse.build(backend) as pulse_prog:
pulse.call(sched)
pulse.call(qc)
    This function can optionally take a parameter dictionary with the parameterized target program.
.. code-block:: python
from qiskit import circuit, pulse
amp = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0))
with pulse.build() as main_prog:
pulse.call(subroutine, amp=0.1)
pulse.call(subroutine, amp=0.3)
If there is any parameter name collision, you can distinguish them by specifying
each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be
updated with the same value.
.. code-block:: python
from qiskit import circuit, pulse
amp1 = circuit.Parameter('amp')
amp2 = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0))
pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1))
with pulse.build() as main_prog:
pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2})
2. Call with unassigned program.
.. code-block:: python
            from qiskit import pulse
with pulse.build() as main_prog:
ref_key = "my_subroutine"
pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)])
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0))
main_prog.assign_reference(ref_key=ref_key, schedule=subroutine)
When you call without actual program, you can assign the program afterwards
through the :meth:`ScheduleBlock.assign_reference` method.
Args:
target: Target circuit or pulse schedule to call. If this program is not
provided, both ``name`` and ``channels`` should be provided instead.
name: Name of subroutine if defined.
channels: Optional. Channels associated to the subroutine.
value_dict: Optional. Local scoped parameters assigned to the subroutine.
If this dictionary is provided, the ``target`` program is copied and
            then stored in the main built schedule with the parameters assigned.
This dictionary is keyed on the :class:`~.Parameter` object,
thus parameter name collision can be avoided.
This option is valid only when the subroutine is called with ``target``.
kw_params: Alternative way to provide local scoped parameters.
Since this is keyed on the string parameter name,
the parameters having the same name are all updated together.
If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter`
object instead.
Raises:
exceptions.PulseError: If the input ``target`` type is not supported.
exceptions.PulseError: Target program is empty and name and channels are not both provided.
exceptions.PulseError: Subroutine is called by name and channels but
local scoped parameters are also provided.
"""
if target is None:
if value_dict is not None or any(kw_params):
raise exceptions.PulseError(
"Parameters are provided without target program. "
"These parameters cannot be assigned."
)
if name is None or channels is None:
raise exceptions.PulseError(
"Subroutine name and channels are not both provided. "
"Please call subroutine with target program, or both name and channels."
)
_active_builder().append_reference(reference_key=name, channels=channels)
else:
if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)):
raise exceptions.PulseError(
f'Target of type "{target.__class__.__name__}" is not supported.'
)
_active_builder().call_subroutine(
subroutine=target, name=name, value_dict=value_dict, **kw_params
)
|
30,161 | def fetch_production(
zone_key="AM", session=None, target_datetime=None, logger=None
) -> dict:
r = session or requests.session()
url = SOURCE
response = r.get(url)
response.encoding = "utf-8"
html_doc = response.text
start_string = "<script type='text/javascript'>"
start_index = html_doc.find(start_string) + len(start_string)
stop_index = html_doc.find("left:")
soup = BeautifulSoup(html_doc[start_index:stop_index], "html.parser")
data_string = soup.find(text=re.compile("var"))
data_split = data_string.split("\r\n")
gas_tes = re.findall(REGEX, data_split[10])
gas_total = float(gas_tes[0])
hydro_ges = re.findall(REGEX, data_split[11])
hydro_altern = re.findall(REGEX, data_split[8])
hydro_total = float(hydro_ges[0]) + float(hydro_altern[0])
nuclear_atom = re.findall(REGEX, data_split[9])
nuclear_total = float(nuclear_atom[0])
time_data = [s for s in data_split if "time2" in s][0]
yerevan = tz.gettz(TZ)
date_time = dparser.parse(
time_data.split()[3], default=datetime.now(yerevan), fuzzy=True
)
# Operating solar, wind and biomass plants exist in small numbers, but are not reported yet
return {
"zoneKey": zone_key,
"datetime": date_time,
"production": {
"gas": gas_total,
"hydro": hydro_total,
"nuclear": nuclear_total,
"biomass": None,
"coal": 0,
"geothermal": 0,
"oil": 0,
"solar": None,
"wind": None,
},
"storage": {"hydro": 0, "battery": 0},
"source": SOURCE,
}
| def fetch_production(
zone_key="AM", session=None, target_datetime=None, logger=None
) -> dict:
r = session or requests.session()
response = r.get(SOURCE)
response.encoding = "utf-8"
html_doc = response.text
start_string = "<script type='text/javascript'>"
start_index = html_doc.find(start_string) + len(start_string)
stop_index = html_doc.find("left:")
soup = BeautifulSoup(html_doc[start_index:stop_index], "html.parser")
data_string = soup.find(text=re.compile("var"))
data_split = data_string.split("\r\n")
gas_tes = re.findall(REGEX, data_split[10])
gas_total = float(gas_tes[0])
hydro_ges = re.findall(REGEX, data_split[11])
hydro_altern = re.findall(REGEX, data_split[8])
hydro_total = float(hydro_ges[0]) + float(hydro_altern[0])
nuclear_atom = re.findall(REGEX, data_split[9])
nuclear_total = float(nuclear_atom[0])
time_data = [s for s in data_split if "time2" in s][0]
yerevan = tz.gettz(TZ)
date_time = dparser.parse(
time_data.split()[3], default=datetime.now(yerevan), fuzzy=True
)
# Operating solar, wind and biomass plants exist in small numbers, but are not reported yet
return {
"zoneKey": zone_key,
"datetime": date_time,
"production": {
"gas": gas_total,
"hydro": hydro_total,
"nuclear": nuclear_total,
"biomass": None,
"coal": 0,
"geothermal": 0,
"oil": 0,
"solar": None,
"wind": None,
},
"storage": {"hydro": 0, "battery": 0},
"source": SOURCE,
}
|
3,427 | def resolve_tags(org_id: int, input_: Any) -> Any:
"""Translate tags in snuba condition
Column("metric_id") is not supported.
"""
if input_ is None:
return None
if isinstance(input_, (list, tuple)):
elements = [resolve_tags(org_id, item) for item in input_]
# Lists are either arguments to IN or NOT IN. In both cases, we can
# drop unknown strings:
return [x for x in elements if x != STRING_NOT_FOUND]
if isinstance(input_, Function):
if input_.function == "ifNull":
# This was wrapped automatically by QueryBuilder, remove wrapper
return resolve_tags(org_id, input_.parameters[0])
elif input_.function == "isNull":
return Function(
"equals",
[
resolve_tags(org_id, input_.parameters[0]),
resolve_tags(org_id, ""),
],
)
elif input_.function in FUNCTION_ALLOWLIST:
return Function(
function=input_.function,
parameters=input_.parameters
and [resolve_tags(org_id, item) for item in input_.parameters],
)
if (
isinstance(input_, Or)
and len(input_.conditions) == 2
and isinstance(c := input_.conditions[0], Condition)
and isinstance(c.lhs, Function)
and c.lhs.function == "isNull"
and c.op == Op.EQ
and c.rhs == 1
):
# Remove another "null" wrapper. We should really write our own parser instead.
return resolve_tags(org_id, input_.conditions[1])
if isinstance(input_, Condition):
if input_.op == Op.IS_NULL and input_.rhs is None:
return Condition(
lhs=resolve_tags(org_id, input_.lhs), op=Op.EQ, rhs=resolve_tags(org_id, "")
)
if (
isinstance(input_.lhs, Function)
and input_.lhs.function == "ifNull"
and isinstance(input_.lhs.parameters[0], Column)
and input_.lhs.parameters[0].name == "tags[project]"
):
# Special condition as when we send a `project:<slug>` query, discover converter
# converts it into it a tags[project]:[<slug>] query, so we want to further process
            # the lhs to get to its translation of `project_id` but we don't want to further resolve
# rhs and we just want to extract the project ids from the slugs
rhs = [p.id for p in Project.objects.filter(slug__in=input_.rhs)]
return Condition(lhs=resolve_tags(org_id, input_.lhs), op=input_.op, rhs=rhs)
return Condition(
lhs=resolve_tags(org_id, input_.lhs), op=input_.op, rhs=resolve_tags(org_id, input_.rhs)
)
if isinstance(input_, BooleanCondition):
return input_.__class__(
conditions=[resolve_tags(org_id, item) for item in input_.conditions]
)
if isinstance(input_, Column):
if input_.name == "project_id":
return input_
# HACK: Some tags already take the form "tags[...]" in discover, take that into account:
if input_.subscriptable == "tags":
# Handles translating field aliases to their "metrics" equivalent, for example
# "project" -> "project_id"
if input_.key in FIELD_ALIAS_MAPPINGS:
return Column(FIELD_ALIAS_MAPPINGS[input_.key])
name = input_.key
else:
name = input_.name
return Column(name=resolve_tag_key(org_id, name))
if isinstance(input_, str):
return resolve_weak(org_id, input_)
if isinstance(input_, int):
return input_
raise InvalidParams("Unable to resolve conditions")
| def resolve_tags(org_id: int, input_: Any) -> Any:
"""Translate tags in snuba condition
Column("metric_id") is not supported.
"""
if input_ is None:
return None
if isinstance(input_, (list, tuple)):
elements = [resolve_tags(org_id, item) for item in input_]
# Lists are either arguments to IN or NOT IN. In both cases, we can
# drop unknown strings:
return [x for x in elements if x != STRING_NOT_FOUND]
if isinstance(input_, Function):
if input_.function == "ifNull":
# This was wrapped automatically by QueryBuilder, remove wrapper
return resolve_tags(org_id, input_.parameters[0])
elif input_.function == "isNull":
return Function(
"equals",
[
resolve_tags(org_id, input_.parameters[0]),
resolve_tags(org_id, ""),
],
)
elif input_.function in FUNCTION_ALLOWLIST:
return Function(
function=input_.function,
parameters=input_.parameters
and [resolve_tags(org_id, item) for item in input_.parameters],
)
if (
isinstance(input_, Or)
and len(input_.conditions) == 2
and isinstance(c := input_.conditions[0], Condition)
and isinstance(c.lhs, Function)
and c.lhs.function == "isNull"
and c.op == Op.EQ
and c.rhs == 1
):
# Remove another "null" wrapper. We should really write our own parser instead.
return resolve_tags(org_id, input_.conditions[1])
if isinstance(input_, Condition):
if input_.op == Op.IS_NULL and input_.rhs is None:
return Condition(
lhs=resolve_tags(org_id, input_.lhs), op=Op.EQ, rhs=resolve_tags(org_id, "")
)
if (
isinstance(input_.lhs, Function)
and input_.lhs.function == "ifNull"
and isinstance(input_.lhs.parameters[0], Column)
and input_.lhs.parameters[0].name == "tags[project]"
):
# Special condition as when we send a `project:<slug>` query, discover converter
# converts it into a tags[project]:[<slug>] query, so we want to further process
            # the lhs to get to its translation of `project_id` but we don't want to further resolve
# rhs and we just want to extract the project ids from the slugs
rhs = [p.id for p in Project.objects.filter(slug__in=input_.rhs)]
return Condition(lhs=resolve_tags(org_id, input_.lhs), op=input_.op, rhs=rhs)
return Condition(
lhs=resolve_tags(org_id, input_.lhs), op=input_.op, rhs=resolve_tags(org_id, input_.rhs)
)
if isinstance(input_, BooleanCondition):
return input_.__class__(
conditions=[resolve_tags(org_id, item) for item in input_.conditions]
)
if isinstance(input_, Column):
if input_.name == "project_id":
return input_
# HACK: Some tags already take the form "tags[...]" in discover, take that into account:
if input_.subscriptable == "tags":
# Handles translating field aliases to their "metrics" equivalent, for example
# "project" -> "project_id"
if input_.key in FIELD_ALIAS_MAPPINGS:
return Column(FIELD_ALIAS_MAPPINGS[input_.key])
name = input_.key
else:
name = input_.name
return Column(name=resolve_tag_key(org_id, name))
if isinstance(input_, str):
return resolve_weak(org_id, input_)
if isinstance(input_, int):
return input_
raise InvalidParams("Unable to resolve conditions")
|
52 | def works_by_author(akey, sort='editions', page=1, rows=100, has_fulltext=False, query=None):
# called by merge_author_works
q = 'author_key:' + akey
if query:
q = query
offset = rows * (page - 1)
fields = ['key', 'author_name', 'author_key', 'title', 'subtitle',
'edition_count', 'ia', 'cover_edition_key', 'has_fulltext',
'first_publish_year', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s',
'ia_collection_s', 'cover_i']
fl = ','.join(fields)
fq = ['author_key:OL31676A', 'type:work']
if has_fulltext:
fq.append('has_fulltext:true')
solr_select = solr_select_url + "?fq=%s&q.op=AND&q=%s&start=%d&rows=%d&fl=%s&wt=json" % ('&fq='.join(fq), q, offset, rows, fl)
facet_fields = ["author_facet", "language", "publish_year", "publisher_facet", "subject_facet", "person_facet", "place_facet", "time_facet"]
if sort == 'editions':
solr_select += '&sort=edition_count+desc'
elif sort.startswith('old'):
solr_select += '&sort=first_publish_year+asc'
elif sort.startswith('new'):
solr_select += '&sort=first_publish_year+desc'
elif sort.startswith('title'):
solr_select += '&sort=title+asc'
solr_select += "&facet=true&facet.mincount=1&f.author_facet.facet.sort=count&f.publish_year.facet.limit=-1&facet.limit=25&" + '&'.join("facet.field=" + f for f in facet_fields)
reply = parse_json_from_solr_query(solr_select)
if reply is None:
return web.storage(
num_found = 0,
works = [],
years = [],
get_facet = [],
sort = sort,
)
# TODO: Deep JSON structure defense - for now, let it blow up so easier to detect
facets = reply['facet_counts']['facet_fields']
works = [work_object(w) for w in reply['response']['docs']]
def get_facet(f, limit=None):
return list(web.group(facets[f][:limit * 2] if limit else facets[f], 2))
return web.storage(
num_found = int(reply['response']['numFound']),
works = add_availability(works),
years = [(int(k), v) for k, v in get_facet('publish_year')],
get_facet = get_facet,
sort = sort,
)
| def works_by_author(akey, sort='editions', page=1, rows=100, has_fulltext=False, query=None):
# called by merge_author_works
q = 'author_key:' + akey
if query:
q = query
offset = rows * (page - 1)
fields = ['key', 'author_name', 'author_key', 'title', 'subtitle',
'edition_count', 'ia', 'cover_edition_key', 'has_fulltext',
'first_publish_year', 'public_scan_b', 'lending_edition_s', 'lending_identifier_s',
'ia_collection_s', 'cover_i']
fl = ','.join(fields)
fq = ['author_key:' + akey, 'type:work']
if has_fulltext:
fq.append('has_fulltext:true')
solr_select = solr_select_url + "?fq=%s&q.op=AND&q=%s&start=%d&rows=%d&fl=%s&wt=json" % ('&fq='.join(fq), q, offset, rows, fl)
facet_fields = ["author_facet", "language", "publish_year", "publisher_facet", "subject_facet", "person_facet", "place_facet", "time_facet"]
if sort == 'editions':
solr_select += '&sort=edition_count+desc'
elif sort.startswith('old'):
solr_select += '&sort=first_publish_year+asc'
elif sort.startswith('new'):
solr_select += '&sort=first_publish_year+desc'
elif sort.startswith('title'):
solr_select += '&sort=title+asc'
solr_select += "&facet=true&facet.mincount=1&f.author_facet.facet.sort=count&f.publish_year.facet.limit=-1&facet.limit=25&" + '&'.join("facet.field=" + f for f in facet_fields)
reply = parse_json_from_solr_query(solr_select)
if reply is None:
return web.storage(
num_found = 0,
works = [],
years = [],
get_facet = [],
sort = sort,
)
# TODO: Deep JSON structure defense - for now, let it blow up so easier to detect
facets = reply['facet_counts']['facet_fields']
works = [work_object(w) for w in reply['response']['docs']]
def get_facet(f, limit=None):
return list(web.group(facets[f][:limit * 2] if limit else facets[f], 2))
return web.storage(
num_found = int(reply['response']['numFound']),
works = add_availability(works),
years = [(int(k), v) for k, v in get_facet('publish_year')],
get_facet = get_facet,
sort = sort,
)
|
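The fix above replaces a hard-coded `author_key:OL31676A` filter with one built from the `akey` argument. A small sketch of how the filter query ends up in the request URL; the endpoint and key below are placeholders, not values from the row:

solr_select_url = "http://solr:8983/solr/openlibrary/select"  # placeholder endpoint
akey = "OL23919A"                                             # placeholder author key
fq = ['author_key:' + akey, 'type:work']
fl = ','.join(['key', 'title', 'edition_count'])
solr_select = solr_select_url + "?fq=%s&q.op=AND&q=%s&start=%d&rows=%d&fl=%s&wt=json" % (
    '&fq='.join(fq), 'author_key:' + akey, 0, 100, fl)
print(solr_select)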
7,214 | def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
        Radius of the circle. If not given, it is set to 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
Warns
-----
Deprecated:
.. versionadded:: 0.17
This function is deprecated and will be removed in scikit-image 0.19.
Please use the function named ``disk_level_set`` instead.
See also
--------
checkerboard_level_set
"""
    warnings.warn("circle_level_set is deprecated in favor of "
                  "disk_level_set. "
                  "circle_level_set will be removed in version 0.19",
FutureWarning)
return disk_level_set(image_shape, center=center, radius=radius)
| def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
        Radius of the circle. If not given, it is set to 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
Warns
-----
Deprecated:
.. versionadded:: 0.17
This function is deprecated and will be removed in scikit-image 0.19.
Please use the function named ``disk_level_set`` instead.
See also
--------
checkerboard_level_set
"""
    warnings.warn("circle_level_set is deprecated in favor of "
                  "disk_level_set. "
                  "circle_level_set will be removed in version 0.19",
FutureWarning, stacklevel=2)
return disk_level_set(image_shape, center=center, radius=radius)
|
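The only functional change above is the added `stacklevel=2`. A short, self-contained illustration of what that argument does: with the default stacklevel the warning is attributed to the line inside the deprecated function itself, while `stacklevel=2` attributes it to the caller, which is where users actually need to change their code.

import warnings

def old_api():
    warnings.warn("old_api is deprecated, use new_api", FutureWarning, stacklevel=2)

def user_code():
    old_api()  # with stacklevel=2, the warning points at this line

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    user_code()

# The recorded warning carries the caller's location, not old_api's.
print(caught[0].filename, caught[0].lineno)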
5,373 | def test_cert_fresh_certificate_t():
"""
Test cert state fetching a new certificate. (test=True)
"""
# With test=True
with patch.dict(
acme.__salt__,
{
"acme.has": MagicMock(return_value=False),
"acme.info": MagicMock(return_value={"foo": "bar"}),
},
), patch.dict(acme.__opts__, {"test": True}):
match = {
"name": "test",
"result": None,
"comment": ["Certificate test would have been obtained."],
"changes": {"old": "current certificate", "new": "new certificate"},
}
assert acme.cert("test") == match
assert acme.cert("testing.example.com", certname="test") == match
| def test_cert_fresh_certificate_test():
"""
Test cert state fetching a new certificate. (test=True)
"""
# With test=True
with patch.dict(
acme.__salt__,
{
"acme.has": MagicMock(return_value=False),
"acme.info": MagicMock(return_value={"foo": "bar"}),
},
), patch.dict(acme.__opts__, {"test": True}):
match = {
"name": "test",
"result": None,
"comment": ["Certificate test would have been obtained."],
"changes": {"old": "current certificate", "new": "new certificate"},
}
assert acme.cert("test") == match
assert acme.cert("testing.example.com", certname="test") == match
|
42 | def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
| def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return [process_match(match) for match in DDC_RE.finditer(ddc) if match]
|
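A small illustration of the padding step both versions of `normalize_ddc` share: zero-padding the integer part to three digits with `'%03d'` makes plain string sorting agree with numeric DDC order, while the decimal part is copied over verbatim so no precision is lost. The sample values below are made up for the example.

raw = ["9.21", "92.1", "813.54", "92"]

def pad(ddc):
    integer, _, decimal = ddc.partition('.')
    return '%03d%s' % (int(integer), '.' + decimal if decimal else '')

print(sorted(pad(d) for d in raw))
# ['009.21', '092', '092.1', '813.54'] -- string order now matches numeric order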
31,802 | def get_attributes_command(client: Client, args: Dict[str, str], params: Dict[str, str]) -> CommandResults:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
args: demisto.args()
params: demisto.params()
Returns:
CommandResults object containing the indicators retrieved
"""
limit = int(args.get('limit', '10'))
tlp_color = params.get('tlp_color')
tags = argToList(args.get('tags', ''))
query = args.get('query', None)
attribute_type = argToList(args.get('attribute_type', ''))
indicators = fetch_indicators(client, tags, attribute_type, query, tlp_color, params.get('url'), limit)
human_readable = f'Retrieved {str(len(indicators))} indicators.'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISPFeed.Indicators',
outputs_key_field='',
raw_response=indicators,
outputs=indicators,
)
| def get_attributes_command(client: Client, args: Dict[str, str], params: Dict[str, str]) -> CommandResults:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
args: demisto.args()
params: demisto.params()
Returns:
CommandResults object containing the indicators retrieved
"""
limit = int(args.get('limit', '10'))
tlp_color = params.get('tlp_color')
tags = argToList(args.get('tags', ''))
query = args.get('query', None)
attribute_type = argToList(args.get('attribute_type', ''))
indicators = fetch_indicators(client, tags, attribute_type, query, tlp_color, params.get('url'), limit)
human_readable = f'Retrieved {len(indicators)} indicators.'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISPFeed.Indicators',
outputs_key_field='',
raw_response=indicators,
outputs=indicators,
)
|
58,150 | def attach_comment_to_ip(ip_list: List[str],
comment_list: List[str],
                             description: str = None) -> List:
    """ Ensure comment_list has the same length as ip_list
and description or ip is specified.
Args:
ip_list (_type_): The IP list.
comment_list (_type_): The comment list.
description (_type_): List description.
Raises:
ValueError: Description or ip must be provided.
Returns:
List: Items List.
"""
if ip_list:
while len(ip_list) > len(comment_list):
comment_list.append('')
items = []
if ip_list:
for ip, comment in zip(ip_list, comment_list):
items.append({'ip': ip, 'comment': comment})
else:
if not description:
raise ValueError('description or ip must be provided.')
return items
| def attach_comment_to_ip(ip_list: List[str],
comment_list: List[str],
                             description: str = None) -> List:
    """ Ensure comment_list has the same length as ip_list
and description or ip is specified.
Args:
ip_list (_type_): The IP list.
comment_list (_type_): The comment list.
description (_type_): List description.
Raises:
ValueError: Description or ip must be provided.
Returns:
List: Items List.
"""
items = []
if ip_list:
while len(ip_list) > len(comment_list):
comment_list.append('')
for ip, comment in zip(ip_list, comment_list):
items.append({'ip': ip, 'comment': comment})
else:
if not description:
raise ValueError('description or ip must be provided.')
return items
|
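A quick sketch of the pairing behaviour `attach_comment_to_ip` relies on: the comment list is padded with empty strings up to the length of the IP list before zipping, so IPs without a comment are not silently dropped. The addresses below are placeholders.

ips = ["10.0.0.1", "10.0.0.2", "10.0.0.3"]
comments = ["gateway"]

comments = comments + [''] * (len(ips) - len(comments))
items = [{'ip': ip, 'comment': c} for ip, c in zip(ips, comments)]
print(items)
# [{'ip': '10.0.0.1', 'comment': 'gateway'},
#  {'ip': '10.0.0.2', 'comment': ''},
#  {'ip': '10.0.0.3', 'comment': ''}]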
49,532 | def load_seednodes(emitter,
min_stake: int,
federated_only: bool,
network_domains: set,
network_middleware: RestMiddleware = None,
teacher_uris: list = None,
registry: BaseContractRegistry = None,
) -> List:
emitter.message("Connecting to preferred seednodes...", color='yellow')
from nucypher.characters.lawful import Ursula
# Set domains
if network_domains is None:
from nucypher.config.node import CharacterConfiguration
network_domains = {CharacterConfiguration.DEFAULT_DOMAIN, }
teacher_nodes = list() # Ursula
if teacher_uris is None:
teacher_uris = list()
for domain in network_domains:
try:
# Known NuCypher Domain
seednode_uris = TEACHER_NODES[domain]
except KeyError:
# Unknown NuCypher Domain
if not teacher_uris:
emitter.message(f"No default teacher nodes exist for the specified network: {domain}")
else:
# Prefer the injected teacher URI, then use the hardcoded seednodes.
teacher_uris.extend(seednode_uris)
for uri in teacher_uris:
try:
teacher_node = Ursula.from_teacher_uri(teacher_uri=uri,
min_stake=min_stake,
federated_only=federated_only,
network_middleware=network_middleware,
registry=registry)
except NodeSeemsToBeDown:
LOG.info(f"Failed to load seednode URI {uri}")
continue
teacher_nodes.append(teacher_node)
if not teacher_nodes:
emitter.message(f'WARNING - No Bootnodes Available')
return teacher_nodes
| def load_seednodes(emitter,
min_stake: int,
federated_only: bool,
network_domains: set,
network_middleware: RestMiddleware = None,
teacher_uris: list = None,
registry: BaseContractRegistry = None,
) -> List:
emitter.message("Connecting to preferred seednodes...", color='yellow')
from nucypher.characters.lawful import Ursula
# Set domains
if network_domains is None:
from nucypher.config.node import CharacterConfiguration
network_domains = {CharacterConfiguration.DEFAULT_DOMAIN, }
teacher_nodes = list() # Ursula
if teacher_uris is None:
teacher_uris = list()
for domain in network_domains:
try:
# Known NuCypher Domain
seednode_uris = TEACHER_NODES[domain]
except KeyError:
# Unknown NuCypher Domain
if not teacher_uris:
emitter.message(f"No default teacher nodes exist for the specified network: {domain}")
else:
# Prefer the injected teacher URI, then use the hardcoded seednodes.
teacher_uris.extend(seednode_uris)
emitter.message(f'Connecting to preferred seednodes for {domain}: {teacher_uris}', color='yellow')
for uri in teacher_uris:
try:
teacher_node = Ursula.from_teacher_uri(teacher_uri=uri,
min_stake=min_stake,
federated_only=federated_only,
network_middleware=network_middleware,
registry=registry)
except NodeSeemsToBeDown:
LOG.info(f"Failed to load seednode URI {uri}")
continue
teacher_nodes.append(teacher_node)
if not teacher_nodes:
emitter.message(f'WARNING - No Bootnodes Available')
return teacher_nodes
|
26,305 | def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
# https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B#Internal_version_numbering
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
elif msc_ver == '1600':
# VS2010 / MSVC 10.0
return ['msvcr100']
elif msc_ver == '1700':
# Visual Studio 2012 / Visual C++ 11.0
return ['msvcr110']
elif msc_ver == '1800':
# Visual Studio 2013 / Visual C++ 12.0
return ['msvcr120']
elif msc_ver == '1900':
# Visual Studio 2015 / Visual C++ 14.0
# "msvcr140.dll no longer exists" http://blogs.msdn.com/b/vcblog/archive/2014/06/03/visual-studio-14-ctp.aspx
return ['vcruntime140']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
| def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
# https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B#Internal_version_numbering
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
elif msc_ver == '1600':
# VS2010 / MSVC 10.0
return ['msvcr100']
elif msc_ver == '1700':
# Visual Studio 2012 / Visual C++ 11.0
return ['msvcr110']
elif msc_ver == '1800':
# Visual Studio 2013 / Visual C++ 12.0
return ['msvcr120']
elif msc_ver >= '1900':
# Visual Studio 2015 / Visual C++ 14.0
# "msvcr140.dll no longer exists" http://blogs.msdn.com/b/vcblog/archive/2014/06/03/visual-studio-14-ctp.aspx
return ['vcruntime140']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
|
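The modified `get_msvcr` compares the MSC version as a string with `>=`. For the four-digit values this parser produces, lexicographic comparison of equal-length digit strings coincides with numeric comparison, which is why `'1916' >= '1900'` does the right thing; converting to `int` first would be the more defensive choice if the field width ever changed. A tiny check of that equivalence:

for v in ["1300", "1800", "1900", "1916", "1929"]:
    assert (v >= "1900") == (int(v) >= 1900)
    print(v, v >= "1900")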
4,526 | def test_basic(tmpdir, capsys):
"""Test some basic functionality."""
assert cs.main('_does_not_exist_') == 0
fname = op.join(str(tmpdir), 'tmp')
with open(fname, 'w') as f:
pass
code, _, stderr = cs.main('-D', 'foo', f.name, std=True)
assert code == EX_USAGE, 'missing dictionary'
assert 'cannot find dictionary' in stderr
assert cs.main(fname) == 0, 'empty file'
with open(fname, 'a') as f:
f.write('this is a test file\n')
assert cs.main(fname) == 0, 'good'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 1, 'bad'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 2, 'worse'
with open(fname, 'a') as f:
f.write('tim\ngonna\n')
assert cs.main(fname) == 2, 'with a name'
assert cs.main('--builtin', 'clear,rare,names,informal', fname) == 4
code, _, stderr = cs.main(fname, '--builtin', 'foo', std=True)
assert code == EX_USAGE # bad type
assert 'Unknown builtin dictionary' in stderr
d = str(tmpdir)
code, _, stderr = cs.main(fname, '-D', op.join(d, 'foo'), std=True)
assert code == EX_USAGE # bad dict
assert 'cannot find dictionary' in stderr
os.remove(fname)
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned\nAbandonned\nABANDONNED\nAbAnDoNnEd\nabandonned\rAbandonned\r\nABANDONNED \nAbAnDoNnEd') # noqa: E501
assert cs.main(d) == 8
code, _, stderr = cs.main('-w', d, std=True)
assert code == 0
assert 'FIXED:' in stderr
with open(op.join(d, 'bad.txt')) as f:
new_content = f.read()
assert cs.main(d) == 0
assert new_content == 'abandoned\nAbandoned\nABANDONED\nabandoned\nabandoned\rAbandoned\r\nABANDONED \nabandonned' # noqa: E501
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned abandonned\n')
assert cs.main(d) == 2
code, stdout, stderr = cs.main(
'-q', '16', '-w', d, count=False, std=True)
assert code == 0
assert stdout == stderr == ''
assert cs.main(d) == 0
# empty directory
os.mkdir(op.join(d, 'test'))
assert cs.main(d) == 0
| def test_basic(tmpdir, capsys):
"""Test some basic functionality."""
assert cs.main('_does_not_exist_') == 0
fname = op.join(str(tmpdir), 'tmp')
with open(fname, 'w') as f:
pass
code, _, stderr = cs.main('-D', 'foo', f.name, std=True)
assert code == EX_USAGE, 'missing dictionary'
assert 'cannot find dictionary' in stderr
assert cs.main(fname) == 0, 'empty file'
with open(fname, 'a') as f:
f.write('this is a test file\n')
assert cs.main(fname) == 0, 'good'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 1, 'bad'
with open(fname, 'a') as f:
f.write('abandonned\n')
assert cs.main(fname) == 2, 'worse'
with open(fname, 'a') as f:
f.write('tim\ngonna\n')
assert cs.main(fname) == 2, 'with a name'
assert cs.main('--builtin', 'clear,rare,names,informal', fname) == 4
code, _, stderr = cs.main(fname, '--builtin', 'foo', std=True)
assert code == EX_USAGE # bad type
assert 'Unknown builtin dictionary' in stderr
d = str(tmpdir)
code, _, stderr = cs.main(fname, '-D', op.join(d, 'foo'), std=True)
assert code == EX_USAGE # bad dict
assert 'cannot find dictionary' in stderr
os.remove(fname)
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned\nAbandonned\nABANDONNED\nAbAnDoNnEd\nabandonned\rAbandonned\r\n ABANDONNED \nAbAnDoNnEd') # noqa: E501
assert cs.main(d) == 8
code, _, stderr = cs.main('-w', d, std=True)
assert code == 0
assert 'FIXED:' in stderr
with open(op.join(d, 'bad.txt')) as f:
new_content = f.read()
assert cs.main(d) == 0
assert new_content == 'abandoned\nAbandoned\nABANDONED\nabandoned\nabandoned\rAbandoned\r\nABANDONED \nabandonned' # noqa: E501
with open(op.join(d, 'bad.txt'), 'w') as f:
f.write('abandonned abandonned\n')
assert cs.main(d) == 2
code, stdout, stderr = cs.main(
'-q', '16', '-w', d, count=False, std=True)
assert code == 0
assert stdout == stderr == ''
assert cs.main(d) == 0
# empty directory
os.mkdir(op.join(d, 'test'))
assert cs.main(d) == 0
|
30,825 | def prettify_data_filtering_rule(rule):
pretty_rule = {
'Name': rule['@name'],
}
if 'application' in rule and 'member' in rule['application']:
pretty_rule['Application'] = rule['application']['member']
if 'file-type' in rule and 'member' in rule['file-type']:
pretty_rule['File-type'] = rule['file-type']['member']
if 'direction' in rule:
pretty_rule['Direction'] = rule['direction']
if 'alert-threshold' in rule:
pretty_rule['Alert-threshold'] = rule['alert-threshold']
if 'block-threshold' in rule:
pretty_rule['Block-threshold'] = rule['block-threshold']
if 'data-object' in rule:
pretty_rule['Data-object'] = rule['data-object']
if 'log-severity' in rule:
pretty_rule['Log-severity'] = rule['log-severity']
if 'description' in rule:
pretty_rule['Description'] = rule['description']
return pretty_rule
| def prettify_data_filtering_rule(rule):
pretty_rule = {
'Name': rule.get('@name'),
}
if 'application' in rule and 'member' in rule['application']:
pretty_rule['Application'] = rule['application']['member']
if 'file-type' in rule and 'member' in rule['file-type']:
pretty_rule['File-type'] = rule['file-type']['member']
if 'direction' in rule:
pretty_rule['Direction'] = rule['direction']
if 'alert-threshold' in rule:
pretty_rule['Alert-threshold'] = rule['alert-threshold']
if 'block-threshold' in rule:
pretty_rule['Block-threshold'] = rule['block-threshold']
if 'data-object' in rule:
pretty_rule['Data-object'] = rule['data-object']
if 'log-severity' in rule:
pretty_rule['Log-severity'] = rule['log-severity']
if 'description' in rule:
pretty_rule['Description'] = rule['description']
return pretty_rule
|
48,650 | def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you may
need to use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (default is autodetected)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # default
# Try to find which packages are provided by the project
# Use default if that fails or if several packages are found
#
# The importlib.metadata module is used for projects whose name is different
# from the runtime Python package name (example: PyYAML/yaml)
if sys.version_info >= (3, 8):
dist = distribution(project).read_text("top_level.txt")
if dist is not None:
packages = [name for name in dist.split() if not name.startswith("_")]
if len(packages) == 1:
package = packages[0]
print(f'Using detected package "{package}" for project "{project}"', file=sys.stderr)
print("Suggestion: Try again with --package argument if that's not what you wanted", file=sys.stderr)
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project)
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(" 2. See CONTRIBUTING.md for all the best practices and tips to improve your stub")
print(' 3. Run autofixes and tests locally using "python3 -m ./scripts/run_all_checks.py"')
print(" See tests/README.md to run individual tests")
print(" 4. Commit the changes on a new branch and create a typeshed PR (don't force-push!)")
| def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you may
need to use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (default is autodetected)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # default
# Try to find which packages are provided by the project
# Use default if that fails or if several packages are found
#
# The importlib.metadata module is used for projects whose name is different
# from the runtime Python package name (example: PyYAML/yaml)
if sys.version_info >= (3, 8):
dist = distribution(project).read_text("top_level.txt")
if dist is not None:
packages = [name for name in dist.split() if not name.startswith("_")]
if len(packages) == 1:
package = packages[0]
print(f'Using detected package "{package}" for project "{project}"', file=sys.stderr)
print("Suggestion: Try again with --package argument if that's not what you wanted", file=sys.stderr)
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project)
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
    print(" 2. Optionally run tests and autofixes (see tests/README.md for details)")
print(" 3. Commit the changes on a new branch and create a typeshed PR (don't force-push!)")
|
55,061 | def sparse_hamiltonian(H):
    r"""Computes the sparse matrix representation of a Hamiltonian in the computational basis.
Args:
H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be
measured
Returns:
coo_matrix: a sparse matrix in scipy COOrdinate format with the dimension of
:math:`(2^n, 2^n)` where :math:`n` is the number of wires
**Example:**
This function can be used by passing a `qml.Hamiltonian` object as:
>>> coeffs = [0.5, 0.5]
>>> obs = [qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]),
... qml.Identity(wires=[0]) @ qml.PauliZ(wires=[1])]
>>> H = qml.Hamiltonian(coeffs, obs)
>>> H_sparse = sparse_hamiltonian(H)
The resulting sparse matrix can be either used directly or transformed into a numpy array:
>>> H_sparse.toarray()
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
if not isinstance(H, qml.Hamiltonian):
raise TypeError("Passed Hamiltonian must be of type `qml.Hamiltonian`")
n = len(H.wires)
matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype="complex128")
for coeffs, ops in zip(H.coeffs, H.ops):
obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs]
mat = [scipy.sparse.eye(2, format="coo")] * n
for i, j in enumerate(ops.wires):
mat[j] = obs[i]
matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format="coo"), mat) * coeffs
return matrix.tocoo()
| def sparse_hamiltonian(H):
    r"""Computes the sparse matrix representation of a Hamiltonian in the computational basis.
Args:
H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be
measured
Returns:
coo_matrix: a sparse matrix in scipy coordinate list (COO) format with dimension
:math:`(2^n, 2^n)`, where :math:`n` is the number of wires
**Example:**
This function can be used by passing a `qml.Hamiltonian` object as:
>>> coeffs = [0.5, 0.5]
>>> obs = [qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]),
... qml.Identity(wires=[0]) @ qml.PauliZ(wires=[1])]
>>> H = qml.Hamiltonian(coeffs, obs)
>>> H_sparse = sparse_hamiltonian(H)
The resulting sparse matrix can be either used directly or transformed into a numpy array:
>>> H_sparse.toarray()
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
if not isinstance(H, qml.Hamiltonian):
raise TypeError("Passed Hamiltonian must be of type `qml.Hamiltonian`")
n = len(H.wires)
matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype="complex128")
for coeffs, ops in zip(H.coeffs, H.ops):
obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs]
mat = [scipy.sparse.eye(2, format="coo")] * n
for i, j in enumerate(ops.wires):
mat[j] = obs[i]
matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format="coo"), mat) * coeffs
return matrix.tocoo()
|
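A standalone sketch of the core construction in `sparse_hamiltonian`: each term is a Kronecker product of 2x2 blocks, with the observable's matrix on its wires and sparse identities everywhere else. Shown here for a single 0.5 * Z(0) term on two wires, without any PennyLane dependency:

import functools
import numpy as np
import scipy.sparse

n_wires = 2
Z = scipy.sparse.coo_matrix(np.array([[1, 0], [0, -1]], dtype=complex))
blocks = [scipy.sparse.eye(2, format="coo")] * n_wires
blocks[0] = Z  # the observable acts on wire 0

term = functools.reduce(lambda a, b: scipy.sparse.kron(a, b, format="coo"), blocks)
print((0.5 * term).toarray())  # diag(0.5, 0.5, -0.5, -0.5)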
56,686 | def ol_import_request(item, retries=5, servername=None, require_marc=True):
"""Requests OL to import an item and retries on server errors.
"""
# logger uses batch_id:id for item.data identifier if no item.ia_id
logger.info("importing %s", item.ia_id)
for i in range(retries):
if i != 0:
logger.info("sleeping for 5 seconds before next attempt.")
time.sleep(5)
try:
ol = get_ol(servername=servername)
if item.data:
return ol.import_data(item.data)
return ol.import_ocaid(item.ia_id, require_marc=require_marc)
except IOError as e:
logger.warning("Failed to contact OL server. error=%s", e)
except OLError as e:
if e.code < 500:
return e.text
logger.warning("Failed to contact OL server. error=%s", e)
| def ol_import_request(item, retries=5, servername=None, require_marc=True):
"""Requests OL to import an item and retries on server errors.
"""
# logger uses batch_id:id for item.data identifier if no item.ia_id
_id = item.ia_id or "%s:%s" % (item.batch_id, item.id)
logger.info("importing %s", _id)
for i in range(retries):
if i != 0:
logger.info("sleeping for 5 seconds before next attempt.")
time.sleep(5)
try:
ol = get_ol(servername=servername)
if item.data:
return ol.import_data(item.data)
return ol.import_ocaid(item.ia_id, require_marc=require_marc)
except IOError as e:
logger.warning("Failed to contact OL server. error=%s", e)
except OLError as e:
if e.code < 500:
return e.text
logger.warning("Failed to contact OL server. error=%s", e)
|
58,224 | def maybe_stringify(obj):
# type: (Any) -> Optional[str]
if obj is not None:
return stringify(obj)
return None
| def maybe_stringify(obj):
# type: (Any) -> Optional[str]
if obj is None:
return None
return stringify(obj)
|
58,636 | def validate_stories(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
# Check if a valid setting for `max_history` was given
if not isinstance(args.max_history, int) or args.max_history < 1:
logger.error("You have to provide a positive integer for --max-history.")
sys.exit(1)
# Prepare story and domain file import
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
# Loads the stories
validator = loop.run_until_complete(Validator.from_importer(file_importer))
# If names are unique, look for story conflicts
everything_is_alright = validator.verify_story_structure(
not args.fail_on_warnings, max_history=args.max_history
)
sys.exit(0) if everything_is_alright else sys.exit(1)
| def validate_stories(args):
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
# Check if a valid setting for `max_history` was given
if not isinstance(args.max_history, int) or args.max_history < 1:
logger.error("You have to provide a positive integer for --max-history.")
sys.exit(1)
# Prepare story and domain file import
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
# Loads the stories
validator = loop.run_until_complete(Validator.from_importer(file_importer))
# If names are unique, look for story conflicts
everything_is_alright = validator.verify_story_structure(
not args.fail_on_warnings, max_history=args.max_history
)
if not everything_is_alright:
print_error("Story validation completed with errors.")
sys.exit(1)
|
1,381 | def test_iforest_with_uniform_data():
"""Test whether iTrees predict inliers when using uniform data."""
# 2-d array of all 1s
X = np.ones((100, 10))
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(X + 1) == 1)
assert all(iforest.predict(X - 1) == 1)
# 2-d array where columns contain the same value across rows
X = np.repeat(np.random.randn(1, 10), 100, 0)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
# Single row
X = np.random.randn(1, 10)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
| def test_iforest_with_uniform_data():
"""Test whether IsolationForest predicts inliers when using uniform data."""
# 2-d array of all 1s
X = np.ones((100, 10))
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(X + 1) == 1)
assert all(iforest.predict(X - 1) == 1)
# 2-d array where columns contain the same value across rows
X = np.repeat(np.random.randn(1, 10), 100, 0)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
# Single row
X = np.random.randn(1, 10)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(np.random.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
|
707 | def install_reactor(reactor_path, event_loop_path):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path. Also installs the asyncio event loop as specified in the import
path if the asyncio reactor is enabled."""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
| def install_reactor(reactor_path, event_loop_path=None):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path. Also installs the asyncio event loop as specified in the import
path if the asyncio reactor is enabled."""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
|
43,632 | def s2_me_table(sz, n_spin_orbs):
r"""Generates the table of the matrix elements
:math:`\langle \alpha, beta \vert \hat{s}_1 \cdot \hat{s}_2 \vert \gamma, \delta \rangle`
of the two-particle spin operator :math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression,
.. math::
\langle \alpha, \beta \vert \hat{\bf{s}}_1 \cdot \hat {\bf{s}}_2
\vert \gamma, \delta \rangle = && \delta_{\alpha,\delta} \delta_{\beta,\gamma} \times \\
&& \left( \frac{1}{2} \delta_{m_\alpha, m_\delta+1} \delta_{m_\beta, m_\gamma-1}
+ \frac{1}{2} \delta_{m_\alpha, m_\delta-1} \delta_{m_\beta, m_\gamma+1}
+ m_\alpha m_\beta \delta_{m_\alpha, m_\delta} \delta_{m_\beta, m_\gamma} \right),
where :math:`\alpha` and :math:`m_\alpha` refer to the quantum numbers of the spatial
:math:`\varphi_\alpha({\bf r})` and spin :math:`\chi_{m_\alpha}(s_z)` wave functions,
respectively, of the single-particle state :math:`\vert \alpha \rangle`.
**Example**
>>> sz = np.array([0.5, -0.5])
>>> print(s2_me_table(sz, 2))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
Args:
sz (array[float]): spin-projection quantum number of the spin-orbitals
n_spin_orbs (int): number of spin orbitals
Returns:
array: NumPy array with the table of matrix elements
"""
if sz.size != n_spin_orbs:
raise ValueError(
"Size of 'sz' must be equal to 'n_spin_orbs'; got {}".format(sz.size)
)
n = np.arange(n_spin_orbs)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
| def s2_me_table(sz, n_spin_orbs):
r"""Generates the table of the matrix elements
:math:`\langle \alpha, \beta \vert \hat{s}_1 \cdot \hat{s}_2 \vert \gamma, \delta \rangle`
of the two-particle spin operator :math:`\hat{s}_1 \cdot \hat{s}_2`.
The matrix elements are evaluated using the expression,
.. math::
\langle \alpha, \beta \vert \hat{\bf{s}}_1 \cdot \hat {\bf{s}}_2
\vert \gamma, \delta \rangle = && \delta_{\alpha,\delta} \delta_{\beta,\gamma} \times \\
&& \left( \frac{1}{2} \delta_{m_\alpha, m_\delta+1} \delta_{m_\beta, m_\gamma-1}
+ \frac{1}{2} \delta_{m_\alpha, m_\delta-1} \delta_{m_\beta, m_\gamma+1}
+ m_\alpha m_\beta \delta_{m_\alpha, m_\delta} \delta_{m_\beta, m_\gamma} \right),
where :math:`\alpha` and :math:`m_\alpha` refer to the quantum numbers of the spatial
:math:`\varphi_\alpha({\bf r})` and spin :math:`\chi_{m_\alpha}(s_z)` wave functions,
respectively, of the single-particle state :math:`\vert \alpha \rangle`.
**Example**
>>> sz = np.array([0.5, -0.5])
>>> print(s2_me_table(sz, 2))
[[ 0. 0. 0. 0. 0.25]
[ 0. 1. 1. 0. -0.25]
[ 1. 0. 0. 1. -0.25]
[ 1. 1. 1. 1. 0.25]
[ 0. 1. 0. 1. 0.5 ]
[ 1. 0. 1. 0. 0.5 ]]
Args:
sz (array[float]): spin-projection quantum number of the spin-orbitals
n_spin_orbs (int): number of spin orbitals
Returns:
array: NumPy array with the table of matrix elements
"""
if sz.size != n_spin_orbs:
raise ValueError(
"Size of 'sz' must be equal to 'n_spin_orbs'; got {}".format(sz.size)
)
n = np.arange(n_spin_orbs)
alpha = n.reshape(-1, 1, 1, 1)
beta = n.reshape(1, -1, 1, 1)
gamma = n.reshape(1, 1, -1, 1)
delta = n.reshape(1, 1, 1, -1)
# we only care about indices satisfying the following boolean mask
mask = np.logical_and(alpha // 2 == delta // 2, beta // 2 == gamma // 2)
# diagonal elements
diag_mask = np.logical_and(sz[alpha] == sz[delta], sz[beta] == sz[gamma])
diag_indices = np.argwhere(np.logical_and(mask, diag_mask))
diag_values = (sz[alpha] * sz[beta]).flatten()
diag = np.vstack([diag_indices.T, diag_values]).T
# off-diagonal elements
m1 = np.logical_and(sz[alpha] == sz[delta] + 1, sz[beta] == sz[gamma] - 1)
m2 = np.logical_and(sz[alpha] == sz[delta] - 1, sz[beta] == sz[gamma] + 1)
off_diag_mask = np.logical_and(mask, np.logical_or(m1, m2))
off_diag_indices = np.argwhere(off_diag_mask)
off_diag_values = np.full([len(off_diag_indices)], 0.5)
off_diag = np.vstack([off_diag_indices.T, off_diag_values]).T
# combine the off diagonal and diagonal tables into a single table
return np.vstack([diag, off_diag])
|
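The broadcasting trick above (reshaping index ranges so that comparisons produce an index table) can be sketched in isolation with plain NumPy; the values below are illustrative:

import numpy as np

sz = np.array([0.5, -0.5])            # illustrative spin projections for two spin-orbitals
n = np.arange(2)
alpha = n.reshape(-1, 1)
beta = n.reshape(1, -1)
mask = sz[alpha] == sz[beta]          # broadcasting yields a 2x2 boolean mask
indices = np.argwhere(mask)           # [[0 0], [1 1]]
values = (sz[alpha] * sz[beta])[mask]
print(np.vstack([indices.T, values]).T)   # one row per matrix element: index pair plus value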
45,992 | def _image_shape_to_corners(image: torch.Tensor) -> torch.Tensor:
"""Convert image size to 4 corners representation in clockwise order.
Args:
image: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of channels
Return:
the corners of the image.
"""
if len(image.shape) != 4:
raise ValueError('patch should be of size B, C, H, W')
batch_size = image.shape[0]
image_width = image.shape[-2]
image_height = image.shape[-1]
corners = torch.tensor(
[[0, 0], [image_width, 0], [image_width, image_height], [0, image_height]],
device=image.device,
dtype=image.dtype,
requires_grad=False,
)
corners = corners.repeat(batch_size, 1, 1)
return corners
| def _image_shape_to_corners(image: torch.Tensor) -> torch.Tensor:
"""Convert image size to 4 corners representation in clockwise order.
Args:
image: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of channels
Return:
the corners of the image.
"""
if len(image.shape) != 4:
raise ValueError('patch should be of size B, C, H, W')
batch_size = image.shape[0]
image_width = image.shape[-2]
image_height = image.shape[-1]
corners = torch.tensor(
[[0, 0], [image_width - 1, 0], [image_width, image_height], [0, image_height]],
device=image.device,
dtype=image.dtype,
requires_grad=False,
)
corners = corners.repeat(batch_size, 1, 1)
return corners
|
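A small sketch of the batching step above: torch.Tensor.repeat tiles one set of corner coordinates across the batch dimension (the image size here is hypothetical):

import torch

corners = torch.tensor([[0., 0.], [5., 0.], [5., 4.], [0., 4.]])  # clockwise corners of a hypothetical 5x4 image
batched = corners.repeat(3, 1, 1)   # tile to shape (batch, 4, 2)
print(batched.shape)                # torch.Size([3, 4, 2])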
39,477 | def usecase_series_pct_change(input_data):
start_time = time.time()
res = input_data.pct_change(periods=1, limit=None, freq=None)
finish_time = time.time()
res_time = finish_time - start_time
return res_time, res
| def usecase_series_pct_change_periods_1_limit_None_freq_None(input_data):
start_time = time.time()
res = input_data.pct_change(periods=1, limit=None, freq=None)
finish_time = time.time()
res_time = finish_time - start_time
return res_time, res
|
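For reference, a minimal illustration of what Series.pct_change(periods=1) computes (pandas assumed available):

import pandas as pd

s = pd.Series([4.0, 6.0, 3.0])
# each element becomes (current - previous) / previous; the first has no predecessor
print(s.pct_change(periods=1).tolist())   # [nan, 0.5, -0.5]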
48,436 | def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if no_remove:
no_remove = '--no-remove'
else:
no_remove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
| def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, no_remove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if fail_on_autoremove:
fail_on_autoremove = '--no-remove'
else:
fail_on_autoremove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, no_remove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, no_remove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
|
847 | def test_log_expand_factor():
assert (log(18)/log(3) - 2).expand(factor=True) == log(2)/log(3)
assert (log(12)/log(2)).expand(factor=True) == log(3)/log(2) + 2
assert (log(15)/log(3)).expand(factor=True) == 1 + log(5)/log(3)
assert (log(2)/(-log(12) + log(24))).expand(factor=True) == 1
assert expand_log(log(12), factor=True) == log(3) + 2*log(2)
assert expand_log(log(21)/log(7), factor=False) == (log(3) + log(7))/log(7)
assert expand_log(log(45)/log(5) + 4, factor=False) == (log(5) + 2*log(3))/log(5) + 4
assert expand_log(log(45)/log(5) + log(26), factor=True) == log(2) + log(13) + (log(5) + 2*log(3))/log(5)
| def test_log_expand_factor():
assert (log(18)/log(3) - 2).expand(factor=True) == log(2)/log(3)
assert (log(12)/log(2)).expand(factor=True) == log(3)/log(2) + 2
assert (log(15)/log(3)).expand(factor=True) == 1 + log(5)/log(3)
assert (log(2)/(-log(12) + log(24))).expand(factor=True) == 1
assert expand_log(log(12), factor=True) == log(3) + 2*log(2)
assert expand_log(log(21)/log(7), factor=False) == (log(3) + log(7))/log(7)
assert expand_log(log(45)/log(5) + log(20) + log(4), factor=False) == (log(5) + 2*log(3))/log(5) + log(20) + 2*log(2)
assert expand_log(log(45)/log(5) + log(26), factor=True) == log(2) + log(13) + (log(5) + 2*log(3))/log(5)
|
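A quick standalone illustration of the factor=True behaviour exercised above (SymPy assumed):

from sympy import log, expand_log

# factor=True rewrites the argument over its prime factorisation, e.g. 12 = 2**2 * 3
print(expand_log(log(12), factor=True))          # equals log(3) + 2*log(2)
print(expand_log(log(21)/log(7), factor=False))  # equals (log(3) + log(7))/log(7)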
51,108 | def init_entity_id_from_config(hass, entity, config, entity_id_format):
"""Set entity_id from object_id if defined in config."""
if CONF_OBJECT_ID in config:
if entity_id_format != "":
entity.entity_id = generate_entity_id(
entity_id_format, config[CONF_OBJECT_ID], None, hass
)
else:
_LOGGER.warning("Field '_entity_id_format' not configurate")
| def init_entity_id_from_config(hass, entity, config, entity_id_format):
"""Set entity_id from object_id if defined in config."""
if CONF_OBJECT_ID in config:
entity.entity_id = generate_entity_id(
entity_id_format, config[CONF_OBJECT_ID], None, hass
)
|
20,029 | def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
| def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
13,795 | def get_component_templates(courselike, library=False): # lint-amnesty, pylint: disable=too-many-statements
"""
Returns the applicable component templates that can be used by the specified course or library.
"""
def create_template_dict(name, category, support_level, boilerplate_name=None, tab="common", hinted=False):
"""
Creates a component template dict.
Parameters
display_name: the user-visible name of the component
category: the type of component (problem, html, etc.)
support_level: the support level of this component
boilerplate_name: name of boilerplate for filling in default values. May be None.
hinted: True if hinted problem else False
tab: common(default)/advanced, which tab it goes in
"""
return {
"display_name": name,
"category": category,
"boilerplate_name": boilerplate_name,
"hinted": hinted,
"tab": tab,
"support_level": support_level
}
def component_support_level(editable_types, name, template=None):
"""
Returns the support level for the given xblock name/template combination.
Args:
editable_types: a QuerySet of xblocks with their support levels
name: the name of the xblock
template: optional template for the xblock
Returns:
If XBlockStudioConfigurationFlag is enabled, returns the support level
(see XBlockStudioConfiguration) or False if this xblock name/template combination
has no Studio support at all. If XBlockStudioConfigurationFlag is disabled,
simply returns True.
"""
# If the Studio support feature is disabled, return True for all.
if not XBlockStudioConfigurationFlag.is_enabled():
return True
if template is None:
template = ""
extension_index = template.rfind(".yaml")
if extension_index >= 0:
template = template[0:extension_index]
for block in editable_types:
if block.name == name and block.template == template:
return block.support_level
return False
def create_support_legend_dict():
"""
Returns a dict of settings information for the display of the support level legend.
"""
return {
"show_legend": XBlockStudioConfigurationFlag.is_enabled(),
"allow_unsupported_xblocks": allow_unsupported,
"documentation_label": _("{platform_name} Support Levels:").format(platform_name=settings.PLATFORM_NAME)
}
component_display_names = {
'discussion': _("Discussion"),
'html': _("HTML"),
'problem': _("Problem"),
'video': _("Video"),
'openassessment': _("Open Response")
}
component_templates = []
categories = set()
# The component_templates array is in the order of "advanced" (if present), followed
# by the components in the order listed in COMPONENT_TYPES.
component_types = COMPONENT_TYPES[:]
# Libraries do not support discussions and openassessment
component_not_supported_by_library = ['discussion', 'openassessment']
if library:
component_types = [component for component in component_types
if component not in set(component_not_supported_by_library)]
component_types = _filter_disabled_blocks(component_types)
#Filter out discussion component from component_types if non-legacy discussion provider is configured for course key
component_types = _filter_discussion_for_non_legacy_provider(component_types, courselike.location.course_key)
# Content Libraries currently don't allow opting in to unsupported xblocks/problem types.
allow_unsupported = getattr(courselike, "allow_unsupported_xblocks", False)
for category in component_types: # lint-amnesty, pylint: disable=too-many-nested-blocks
authorable_variations = authorable_xblocks(allow_unsupported=allow_unsupported, name=category)
support_level_without_template = component_support_level(authorable_variations, category)
templates_for_category = []
component_class = _load_mixed_class(category)
if support_level_without_template:
# add the default template with localized display name
# TODO: Once mixins are defined per-application, rather than per-runtime,
# this should use a cms mixed-in class. (cpennington)
template_id = None
display_name = xblock_type_display_name(category, _('Blank')) # this is the Blank Advanced problem
# The first template that is given should be Blank Assessment Template
if category == 'openassessment':
display_name = _("Blank Open Response Assessment")
template_id = "blank-assessment"
templates_for_category.append(
create_template_dict(display_name, category, support_level_without_template, template_id, 'advanced')
)
categories.add(category)
# add boilerplates
if hasattr(component_class, 'templates'):
for template in component_class.templates():
filter_templates = getattr(component_class, 'filter_templates', None)
if not filter_templates or filter_templates(template, courselike):
template_id = template.get('template_id')
support_level_with_template = component_support_level(
authorable_variations, category, template_id
)
if support_level_with_template:
# Tab can be 'common' 'advanced'
# Default setting is common/advanced depending on the presence of markdown
tab = 'common'
if template['metadata'].get('markdown') is None:
tab = 'advanced'
hinted = template.get('hinted', False)
templates_for_category.append(
create_template_dict(
_(template['metadata'].get('display_name')), # lint-amnesty, pylint: disable=translation-of-non-string
category,
support_level_with_template,
template_id,
tab,
hinted,
)
)
# Add any advanced problem types. Note that these are different xblocks being stored as Advanced Problems,
# currently not supported in libraries .
if category == 'problem' and not library:
disabled_block_names = [block.name for block in disabled_xblocks()]
advanced_problem_types = [advanced_problem_type for advanced_problem_type in ADVANCED_PROBLEM_TYPES
if advanced_problem_type['component'] not in disabled_block_names]
for advanced_problem_type in advanced_problem_types:
component = advanced_problem_type['component']
boilerplate_name = advanced_problem_type['boilerplate_name']
authorable_advanced_component_variations = authorable_xblocks(
allow_unsupported=allow_unsupported, name=component
)
advanced_component_support_level = component_support_level(
authorable_advanced_component_variations, component, boilerplate_name
)
if advanced_component_support_level:
try:
component_display_name = xblock_type_display_name(component)
except PluginMissingError:
log.warning('Unable to load xblock type %s to read display_name', component, exc_info=True)
else:
templates_for_category.append(
create_template_dict(
component_display_name,
component,
advanced_component_support_level,
boilerplate_name,
'advanced'
)
)
categories.add(component)
component_templates.append({
"type": category,
"templates": templates_for_category,
"display_name": component_display_names[category],
"support_legend": create_support_legend_dict()
})
# Libraries do not support advanced components at this time.
if library:
return component_templates
# Check if there are any advanced modules specified in the course policy.
# These modules should be specified as a list of strings, where the strings
# are the names of the modules in ADVANCED_COMPONENT_TYPES that should be
# enabled for the course.
course_advanced_keys = courselike.advanced_modules
advanced_component_templates = {
"type": "advanced",
"templates": [],
"display_name": _("Advanced"),
"support_legend": create_support_legend_dict()
}
advanced_component_types = _advanced_component_types(allow_unsupported)
# Set component types according to course policy file
if isinstance(course_advanced_keys, list):
for category in course_advanced_keys:
if category in advanced_component_types.keys() and category not in categories: # pylint: disable=consider-iterating-dictionary
# boilerplates not supported for advanced components
try:
component_display_name = xblock_type_display_name(category, default_display_name=category)
advanced_component_templates['templates'].append(
create_template_dict(
component_display_name,
category,
advanced_component_types[category]
)
)
categories.add(category)
except PluginMissingError:
# dhm: I got this once but it can happen any time the
# course author configures an advanced component which does
# not exist on the server. This code here merely
# prevents any authors from trying to instantiate the
# non-existent component type by not showing it in the menu
log.warning(
"Advanced component %s does not exist. It will not be added to the Studio new component menu.",
category
)
else:
log.error(
"Improper format for course advanced keys! %s",
course_advanced_keys
)
if advanced_component_templates['templates']:
component_templates.insert(0, advanced_component_templates)
return component_templates
| def get_component_templates(courselike, library=False): # lint-amnesty, pylint: disable=too-many-statements
"""
Returns the applicable component templates that can be used by the specified course or library.
"""
def create_template_dict(name, category, support_level, boilerplate_name=None, tab="common", hinted=False):
"""
Creates a component template dict.
Parameters
display_name: the user-visible name of the component
category: the type of component (problem, html, etc.)
support_level: the support level of this component
boilerplate_name: name of boilerplate for filling in default values. May be None.
hinted: True if hinted problem else False
tab: common(default)/advanced, which tab it goes in
"""
return {
"display_name": name,
"category": category,
"boilerplate_name": boilerplate_name,
"hinted": hinted,
"tab": tab,
"support_level": support_level
}
def component_support_level(editable_types, name, template=None):
"""
Returns the support level for the given xblock name/template combination.
Args:
editable_types: a QuerySet of xblocks with their support levels
name: the name of the xblock
template: optional template for the xblock
Returns:
If XBlockStudioConfigurationFlag is enabled, returns the support level
(see XBlockStudioConfiguration) or False if this xblock name/template combination
has no Studio support at all. If XBlockStudioConfigurationFlag is disabled,
simply returns True.
"""
# If the Studio support feature is disabled, return True for all.
if not XBlockStudioConfigurationFlag.is_enabled():
return True
if template is None:
template = ""
extension_index = template.rfind(".yaml")
if extension_index >= 0:
template = template[0:extension_index]
for block in editable_types:
if block.name == name and block.template == template:
return block.support_level
return False
def create_support_legend_dict():
"""
Returns a dict of settings information for the display of the support level legend.
"""
return {
"show_legend": XBlockStudioConfigurationFlag.is_enabled(),
"allow_unsupported_xblocks": allow_unsupported,
"documentation_label": _("{platform_name} Support Levels:").format(platform_name=settings.PLATFORM_NAME)
}
component_display_names = {
'discussion': _("Discussion"),
'html': _("HTML"),
'problem': _("Problem"),
'video': _("Video"),
'openassessment': _("Open Response")
}
component_templates = []
categories = set()
# The component_templates array is in the order of "advanced" (if present), followed
# by the components in the order listed in COMPONENT_TYPES.
component_types = COMPONENT_TYPES[:]
# Libraries do not support discussions and openassessment
component_not_supported_by_library = ['discussion', 'openassessment']
if library:
component_types = [component for component in component_types
if component not in set(component_not_supported_by_library)]
component_types = _filter_disabled_blocks(component_types)
# Filter out discussion component from component_types if non-legacy discussion provider is configured for course
component_types = _filter_discussion_for_non_legacy_provider(component_types, courselike.location.course_key)
# Content Libraries currently don't allow opting in to unsupported xblocks/problem types.
allow_unsupported = getattr(courselike, "allow_unsupported_xblocks", False)
for category in component_types: # lint-amnesty, pylint: disable=too-many-nested-blocks
authorable_variations = authorable_xblocks(allow_unsupported=allow_unsupported, name=category)
support_level_without_template = component_support_level(authorable_variations, category)
templates_for_category = []
component_class = _load_mixed_class(category)
if support_level_without_template:
# add the default template with localized display name
# TODO: Once mixins are defined per-application, rather than per-runtime,
# this should use a cms mixed-in class. (cpennington)
template_id = None
display_name = xblock_type_display_name(category, _('Blank')) # this is the Blank Advanced problem
# The first template that is given should be Blank Assessment Template
if category == 'openassessment':
display_name = _("Blank Open Response Assessment")
template_id = "blank-assessment"
templates_for_category.append(
create_template_dict(display_name, category, support_level_without_template, template_id, 'advanced')
)
categories.add(category)
# add boilerplates
if hasattr(component_class, 'templates'):
for template in component_class.templates():
filter_templates = getattr(component_class, 'filter_templates', None)
if not filter_templates or filter_templates(template, courselike):
template_id = template.get('template_id')
support_level_with_template = component_support_level(
authorable_variations, category, template_id
)
if support_level_with_template:
# Tab can be 'common' 'advanced'
# Default setting is common/advanced depending on the presence of markdown
tab = 'common'
if template['metadata'].get('markdown') is None:
tab = 'advanced'
hinted = template.get('hinted', False)
templates_for_category.append(
create_template_dict(
_(template['metadata'].get('display_name')), # lint-amnesty, pylint: disable=translation-of-non-string
category,
support_level_with_template,
template_id,
tab,
hinted,
)
)
# Add any advanced problem types. Note that these are different xblocks being stored as Advanced Problems,
# currently not supported in libraries .
if category == 'problem' and not library:
disabled_block_names = [block.name for block in disabled_xblocks()]
advanced_problem_types = [advanced_problem_type for advanced_problem_type in ADVANCED_PROBLEM_TYPES
if advanced_problem_type['component'] not in disabled_block_names]
for advanced_problem_type in advanced_problem_types:
component = advanced_problem_type['component']
boilerplate_name = advanced_problem_type['boilerplate_name']
authorable_advanced_component_variations = authorable_xblocks(
allow_unsupported=allow_unsupported, name=component
)
advanced_component_support_level = component_support_level(
authorable_advanced_component_variations, component, boilerplate_name
)
if advanced_component_support_level:
try:
component_display_name = xblock_type_display_name(component)
except PluginMissingError:
log.warning('Unable to load xblock type %s to read display_name', component, exc_info=True)
else:
templates_for_category.append(
create_template_dict(
component_display_name,
component,
advanced_component_support_level,
boilerplate_name,
'advanced'
)
)
categories.add(component)
component_templates.append({
"type": category,
"templates": templates_for_category,
"display_name": component_display_names[category],
"support_legend": create_support_legend_dict()
})
# Libraries do not support advanced components at this time.
if library:
return component_templates
# Check if there are any advanced modules specified in the course policy.
# These modules should be specified as a list of strings, where the strings
# are the names of the modules in ADVANCED_COMPONENT_TYPES that should be
# enabled for the course.
course_advanced_keys = courselike.advanced_modules
advanced_component_templates = {
"type": "advanced",
"templates": [],
"display_name": _("Advanced"),
"support_legend": create_support_legend_dict()
}
advanced_component_types = _advanced_component_types(allow_unsupported)
# Set component types according to course policy file
if isinstance(course_advanced_keys, list):
for category in course_advanced_keys:
if category in advanced_component_types.keys() and category not in categories: # pylint: disable=consider-iterating-dictionary
# boilerplates not supported for advanced components
try:
component_display_name = xblock_type_display_name(category, default_display_name=category)
advanced_component_templates['templates'].append(
create_template_dict(
component_display_name,
category,
advanced_component_types[category]
)
)
categories.add(category)
except PluginMissingError:
# dhm: I got this once but it can happen any time the
# course author configures an advanced component which does
# not exist on the server. This code here merely
# prevents any authors from trying to instantiate the
# non-existent component type by not showing it in the menu
log.warning(
"Advanced component %s does not exist. It will not be added to the Studio new component menu.",
category
)
else:
log.error(
"Improper format for course advanced keys! %s",
course_advanced_keys
)
if advanced_component_templates['templates']:
component_templates.insert(0, advanced_component_templates)
return component_templates
|
55,531 | def _parse_tuple(tup):
"""
Unpack the user input for getitem and setitem and compute ndim.
loc[a] -> ([a], :), 1D
loc[[a,b],] -> ([a,b], :),
loc[a,b] -> ([a], [b]), 0D
Parameters
----------
tup : tuple
User input to unpack.
Returns
-------
row_loc : list
List of row locators.
col_loc : list
List of column locators.
ndim : {0, 1, 2}
Number of dimensions of located dataset.
row_scaler : bool
True of `row_loc` is a scalar, False otherwise.
col_scaler : bool
True of `col_loc` is a scalar, False otherwise.
"""
row_loc, col_loc = slice(None), slice(None)
if is_tuple(tup):
row_loc = tup[0]
if len(tup) == 2:
col_loc = tup[1]
if len(tup) > 2:
raise IndexingError("Too many indexers")
else:
row_loc = tup
ndim = _compute_ndim(row_loc, col_loc)
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
row_loc = [row_loc] if row_scaler else row_loc
col_loc = [col_loc] if col_scaler else col_loc
return row_loc, col_loc, ndim, row_scaler, col_scaler
| def _parse_tuple(tup):
"""
Unpack the user input for getitem and setitem and compute ndim.
loc[a] -> ([a], :), 1D
loc[[a,b],] -> ([a,b], :),
loc[a,b] -> ([a], [b]), 0D
Parameters
----------
tup : tuple
User input to unpack.
Returns
-------
row_loc : list
List of row locators.
col_loc : list
List of column locators.
ndim : {0, 1, 2}
Number of dimensions of located dataset.
row_scaler : bool
True if `row_loc` is a scalar, False otherwise.
col_scaler : bool
True if `col_loc` is a scalar, False otherwise.
"""
row_loc, col_loc = slice(None), slice(None)
if is_tuple(tup):
row_loc = tup[0]
if len(tup) == 2:
col_loc = tup[1]
if len(tup) > 2:
raise IndexingError("Too many indexers")
else:
row_loc = tup
ndim = _compute_ndim(row_loc, col_loc)
row_scaler = is_scalar(row_loc)
col_scaler = is_scalar(col_loc)
row_loc = [row_loc] if row_scaler else row_loc
col_loc = [col_loc] if col_scaler else col_loc
return row_loc, col_loc, ndim, row_scaler, col_scaler
|
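A self-contained sketch of the same unpacking idea, with simplified stand-ins for the is_tuple/is_scalar/_compute_ndim helpers that live elsewhere in the codebase:

def parse_tuple_sketch(tup):
    # treat a 2-tuple as (row locator, column locator); anything else selects all columns
    if isinstance(tup, tuple) and len(tup) == 2:
        row_loc, col_loc = tup
    else:
        row_loc, col_loc = tup, slice(None)
    row_scalar = not isinstance(row_loc, (list, tuple, slice))
    col_scalar = not isinstance(col_loc, (list, tuple, slice))
    row_loc = [row_loc] if row_scalar else row_loc
    col_loc = [col_loc] if col_scalar else col_loc
    return row_loc, col_loc, row_scalar, col_scalar

print(parse_tuple_sketch("a"))        # (['a'], slice(None, None, None), True, False)
print(parse_tuple_sketch(("a", "b"))) # (['a'], ['b'], True, True)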
26,747 | def check_limit(value: int):
"""
This checks the limit passed to the view and raises BadRequest if the
limit exceeds the user-configured value.
"""
max_val = int(conf.get("api", "maximum_page_limit"))
spec = {'default': 100, 'maximum': max_val,
'minimum': 1, 'type': 'integer'}
if value > max_val:
# This message should not be formatted. If formatted please
# run tests
message = ("{value} is greater than the maximum of {max_val}\n"
"\n"
"Failed validating 'maximum' in schema:\n"
" {spec}\n"
"\nOn instance:\n {value}").format(value=value,
max_val=max_val, spec=spec)
raise BadRequest(title="Bad Request", detail=message)
return value
| def check_limit(value: int):
"""
This checks the limit passed to the view and raises BadRequest if the
limit exceeds the user-configured value.
"""
max_val = conf.getint("api", "maximum_page_limit")
spec = {'default': 100, 'maximum': max_val,
'minimum': 1, 'type': 'integer'}
if value > max_val:
# This message should not be formatted. If formatted please
# run tests
message = ("{value} is greater than the maximum of {max_val}\n"
"\n"
"Failed validating 'maximum' in schema:\n"
" {spec}\n"
"\nOn instance:\n {value}").format(value=value,
max_val=max_val, spec=spec)
raise BadRequest(title="Bad Request", detail=message)
return value
|
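The modified version above swaps int(conf.get(...)) for conf.getint(...); a standalone sketch of that accessor on a plain ConfigParser (section and option names are illustrative):

from configparser import ConfigParser

conf = ConfigParser()
conf.read_string("[api]\nmaximum_page_limit = 100\n")
max_val = conf.getint("api", "maximum_page_limit")   # parses and type-checks in one step
print(max_val, type(max_val))                        # 100 <class 'int'>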
30,638 | def list_watchlists_command(client: Client) -> CommandResults:
contents = []
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
'Last_update_timestamp', 'Classifier']
result = client.list_watchlists_request()
watchlists = result.get('results')
if not watchlists:
return 'No watchlists were found.'
for watchlist in watchlists:
contents.append({
'Name': watchlist.get('name'),
'ID': watchlist.get('id'),
'Description': watchlist.get('description'),
'Tags_enabled': watchlist.get('tags_enabled'),
'Alerts_enabled': watchlist.get('alerts_enabled'),
'create_timestamp': convert_unix_to_timestamp(watchlist.get('create_timestamp')),
'Last_update_timestamp': convert_unix_to_timestamp(watchlist.get('last_update_timestamp')),
'Report_ids': watchlist.get('report_ids'),
'Classifier': watchlist.get('classifier')
})
readable_output = tableToMarkdown('Watchlists list ', contents, headers, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Watchlist',
outputs_key_field='id',
outputs=contents,
readable_output=readable_output,
raw_response=result
)
return results
| def list_watchlists_command(client: Client) -> CommandResults:
contents = []
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
'Last_update_timestamp', 'Classifier']
result = client.list_watchlists_request()
watchlists = result.get('results')
if not watchlists:
return 'No watchlists were found.'
for watchlist in watchlists:
contents.append({
'Name': watchlist.get('name'),
'ID': watchlist.get('id'),
'Description': watchlist.get('description'),
'Tags_enabled': watchlist.get('tags_enabled'),
'Alerts_enabled': watchlist.get('alerts_enabled'),
'create_timestamp': convert_unix_to_timestamp(watchlist.get('create_timestamp')),
'Last_update_timestamp': convert_unix_to_timestamp(watchlist.get('last_update_timestamp')),
'Report_ids': watchlist.get('report_ids'),
'Classifier': watchlist.get('classifier')
})
readable_output = tableToMarkdown('Carbon Black Enterprise EDR Watchlists ', contents, headers, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Watchlist',
outputs_key_field='id',
outputs=contents,
readable_output=readable_output,
raw_response=result
)
return results
|
39,568 | def command_line_test_helper(remote_path, branch, pusher_path):
work_dir = "/".join(os.path.dirname(os.path.abspath(__file__)).split("/")[:-1]) + "/nbgitpuller"
try:
cmd = ['python3', 'pull.py', remote_path]
if branch is not None:
cmd += ['--branch_name', branch]
if pusher_path is not None:
cmd += ['--repo_dir', pusher_path]
sp.check_output(
cmd,
cwd=work_dir
).decode()
return True
except Exception:
return False
| def command_line_test_helper(remote_path, branch, pusher_path):
work_dir = "/".join(os.path.dirname(os.path.abspath(__file__)).split("/")[:-1]) + "/nbgitpuller"
try:
cmd = ['python3', 'pull.py', remote_path]
if branch is not None:
cmd += ['--branch-name', branch]
if pusher_path is not None:
cmd += ['--repo_dir', pusher_path]
sp.check_output(
cmd,
cwd=work_dir
).decode()
return True
except Exception:
return False
|
34,175 | def convert_training_data(
data_file: Text, out_file: Text, output_format: Text, language: Text
):
if not os.path.exists(data_file):
print_error(
"Data file '{}' does not exist. Provide a valid data file using the "
"'--data-file' argument.".format(data_file)
)
return
if output_format == "json":
td = training_data.load_data(data_file, language)
output = td.as_json(indent=2)
elif output_format == "md":
td = training_data.load_data(data_file, language)
output = td.as_markdown()
else:
print_error(
"Did not recognize data format. Supported data formats: 'json' and 'md'. "
"Specify the desired data format with '--format'."
)
return
write_to_file(out_file, output)
| def convert_training_data(
data_file: Text, out_file: Text, output_format: Text, language: Text
):
if not os.path.exists(data_file):
print_error(
"Data file '{}' does not exist. Provide a valid data file using the "
"the '--data-file' argument.".format(data_file)
)
return
if output_format == "json":
td = training_data.load_data(data_file, language)
output = td.as_json(indent=2)
elif output_format == "md":
td = training_data.load_data(data_file, language)
output = td.as_markdown()
else:
print_error(
"Did not recognize data format. Supported data formats: 'json' and 'md'. "
"Specify the desired data format with '--format'."
)
return
write_to_file(out_file, output)
|
31,331 | def main():
args = demisto.args()
build_pattern_args = assign_params(
pattern_algorithm=args.get('algorithm', 'raw'),
case_insensitive=args.get('case_insensitive', False)
)
include_pattern = build_pattern(**build_pattern_args, pattern=args['pattern'])
exclude_pattern = None
if args.get('exclude_pattern'):
exclude_pattern = build_pattern(**build_pattern_args, pattern=args['exclude_pattern'])
filter_options = argToList(args.get('filter_options', []))
output_option = args.get('summary', 'basic')
exclude_ids = []
if 'exclude_this_entry' not in filter_options:
exclude_ids.append(demisto.parentEntry()['id'])
ents = []
for ent in iterate_entries(
incident_id=args.get('incident_id'),
query_filter=assign_params(
categories=argToList(args.get('filter_categories')),
tags=argToList(args.get('filter_tags'))
),
entry_filter=EntryFilter(
include_pattern=include_pattern,
exclude_pattern=exclude_pattern,
node_paths=argToList(args.get('node_paths', 'Contents'))
)
):
if ent.entry['ID'] not in exclude_ids:
rent = {'ID': ent.entry['ID']}
if 'verbose' == output_option and ent.match:
rent['Summary'] = ent.match[0][:128]
ents.append(rent)
if 'first_entry' in filter_options:
if 'last_entry' in filter_options:
del ents[1:-1]
else:
ents = ents[:1]
elif 'last_entry' in filter_options:
ents = ents[-1:]
if not ents:
return_outputs('No entries matched')
else:
dry_run = argToBoolean(args.get('dry_run', False))
tags = argToList(args.get('tags', []))
for ent in ents:
ent['Tags'] = ','.join(tags)
if not dry_run:
description = args.get('description', '')
for ent in ents:
entry_id = ent['ID']
res = demisto.executeCommand('markAsEvidence', {
'id': entry_id,
'tags': ent['Tags'],
'description': description
})
if not res or is_error(res[0]):
return_error(f'Failed to mark an entry as evidence: entryID={entry_id}')
md = f'**Matched entries:** {len(ents)}'
if output_option != 'quiet':
header = assign_params(
ID='Entry ID',
Tags='Tags',
Summary='Summary' if 'verbose' == output_option else None
)
md += '\n' + tblToMd('', ents, headers=header.keys(), headerTransform=lambda h: header.get(h, ''))
return_outputs(md)
| def main():
args = demisto.args()
build_pattern_args = assign_params(
pattern_algorithm=args.get('algorithm', 'raw'),
case_insensitive=argToBoolean(args.get('case_insensitive', False))
)
include_pattern = build_pattern(**build_pattern_args, pattern=args['pattern'])
exclude_pattern = None
if args.get('exclude_pattern'):
exclude_pattern = build_pattern(**build_pattern_args, pattern=args['exclude_pattern'])
filter_options = argToList(args.get('filter_options', []))
output_option = args.get('summary', 'basic')
exclude_ids = []
if 'exclude_this_entry' not in filter_options:
exclude_ids.append(demisto.parentEntry()['id'])
ents = []
for ent in iterate_entries(
incident_id=args.get('incident_id'),
query_filter=assign_params(
categories=argToList(args.get('filter_categories')),
tags=argToList(args.get('filter_tags'))
),
entry_filter=EntryFilter(
include_pattern=include_pattern,
exclude_pattern=exclude_pattern,
node_paths=argToList(args.get('node_paths', 'Contents'))
)
):
if ent.entry['ID'] not in exclude_ids:
rent = {'ID': ent.entry['ID']}
if 'verbose' == output_option and ent.match:
rent['Summary'] = ent.match[0][:128]
ents.append(rent)
if 'first_entry' in filter_options:
if 'last_entry' in filter_options:
del ents[1:-1]
else:
ents = ents[:1]
elif 'last_entry' in filter_options:
ents = ents[-1:]
if not ents:
return_outputs('No entries matched')
else:
dry_run = argToBoolean(args.get('dry_run', False))
tags = argToList(args.get('tags', []))
for ent in ents:
ent['Tags'] = ','.join(tags)
if not dry_run:
description = args.get('description', '')
for ent in ents:
entry_id = ent['ID']
res = demisto.executeCommand('markAsEvidence', {
'id': entry_id,
'tags': ent['Tags'],
'description': description
})
if not res or is_error(res[0]):
return_error(f'Failed to mark an entry as evidence: entryID={entry_id}')
md = f'**Matched entries:** {len(ents)}'
if output_option != 'quiet':
header = assign_params(
ID='Entry ID',
Tags='Tags',
Summary='Summary' if 'verbose' == output_option else None
)
md += '\n' + tblToMd('', ents, headers=header.keys(), headerTransform=lambda h: header.get(h, ''))
return_outputs(md)
|
11,407 | def construct_iso8601(start=None, end=None, duration=None):
if duration is not None:
duration = _duration_to_iso8601(duration)
iso_str = None
if start is not None:
start = Serializer.serialize_iso(start)
if end is not None:
end = Serializer.serialize_iso(end)
iso_str = start + '/' + end
elif duration is not None:
iso_str = start + '/' + duration
else:
raise ValueError("Start time must be provided aling with duration or end time.")
elif end is not None:
end = Serializer.serialize_iso(end)
iso_str = duration + '/' + end
else:
iso_str = duration
return iso_str
| def construct_iso8601(start=None, end=None, duration=None):
if duration is not None:
duration = _duration_to_iso8601(duration)
iso_str = None
if start is not None:
start = Serializer.serialize_iso(start)
if end is not None:
end = Serializer.serialize_iso(end)
iso_str = start + '/' + end
elif duration is not None:
iso_str = start + '/' + duration
else:
raise ValueError("Start time must be provided aling with duration or end time.")
elif end is not None:
end = Serializer.serialize_iso(end)
iso_str = duration + '/' + end
else:
iso_str = duration
return iso_str
|
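For orientation, the three ISO 8601 interval shapes the function above can emit, shown with illustrative literal values:

start, end, duration = "2023-01-01T00:00:00Z", "2023-01-02T00:00:00Z", "PT24H"

print(start + "/" + end)        # start/end
print(start + "/" + duration)   # start/duration
print(duration + "/" + end)     # duration/end
# a bare duration is also valid when neither start nor end is given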
35,073 | def read_compute(tensor: te.Tensor, zero_point: int, scale: float, layout: str = None) -> te.Tensor:
"""A tensor expression which represents a read.
Parameters
----------
tensor : te.Tensor
The tensor to read.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
layout : Optional[str]
The layout of the tensor, either NHWC or NHCWB16.
Returns
-------
te.Tensor
The tensor having been read.
"""
read_attrs = {
"op": "ethosu_read",
"zero_point": zero_point,
"scale": scale,
}
if layout:
assert layout in {"NHWC", "NHCWB16"}
read_attrs["layout"] = layout
return te.compute(tensor.shape, lambda *i: tensor(*i), name="ethosu_read", attrs=read_attrs)
| def read_compute(tensor: te.Tensor, zero_point: int, scale: float, layout: Optional[str] = None) -> te.Tensor:
"""A tensor expression which represents a read.
Parameters
----------
tensor : te.Tensor
The tensor to read.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
layout : Optional[str]
The layout of the tensor, either NHWC or NHCWB16.
Returns
-------
te.Tensor
The tensor having been read.
"""
read_attrs = {
"op": "ethosu_read",
"zero_point": zero_point,
"scale": scale,
}
if layout:
assert layout in {"NHWC", "NHCWB16"}
read_attrs["layout"] = layout
return te.compute(tensor.shape, lambda *i: tensor(*i), name="ethosu_read", attrs=read_attrs)
|
10,304 | def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
outputfile = module.params['outputfile']
protocol = module.params['protocol'].lower()
output = ''
if(protocol == "tftp" or protocol == "ftp" or
protocol == "sftp" or protocol == "scp"):
transfer_status = doConfigBackUp(module, None, None)
else:
transfer_status = "Invalid Protocol option"
output = output + "\n Config Back Up status \n" + transfer_status
# Save it into the file
path = outputfile.rsplit('/', 1)
# cnos.debugOutput(path[0])
if not os.path.exists(path[0]):
os.makedirs(path[0])
with open(outputfile, "a") as fh:
fh.write(output)
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Config file transferred to server")
else:
module.fail_json(msg=errorMsg)
| def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
outputfile = module.params['outputfile']
protocol = module.params['protocol'].lower()
output = ''
if(protocol == "tftp" or protocol == "ftp" or
protocol == "sftp" or protocol == "scp"):
transfer_status = doConfigBackUp(module, None, None)
else:
transfer_status = "Invalid Protocol option"
output = output + "\n Config Back Up status \n" + transfer_status
# Save it into the file
path = outputfile.rsplit('/', 1)
# cnos.debugOutput(path[0])
if not os.path.exists(path[0]):
os.makedirs(path[0])
with open(outputfile, "a") as file:
fh.write(output)
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Config file transferred to server")
else:
module.fail_json(msg=errorMsg)
|
6,836 | def getWords(content):
return re.compile('\w+').findall(content) | def get_words(content):
return re.compile('\w+').findall(content) |
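A quick doctest-style check of the behaviour, which the rename does not change since only the function name differs:

>>> get_words("Hello, world! 42")
['Hello', 'world', '42']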
5,960 | def load_host_site():
"""trigger reload of site.py - now it will use the standard library instance that will take care of init"""
# the standard library will be the first element starting with the real prefix, not zip, must be present
import os
std_lib = os.path.dirname(os.__file__)
std_lib_suffix = std_lib[len(sys.real_prefix) :] # strip away the real prefix to keep just the suffix
reload(sys.modules["site"]) # noqa
# ensure standard library suffix/site-packages is on the new path
# notably Debian derivatives change site-packages constant to dist-packages, so will not get added
target = os.path.join("{}{}".format(sys.prefix, std_lib_suffix), "site-packages")
if target not in reversed(sys.path): # if wasn't automatically added do it explicitly
sys.path.append(target)
| def load_host_site():
"""trigger reload of site.py - now it will use the standard library instance that will take care of init"""
# the standard library will be the first element starting with the real prefix, not zip, must be present
import os
std_lib = os.path.dirname(os.__file__)
std_lib_suffix = std_lib[len(sys.real_prefix) :] # strip away the real prefix to keep just the suffix
reload(sys.modules["site"]) # noqa: F821
# ensure standard library suffix/site-packages is on the new path
# notably Debian derivatives change site-packages constant to dist-packages, so will not get added
target = os.path.join("{}{}".format(sys.prefix, std_lib_suffix), "site-packages")
if target not in reversed(sys.path): # if wasn't automatically added do it explicitly
sys.path.append(target)
|
4,049 | def do_dictsort(
value: dict, case_sensitive: bool = False, by: str = "key", reverse: bool = False
):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
if by == "key":
pos = 0
elif by == "value":
pos = 1
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse)
| def do_dictsort(
value: dict, case_sensitive: bool = False, by: str = "key", reverse: bool = False
) -> dict:
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
if by == "key":
pos = 0
elif by == "value":
pos = 1
else:
raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
if not case_sensitive:
value = ignore_case(value)
return value
return sorted(value.items(), key=sort_func, reverse=reverse)
|
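The filter can also be called directly from Python; a small sketch assuming Jinja2 is installed (do_dictsort is importable from jinja2.filters and returns a list of (key, value) tuples rather than a dict):

>>> from jinja2.filters import do_dictsort
>>> do_dictsort({'b': 2, 'A': 3, 'c': 1})
[('A', 3), ('b', 2), ('c', 1)]
>>> do_dictsort({'b': 2, 'A': 3, 'c': 1}, by='value')
[('c', 1), ('b', 2), ('A', 3)]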
30,497 | def errors_handel(res: List) -> Union[None, str]:
error_msg: Union[None, str] = None
if res and isinstance(res, list) and isinstance(res[0].get('Contents'), dict):
if 'data' not in res[0]['Contents']:
error_msg = res[0].get('Contents')
elif res[0]['Contents']['data'] is None:
error_msg = "Incidents not found."
else:
error_msg = f'failed to get incidents from demisto got {res}'
return error_msg
| def errors_handel(res: List) -> Union[None, str]:
error_msg: Union[None, str] = None
if res and isinstance(res, list) and isinstance(res[0].get('Contents'), dict):
if 'data' not in res[0]['Contents']:
error_msg = res[0].get('Contents')
elif res[0]['Contents']['data'] is None:
error_msg = "Incidents not found."
else:
error_msg = f'failed to get incidents from demisto.\nGot: {res}'
return error_msg
|
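A small illustration of the branches, given the errors_handel definition above; the res payloads are made-up stand-ins for what the calling Demisto script would pass in:

# Contents present but missing the 'data' key -> the Contents value itself is returned as the error.
errors_handel([{'Contents': {'response': 'bad query'}}])
# Contents['data'] is None -> 'Incidents not found.'
errors_handel([{'Contents': {'data': None}}])
# Empty or malformed result -> the generic 'failed to get incidents from demisto ...' message.
errors_handel([])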
38,928 | def multivalue_literal_field_for_schema(values: Tuple[Any, ...], field: Field) -> Field:
field = Field(
name=field.name,
type_=Union[tuple(Literal[value] for value in values)],
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return field
| def multivalue_literal_field_for_schema(values: Tuple[Any, ...], field: Field) -> Field:
return Field(
name=field.name,
type_=Union[tuple(Literal[value] for value in values)],
class_validators=field.class_validators,
model_config=field.model_config,
default=field.default,
required=field.required,
alias=field.alias,
schema=field.schema,
)
return field
|