id (int64, 11–59.9k) | original (string, length 33–150k) | modified (string, length 37–150k)
---|---|---|
44,764 | def save_model(lgb_model, path, conda_env=None, mlflow_model=Model()):
"""
Save a LightGBM model to a path on the local file system.
:param lgb_model: LightGBM model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model.
The following is an *example* dictionary representation of a Conda
environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
{'pip': ['lightgbm==2.3.0']}
]
}
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
"""
import lightgbm as lgb
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
model_data_subpath = "model.lgb"
model_data_path = os.path.join(path, model_data_subpath)
os.makedirs(path)
# Save a LightGBM model
lgb_model.save_model(model_data_path)
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.lightgbm",
data=model_data_subpath, env=conda_env_subpath)
mlflow_model.add_flavor(FLAVOR_NAME, lgb_version=lgb.__version__, data=model_data_subpath)
mlflow_model.save(os.path.join(path, "MLmodel"))
| def save_model(lgb_model, path, conda_env=None, mlflow_model=Model()):
"""
Save a LightGBM model to a path on the local file system.
:param lgb_model: LightGBM model (an instance of lightgbm.Booster) to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`get_default_conda_env()` environment is added to the model.
The following is an *example* dictionary representation of a Conda
environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
{'pip': ['lightgbm==2.3.0']}
]
}
:param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
"""
import lightgbm as lgb
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
model_data_subpath = "model.lgb"
model_data_path = os.path.join(path, model_data_subpath)
os.makedirs(path)
# Save a LightGBM model
lgb_model.save_model(model_data_path)
conda_env_subpath = "conda.yaml"
if conda_env is None:
conda_env = get_default_conda_env()
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, conda_env_subpath), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.lightgbm",
data=model_data_subpath, env=conda_env_subpath)
mlflow_model.add_flavor(FLAVOR_NAME, lgb_version=lgb.__version__, data=model_data_subpath)
mlflow_model.save(os.path.join(path, "MLmodel"))
|
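As a rough illustration of the flavor defined above, a minimal usage sketch might look like the following; the data, parameters, and output path are illustrative only and assume `lightgbm`, `numpy`, and `mlflow` are installed.

```python
# Hypothetical usage sketch for the save_model flavor above; names and data are made up.
import lightgbm as lgb
import numpy as np
import mlflow.lightgbm

# Train a tiny Booster on random data
X = np.random.rand(100, 4)
y = np.random.randint(0, 2, size=100)
booster = lgb.train({"objective": "binary", "verbose": -1},
                    lgb.Dataset(X, label=y), num_boost_round=5)

# Persist the Booster together with the MLmodel file and conda.yaml to a fresh local directory
mlflow.lightgbm.save_model(booster, path="lgb_model_dir")
```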
40,132 | def apt_remove_packages(*packages: str):
'''
Remove packages from Ubuntu / Debian / Mint / Kali systems.
:param packages: Iterable containing packages to remove.
'''
log_current_packages(packages, install=False)
return _run_shell_command_raise_on_return_code(f"sudo apt-get remove -y {' '.join(packages)}", f"Error in removal of package(s) {' '.join(packages)}", True)
| def apt_remove_packages(*packages: str):
'''
Remove packages from Ubuntu / Debian / Mint / Kali systems.
:param packages: Iterable containing packages to remove.
'''
return _run_shell_command_raise_on_return_code(f'sudo apt-get remove -y {" ".join(packages)}', f'Error in removal of package(s) {" ".join(packages)}', True)
return _run_shell_command_raise_on_return_code(f"sudo apt-get remove -y {' '.join(packages)}", f"Error in removal of package(s) {' '.join(packages)}", True)
|
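The helper `_run_shell_command_raise_on_return_code` is not shown in this row. As an assumption, a minimal sketch of the pattern such a helper typically implements (run a shell command and raise with a descriptive message on a non-zero exit code) could look like this; it is not the repository's actual implementation.

```python
# Illustrative sketch only; the repository's real helper is not shown above.
import subprocess

def run_shell_command_raise_on_return_code(command: str, error_message: str, verbose: bool = False) -> int:
    """Run a shell command and raise RuntimeError with error_message if it fails."""
    if verbose:
        print(f"Running: {command}")
    completed = subprocess.run(command, shell=True)
    if completed.returncode != 0:
        raise RuntimeError(f"{error_message} (exit code {completed.returncode})")
    return completed.returncode
```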
58,760 | def schedule_deformable_conv2d_nhwc(outs):
"""Schedule for deformable_conv2d_nhwc
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
| def schedule_deformable_conv2d_nhwc(outs):
"""Schedule for deformable_conv2d_nhwc.
We only use the default schedule here and rely on auto_scheduler.
Parameters
----------
outs: Array of Tensor
The computation graph description of deformable_conv2d_nhwc
in the format of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
return _default_schedule(outs, False)
|
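For context, `_default_schedule(outs, False)` is a generic fallback used by many TVM operator schedules. A hypothetical sketch of what such a fallback usually does (create a plain schedule over the output ops, with no auto-inlining) is shown below; it is not the project's exact helper.

```python
# Hypothetical sketch of a generic fallback schedule; not the project's exact _default_schedule.
from tvm import te

def default_schedule_sketch(outs, auto_inline=False):
    outs = [outs] if isinstance(outs, te.Tensor) else outs
    s = te.create_schedule([x.op for x in outs])
    # auto_inline handling omitted; the deformable_conv2d_nhwc path passes False anyway
    return s
```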
275 | def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0,
chains=None, cores=None, tune=500, progressbar=True,
model=None, random_seed=None, discard_tuned_samples=True,
compute_convergence_checks=True, **kwargs):
"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 500. The number of tuned samples are discarded
by default. See `discard_tuned_samples`.
step : function or iterable of functions
A step function or collection of functions. If there are variables without step methods,
step methods for those variables will be assigned automatically.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto : Choose a default initialization method automatically.
Currently, this is `'jitter+adapt_diag'`, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag : Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag : Same as `adapt_diag`, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi : Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point. This is discouraged.
* nuts : Run NUTS and estimate posterior mean and mass matrix from the trace.
n_init : int
Number of iterations of initializer. Only works for 'nuts' and 'ADVI'.
If 'ADVI', number of iterations, if 'nuts', number of draws.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to `trace.point(-1)` if there is a trace provided and model.test_point if not
(defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can
overwrite the default. For 'SMC' step method, `start` should be a list of dicts
of length = `chains`.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number `chain`. If None or a list of variables, the NDArray backend is used.
Passing either "text" or "sqlite" is taken as a shortcut to set up the corresponding
backend (with "mcmc" used as the base name). Ignored when using 'SMC' as step method.
chain_idx : int
Chain number used to store sample in backend. If `chains` is greater than one, chain
numbers will start here. Ignored when using 'SMC' as step method.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If `None`,
then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the
number of draws.
cores : int
The number of chains to run in parallel. If `None`, set to the number of CPUs in the
system, but at most 4. When using 'SMC', this parameter will be ignored if running with
`pm.SMC(parallel=False)`. Keep in mind that
some chains might themselves be multithreaded via openmp or BLAS. In those cases it might
be faster to set this to 1.
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in `with` context)
random_seed : int or list of ints
A list is accepted if `cores` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval. Ignored when using 'SMC'
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and `effective_n`.
Ignored when using 'SMC'
Returns
-------
trace : pymc3.backends.base.MultiTrace
A `MultiTrace` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to `sample` to be delivered to the
`step_method`s used during sampling. In particular, the NUTS step method accepts
a number of arguments. Common options are:
* target_accept: float in [0, 1]. The step size is tuned such that we approximate this
acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic
posteriors.
* max_treedepth: The maximum depth of the trajectory tree.
* step_scale: float, default 0.25
The initial guess for the step size scaled down by :math:`1/n^{1/4}`
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
>>> import pymc3 as pm
... n = 100
... h = 61
... alpha = 2
... beta = 2
.. code:: ipython
>>> with pm.Model() as model: # context management
... p = pm.Beta('p', alpha=alpha, beta=beta)
... y = pm.Binomial('y', n=n, p=p, observed=h)
... trace = pm.sample(2000, tune=1000, cores=4)
>>> pm.summary(trace)
mean sd mc_error hpd_2.5 hpd_97.5
p 0.604625 0.047086 0.00078 0.510498 0.694774
"""
model = modelcontext(model)
nuts_kwargs = kwargs.pop('nuts_kwargs', None)
if nuts_kwargs is not None:
warnings.warn("The nuts_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(nuts_kwargs)
step_kwargs = kwargs.pop('step_kwargs', None)
if step_kwargs is not None:
warnings.warn("The step_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(step_kwargs)
if cores is None:
cores = min(4, _cpu_count())
if isinstance(step, pm.step_methods.smc.SMC):
trace = smc.sample_smc(draws=draws,
step=step,
start=start,
cores=cores,
progressbar=progressbar,
model=model,
random_seed=random_seed)
else:
if 'njobs' in kwargs:
cores = kwargs['njobs']
warnings.warn(
"The njobs argument has been deprecated. Use cores instead.",
DeprecationWarning)
if 'nchains' in kwargs:
chains = kwargs['nchains']
warnings.warn(
"The nchains argument has been deprecated. Use chains instead.",
DeprecationWarning)
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError(
'Invalid value for `random_seed`. Must be tuple, list or int')
if 'chain' in kwargs:
chain_idx = kwargs['chain']
warnings.warn(
"The chain argument has been deprecated. Use chain_idx instead.",
DeprecationWarning)
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError('The model does not contain any free variables.')
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info('Auto-assigning NUTS sampler...')
start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
model=model, random_seed=random_seed,
progressbar=progressbar, **kwargs)
if start is None:
start = start_
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. "
"Falling back to elementwise auto-assignment.")
_log.debug('Exception in init nuts', exc_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {'draws': draws,
'step': step,
'start': start,
'trace': trace,
'chain': chain_idx,
'chains': chains,
'tune': tune,
'progressbar': progressbar,
'model': model,
'random_seed': random_seed,
'cores': cores, }
sample_args.update(kwargs)
has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])])
parallel = cores > 1 and chains > 1 and not has_population_samplers
if parallel:
_log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores))
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exc_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exc_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
_log.info('Population sampling ({} chains)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_population(**sample_args, parallelize=cores > 1)
else:
_log.info('Sequential sampling ({} chains in 1 job)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
discard = tune if discard_tuned_samples else 0
trace = trace[discard:]
if compute_convergence_checks:
if draws-tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(trace, model)
trace.report._log_summary()
return trace
| def sample(draws=500, step=None, init='auto', n_init=200000, start=None, trace=None, chain_idx=0,
chains=None, cores=None, tune=500, progressbar=True,
model=None, random_seed=None, discard_tuned_samples=True,
compute_convergence_checks=True, **kwargs):
"""Draw samples from the posterior using the given step methods.
Multiple step methods are supported via compound step methods.
Parameters
----------
draws : int
The number of samples to draw. Defaults to 500. The number of tuned samples are discarded
by default. See `discard_tuned_samples`.
step : function or iterable of functions
A step function or collection of functions. If there are variables without step methods,
step methods for those variables will be assigned automatically.
init : str
Initialization method to use for auto-assigned NUTS samplers.
* auto : Choose a default initialization method automatically.
Currently, this is `'jitter+adapt_diag'`, but this can change in the future.
If you depend on the exact behaviour, choose an initialization method explicitly.
* adapt_diag : Start with an identity mass matrix and then adapt a diagonal based on the
variance of the tuning samples. All chains use the test value (usually the prior mean)
as starting point.
* jitter+adapt_diag : Same as `adapt_diag`, but add uniform jitter in [-1, 1] to the
starting point in each chain.
* advi+adapt_diag : Run ADVI and then adapt the resulting diagonal mass matrix based on the
sample variance of the tuning samples.
* advi+adapt_diag_grad : Run ADVI and then adapt the resulting diagonal mass matrix based
on the variance of the gradients during tuning. This is **experimental** and might be
removed in a future release.
* advi : Run ADVI to estimate posterior mean and diagonal mass matrix.
* advi_map: Initialize ADVI with MAP and use MAP as starting point.
* map : Use the MAP as starting point. This is discouraged.
* nuts : Run NUTS and estimate posterior mean and mass matrix from the trace.
n_init : int
Number of iterations of initializer. Only works for 'nuts' and 'ADVI'.
If 'ADVI', number of iterations, if 'nuts', number of draws.
start : dict, or array of dict
Starting point in parameter space (or partial point)
Defaults to ``trace.point(-1)`` if there is a trace provided and ``model.test_point`` if not
(defaults to empty dict). Initialization methods for NUTS (see `init` keyword) can
overwrite the default. For 'SMC' step method, `start` should be a list of dicts
of length = `chains`.
trace : backend, list, or MultiTrace
This should be a backend instance, a list of variables to track, or a MultiTrace object
with past values. If a MultiTrace object is given, it must contain samples for the chain
number `chain`. If None or a list of variables, the NDArray backend is used.
Passing either "text" or "sqlite" is taken as a shortcut to set up the corresponding
backend (with "mcmc" used as the base name). Ignored when using 'SMC' as step method.
chain_idx : int
Chain number used to store sample in backend. If `chains` is greater than one, chain
numbers will start here. Ignored when using 'SMC' as step method.
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics and can also reveal multiple modes in the posterior. If `None`,
then set to either `cores` or 2, whichever is larger. For SMC the number of chains is the
number of draws.
cores : int
The number of chains to run in parallel. If `None`, set to the number of CPUs in the
system, but at most 4. When using 'SMC', this parameter will be ignored if running with
`pm.SMC(parallel=False)`. Keep in mind that
some chains might themselves be multithreaded via openmp or BLAS. In those cases it might
be faster to set this to 1.
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
progressbar : bool
Whether or not to display a progress bar in the command line. The bar shows the percentage
of completion, the sampling speed in samples per second (SPS), and the estimated remaining
time until completion ("expected time of arrival"; ETA).
model : Model (optional if in `with` context)
random_seed : int or list of ints
A list is accepted if `cores` is greater than one.
discard_tuned_samples : bool
Whether to discard posterior samples of the tune interval. Ignored when using 'SMC'
compute_convergence_checks : bool, default=True
Whether to compute sampler statistics like Gelman-Rubin and `effective_n`.
Ignored when using 'SMC'
Returns
-------
trace : pymc3.backends.base.MultiTrace
A `MultiTrace` object that contains the samples.
Notes
-----
Optional keyword arguments can be passed to `sample` to be delivered to the
`step_method`s used during sampling. In particular, the NUTS step method accepts
a number of arguments. Common options are:
* target_accept: float in [0, 1]. The step size is tuned such that we approximate this
acceptance rate. Higher values like 0.9 or 0.95 often work better for problematic
posteriors.
* max_treedepth: The maximum depth of the trajectory tree.
* step_scale: float, default 0.25
The initial guess for the step size scaled down by :math:`1/n^{1/4}`
You can find a full list of arguments in the docstring of the step methods.
Examples
--------
.. code:: ipython
>>> import pymc3 as pm
... n = 100
... h = 61
... alpha = 2
... beta = 2
.. code:: ipython
>>> with pm.Model() as model: # context management
... p = pm.Beta('p', alpha=alpha, beta=beta)
... y = pm.Binomial('y', n=n, p=p, observed=h)
... trace = pm.sample(2000, tune=1000, cores=4)
>>> pm.summary(trace)
mean sd mc_error hpd_2.5 hpd_97.5
p 0.604625 0.047086 0.00078 0.510498 0.694774
"""
model = modelcontext(model)
nuts_kwargs = kwargs.pop('nuts_kwargs', None)
if nuts_kwargs is not None:
warnings.warn("The nuts_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(nuts_kwargs)
step_kwargs = kwargs.pop('step_kwargs', None)
if step_kwargs is not None:
warnings.warn("The step_kwargs argument has been deprecated. Pass step "
"method arguments directly to sample instead",
DeprecationWarning)
kwargs.update(step_kwargs)
if cores is None:
cores = min(4, _cpu_count())
if isinstance(step, pm.step_methods.smc.SMC):
trace = smc.sample_smc(draws=draws,
step=step,
start=start,
cores=cores,
progressbar=progressbar,
model=model,
random_seed=random_seed)
else:
if 'njobs' in kwargs:
cores = kwargs['njobs']
warnings.warn(
"The njobs argument has been deprecated. Use cores instead.",
DeprecationWarning)
if 'nchains' in kwargs:
chains = kwargs['nchains']
warnings.warn(
"The nchains argument has been deprecated. Use chains instead.",
DeprecationWarning)
if chains is None:
chains = max(2, cores)
if isinstance(start, dict):
start = [start] * chains
if random_seed == -1:
random_seed = None
if chains == 1 and isinstance(random_seed, int):
random_seed = [random_seed]
if random_seed is None or isinstance(random_seed, int):
if random_seed is not None:
np.random.seed(random_seed)
random_seed = [np.random.randint(2 ** 30) for _ in range(chains)]
if not isinstance(random_seed, Iterable):
raise TypeError(
'Invalid value for `random_seed`. Must be tuple, list or int')
if 'chain' in kwargs:
chain_idx = kwargs['chain']
warnings.warn(
"The chain argument has been deprecated. Use chain_idx instead.",
DeprecationWarning)
if start is not None:
for start_vals in start:
_check_start_shape(model, start_vals)
# small trace warning
if draws == 0:
msg = "Tuning was enabled throughout the whole trace."
_log.warning(msg)
elif draws < 500:
msg = "Only %s samples in chain." % draws
_log.warning(msg)
draws += tune
if model.ndim == 0:
raise ValueError('The model does not contain any free variables.')
if step is None and init is not None and all_continuous(model.vars):
try:
# By default, try to use NUTS
_log.info('Auto-assigning NUTS sampler...')
start_, step = init_nuts(init=init, chains=chains, n_init=n_init,
model=model, random_seed=random_seed,
progressbar=progressbar, **kwargs)
if start is None:
start = start_
except (AttributeError, NotImplementedError, tg.NullTypeGradError):
# gradient computation failed
_log.info("Initializing NUTS failed. "
"Falling back to elementwise auto-assignment.")
_log.debug('Exception in init nuts', exc_info=True)
step = assign_step_methods(model, step, step_kwargs=kwargs)
else:
step = assign_step_methods(model, step, step_kwargs=kwargs)
if isinstance(step, list):
step = CompoundStep(step)
if start is None:
start = {}
if isinstance(start, dict):
start = [start] * chains
sample_args = {'draws': draws,
'step': step,
'start': start,
'trace': trace,
'chain': chain_idx,
'chains': chains,
'tune': tune,
'progressbar': progressbar,
'model': model,
'random_seed': random_seed,
'cores': cores, }
sample_args.update(kwargs)
has_population_samplers = np.any([isinstance(m, arraystep.PopulationArrayStepShared)
for m in (step.methods if isinstance(step, CompoundStep) else [step])])
parallel = cores > 1 and chains > 1 and not has_population_samplers
if parallel:
_log.info('Multiprocess sampling ({} chains in {} jobs)'.format(chains, cores))
_print_step_hierarchy(step)
try:
trace = _mp_sample(**sample_args)
except pickle.PickleError:
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exc_info=True)
parallel = False
except AttributeError as e:
if str(e).startswith("AttributeError: Can't pickle"):
_log.warning("Could not pickle model, sampling singlethreaded.")
_log.debug('Pickling error:', exc_info=True)
parallel = False
else:
raise
if not parallel:
if has_population_samplers:
_log.info('Population sampling ({} chains)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_population(**sample_args, parallelize=cores > 1)
else:
_log.info('Sequential sampling ({} chains in 1 job)'.format(chains))
_print_step_hierarchy(step)
trace = _sample_many(**sample_args)
discard = tune if discard_tuned_samples else 0
trace = trace[discard:]
if compute_convergence_checks:
if draws-tune < 100:
warnings.warn("The number of samples is too small to check convergence reliably.")
else:
trace.report._run_convergence_checks(trace, model)
trace.report._log_summary()
return trace
|
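Building on the Notes section of the docstring above, here is a small, hedged example of passing a NUTS keyword argument (here `target_accept`) directly to `sample`, which replaces the deprecated `nuts_kwargs`/`step_kwargs` arguments; the model and data are illustrative only.

```python
# Illustrative only: NUTS options are forwarded from sample(**kwargs) to the auto-assigned sampler.
import pymc3 as pm

with pm.Model():
    mu = pm.Normal("mu", mu=0.0, sigma=1.0)
    pm.Normal("obs", mu=mu, sigma=1.0, observed=[0.1, -0.3, 0.2])
    trace = pm.sample(1000, tune=1000, cores=2, target_accept=0.9)
```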
25,878 | def load_arguments(self, _):
StorageAccountTypes, UpgradeMode, CachingTypes = self.get_models('StorageAccountTypes', 'UpgradeMode', 'CachingTypes')
OperatingSystemTypes = self.get_models('OperatingSystemTypes')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of the vm's instance of the extension. Default: name of the extension.")
if StorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(StorageAccountTypes))
else:
# StorageAccountTypes was introduced in API version 2016_04_30_preview of the compute management package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
if self.supported_api_version(min_api='2018-04-01', operation_group='snapshots'):
c.argument('sku', arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS', 'Standard_ZRS']))
else:
c.argument('sku', arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public IP address, FQDN, and power states. The command will run slower.')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Managed Disks Max size: 32767 GB, Unmanaged Disks Max size: 4095 GB', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='2.1.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show all vm size supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disk", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VMPriorityTypes = self.get_models('VirtualMachinePriorityTypes', resource_type=ResourceType.MGMT_COMPUTE)
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Enable replicating using fault domains within the same cluster. Defaults to 'false' for any zonal deployment or with 100+ instances."
" See https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/en-us/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='(Preview) probe name from the existing load balancer, mainly used for rolling upgrade')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VMPriorityTypes, default=None),
help="Priority. Use 'Low' to run short-lived workloads in a cost-effective way")
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="(PREVIEW) The eviction policy for virtual machines in a low priority scale set.")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is 'vm<vm-index>.<vm-domain-name>.<..rest..>'")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade', arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="script lines separated by whitespace. Use @{file} to load from a file")
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM.', default=get_default_admin_username())
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', help='SSH public key or public key file path.', completer=FilesCompleter(), type=file_type)
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if StorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in StorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use '<lun>=<vaule1> <lun>=<value2>' to configure individual disk")
c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='(PREVIEW) Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='(Preview) Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None.')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Sku', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension')
with self.argument_context('vm extension set') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type, arg_group='Resource Id')
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'None']))
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.ignore('os_state') # service is not ready
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="2.1.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], )
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. <MajorVersion>.<MinorVersion>.<Patch>')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name (if in the same resource group) or resource id')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed with the image version omitted will not use this image version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use "<region>=<replica count>" to set the replica count for each region. If only the region is specified, the default replica count will be used.')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
| def load_arguments(self, _):
StorageAccountTypes, UpgradeMode, CachingTypes = self.get_models('StorageAccountTypes', 'UpgradeMode', 'CachingTypes')
OperatingSystemTypes = self.get_models('OperatingSystemTypes')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of the vm's instance of the extension. Default: name of the extension.")
if StorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(StorageAccountTypes))
else:
# StorageAccountTypes was introduced in api version 2016_04_30_preview of the Resource.MGMT.Compute package.
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
if self.supported_api_version(min_api='2018-04-01', operation_group='snapshots'):
c.argument('sku', arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS', 'Standard_ZRS']))
else:
c.argument('sku', arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collapse all the different image sources under 2 common arguments: --os-disk-source and --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. The command will run slowly')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Managed Disks Max size: 32767 GB. Unmanaged Disks Max size: 4095 GB', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='2.1.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, as produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'])
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show all vm size supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disk", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create') as c:
VMPriorityTypes = self.get_models('VirtualMachinePriorityTypes', resource_type=ResourceType.MGMT_COMPUTE)
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Enable replication using fault domains within the same cluster. Defaults to 'false' for any zonal deployment or with 100+ instances."
" See https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/en-us/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='(Preview) probe name from the existing load balancer, mainly used for rolling upgrade')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VMPriorityTypes, default=None),
help="Priority. Use 'Low' to run short-lived workloads in a cost-effective way")
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="(PREVIEW) The eviction policy for virtual machines in a low priority scale set.")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is 'vm<vm-index>.<vm-domain-name>.<..rest..>'")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade', arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load from a file")
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="Accept system or user assigned identities separated by spaces. Use '[system]' to refer to the system assigned identity, or a resource id to refer to a user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM.', default=get_default_admin_username())
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', help='SSH public key or public key file path.', completer=FilesCompleter(), type=file_type)
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if StorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in StorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use '<lun>=<vaule1> <lun>=<value2>' to configure individual disk")
c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='(PREVIEW) Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='(Preview) Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None.')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Sku', default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension')
with self.argument_context('vm extension set') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type, arg_group='Resource Id')
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'None']))
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.ignore('os_state') # service is not ready
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
with self.argument_context('sig update') as c:
c.ignore('gallery')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="2.1.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], )
with self.argument_context('sig image-version create') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. <MajorVersion>.<MinorVersion>.<Patch>')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
        c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='If set to true, VMs deployed with the image version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use "<region>=<replica count>" to set the replica count for each region. If only the region is specified, the default replica count will be used.')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
|
5,710 | def test_maxfev_test():
def cost(x):
return np.random.rand(1) * 1000 # never converged problem
for imaxfev in [1, 10, 50]:
for method in ['Powell']: # TODO: extend to more methods
result = optimize.minimize(cost, np.random.rand(10),
method=method,
options={'maxfev': imaxfev})
assert result["nfev"] == imaxfev
| def test_maxfev_test():
def cost(x):
return np.random.rand(1) * 1000 # never converged problem
for imaxfev in [1, 10, 50]:
for method in ['Powell']: # TODO: extend to more methods
            result = optimize.minimize(cost, np.random.rand(10),
method=method,
options={'maxfev': imaxfev})
assert result["nfev"] == imaxfev
|
1,513 | def get_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
p = importlib.import_module(package)
package_version = p.__version__
package_status['up_to_date'] = parse_version(
package_version) >= parse_version(min_version)
package_status['version'] = package_version
except ImportError:
traceback.print_exc()
package_status['up_to_date'] = False
package_status['version'] = ""
return package_status
| def get_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
p = importlib.import_module(package)
        package_version = p.__version__
package_status['up_to_date'] = parse_version(
package_version) >= parse_version(min_version)
package_status['version'] = package_version
except ImportError:
traceback.print_exc()
package_status['up_to_date'] = False
package_status['version'] = ""
return package_status
|
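The key detail in the helper above is that versions are compared with parse_version rather than as plain strings. A minimal sketch of why that matters, assuming the packaging library (which provides parse_version) is installed:

from packaging.version import parse as parse_version

# Parsed versions compare numerically per component, so 0.10.0 is newer than 0.9.2;
# a raw string comparison gets this wrong because "1" < "9" lexicographically.
assert parse_version("0.10.0") >= parse_version("0.9.2")
assert not ("0.10.0" >= "0.9.2")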
8,381 | def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
        How to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
'Piskunov' - uses an analytical solution used derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
        Atmospheric CO2 concentration in ppm. Only used if scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
        Maximum fractional error in the refraction conversion beyond which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
| def air_to_vac(wavelength, scheme='inversion', method='Griesen2006', co2=None,
precision=1e-12, maxiter=30):
"""
Converts air to vacuum wavelengths using different methods.
Parameters
----------
wavelength : `Quantity` object (number or sequence)
Air wavelengths with an astropy.unit.
scheme : str, optional
        How to convert from vacuum to air wavelengths. Options are:
'inversion' (default) - result is simply the inversion (1 / n) of the
refraction index of air. Griesen et al. (2006) report that the error
in naively inverting is less than 10^-9.
'Piskunov' - uses an analytical solution derived by Nikolai Piskunov
and used by the Vienna Atomic Line Database (VALD).
'iteration' - uses an iterative scheme to invert the index of refraction.
method : str, optional
Only used if scheme is 'inversion' or 'iteration'. One of the methods
in refraction_index().
co2 : number, optional
        Atmospheric CO2 concentration in ppm. Only used if scheme='inversion' and
method='Ciddor1996'. If not given, a default concentration of 450 ppm is used.
precision : float
        Maximum fractional error in the refraction conversion beyond which iteration will
be stopped. Only used if scheme='iteration'.
maxiter : integer
Maximum number of iterations to run. Only used if scheme='iteration'.
Returns
-------
vac_wavelength : `Quantity` object (number or sequence)
Vacuum wavelengths with the same unit as wavelength.
"""
VALID_SCHEMES = ['inversion', 'iteration', 'piskunov']
assert isinstance(scheme, str), 'scheme must be a string'
scheme = scheme.lower()
if scheme == 'inversion':
refr = refraction_index(wavelength, method=method, co2=co2)
#return wavelength * refr
elif scheme == 'piskunov':
wlum = wavelength.to(u.angstrom).value
sigma2 = (1e4 / wlum)**2
refr = (8.336624212083e-5 + 2.408926869968e-2 / (130.1065924522 - sigma2) +
1.599740894897e-4 / (38.92568793293 - sigma2)) + 1
#return wavelength * refr
elif scheme == 'iteration':
# Refraction index is a function of vacuum wavelengths.
# Iterate to get index of refraction that gives air wavelength that
# is consistent with the reverse transformation.
counter = 0
result = wavelength.copy()
refr = refraction_index(wavelength, method=method, co2=co2)
while True:
counter += 1
diff = wavelength * refr - result
if abs(diff.max().value) < precision:
break
#return wavelength * conv
if counter > maxiter:
raise RuntimeError("Reached maximum number of iterations "
"without reaching desired precision level.")
result += diff
refr = refraction_index(result, method=method, co2=co2)
else:
raise ValueError("Method must be one of " + ", ".join(VALID_SCHEMES))
return wavelength * refr
|
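For orientation, here is a self-contained sketch of the 'piskunov' branch above, with the air wavelength given directly in Angstrom instead of as an astropy Quantity (the function and variable names are illustrative, not part of the package):

import numpy as np

def air_to_vac_piskunov(wl_air_angstrom):
    # VALD's analytical inversion: refraction index of air evaluated at the
    # air wavelength (in Angstrom), then multiplied onto the wavelength.
    s2 = (1e4 / np.asarray(wl_air_angstrom, dtype=float)) ** 2
    n = 1 + (8.336624212083e-5
             + 2.408926869968e-2 / (130.1065924522 - s2)
             + 1.599740894897e-4 / (38.92568793293 - s2))
    return wl_air_angstrom * n

print(air_to_vac_piskunov(5000.0))  # ~5001.39, i.e. the vacuum wavelength in Angstrom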
27,936 | def generate_matrix(shape, dtype=float, **kwargs):
r"""Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
    test that can be unstable when the input value behaves badly.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
'shpae {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
| def generate_matrix(shape, dtype=float, **kwargs):
r"""Generates a random matrix with given singular values.
This function generates a random NumPy matrix (or a set of matrices) that
has specified singular values. It can be used to generate the inputs for a
    test that can be unstable when the input value behaves badly.
Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
and :math:`K = min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
'shape {} is invalid for matrices: too few axes'.format(shape)
)
k_shape = shape[:-2] + (min(shape[-2:]),)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise ValueError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, _, vh = numpy.linalg.svd(a, full_matrices=False)
a = numpy.einsum('...ik,...k,...kj->...ij', u, singular_values, vh)
return a.astype(dtype)
|
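The construction above boils down to: take the orthogonal factors of a random matrix and recombine them with the desired singular values. A small self-contained sketch of that idea (not the library helper itself):

import numpy as np

rng = np.random.default_rng(0)
target = np.array([3.0, 2.0, 0.5])              # desired singular values
a = rng.standard_normal((4, 3))                 # random matrix, used only for its U and Vh
u, _, vh = np.linalg.svd(a, full_matrices=False)
m = np.einsum('ik,k,kj->ij', u, target, vh)     # U @ diag(target) @ Vh
print(np.round(np.linalg.svd(m, compute_uv=False), 6))  # [3.  2.  0.5]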
43,576 | def strong_ent_layers_uniform(n_layers, n_wires, low=0, high=2 * pi, seed=None):
r"""Creates an initial parameter array for :func:`~.StronglyEntanglingLayers`, drawn from a uniform
distribution.
The shape of the parameter array is ``(n_layers, n_wires, 3)`` and each parameter is drawn uniformly at random \
from between ``low`` and ``high``. The parameters define the three rotation angles
applied in each layer.
Args:
n_layers (int): number of layers
n_wires (int): number of qubits
Keyword Args:
low (float): minimum value of uniform distribution
high (float): maximum value of uniform distribution
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
parameter array
"""
if seed is not None:
np.random.seed(seed)
params = np.random.uniform(low=low, high=high, size=(n_layers, n_wires, 3))
return params
| def strong_ent_layers_uniform(n_layers, n_wires, low=0, high=2 * pi, seed=None):
r"""Creates an initial parameter array for :func:`~.StronglyEntanglingLayers`, drawn from a uniform
distribution.
The shape of the parameter array is ``(n_layers, n_wires, 3)`` and each parameter is drawn uniformly at random \
from between ``low`` and ``high``. The parameters define the three rotation angles
applied in each layer.
Args:
n_layers (int): number of layers
n_wires (int): number of qubits
Keyword Args:
low (float): minimum value of uniform distribution
high (float): maximum value of uniform distribution
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
array: parameter array
"""
if seed is not None:
np.random.seed(seed)
params = np.random.uniform(low=low, high=high, size=(n_layers, n_wires, 3))
return params
|
12,698 | def test_handle_extras(empty_pyproject_toml: PyProjectToml) -> None:
# Tested with git/path/url in those respective tests.
attr = {"version": "1.0.0", "extras": "[extra1]"}
assert handle_dict_attr("requests", attr, empty_pyproject_toml) == "requests[extra1] ==1.0.0"
attr_multi = {"version": "1.0.0", "extras": ["extra1", "extra2", "extra3"]}
assert (
handle_dict_attr("requests", attr_multi, empty_pyproject_toml)
== "requests[extra1,extra2,extra3] ==1.0.0"
)
| def test_handle_extras(empty_pyproject_toml: PyProjectToml) -> None:
# This is also tested with git/path/url in those respective tests.
attr = {"version": "1.0.0", "extras": "[extra1]"}
assert handle_dict_attr("requests", attr, empty_pyproject_toml) == "requests[extra1] ==1.0.0"
attr_multi = {"version": "1.0.0", "extras": ["extra1", "extra2", "extra3"]}
assert (
handle_dict_attr("requests", attr_multi, empty_pyproject_toml)
== "requests[extra1,extra2,extra3] ==1.0.0"
)
|
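The behaviour the test above pins down is the folding of a Poetry-style {"version": ..., "extras": ...} attribute into a single requirement string. A hedged, self-contained sketch of that transformation (an illustration only, not the project's handle_dict_attr):

def format_extras_requirement(name, attr):
    # Accept either "[extra1]" (string form) or a list of extra names.
    extras = attr.get("extras", [])
    if isinstance(extras, str):
        extras = [extras.strip("[]")]
    extras_str = "[{}]".format(",".join(extras)) if extras else ""
    return "{}{} =={}".format(name, extras_str, attr["version"])

assert format_extras_requirement("requests", {"version": "1.0.0", "extras": "[extra1]"}) == "requests[extra1] ==1.0.0"
assert format_extras_requirement("requests", {"version": "1.0.0", "extras": ["extra1", "extra2", "extra3"]}) == "requests[extra1,extra2,extra3] ==1.0.0"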
23,279 | def min_len(length):
"""
A validator that raises `ValueError` if the initializer is called
with a string or iterable that is shorter than *length*.
:param int length: Minimum length of the string or iterable
.. versionadded:: 21.5.0
"""
return _MinLengthValidator(length)
| def min_len(length):
"""
A validator that raises `ValueError` if the initializer is called
with a string or iterable that is shorter than *length*.
:param int length: Minimum length of the string or iterable
.. versionadded:: 22.1.0
"""
return _MinLengthValidator(length)
|
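A short usage sketch, assuming attrs >= 22.1.0 where this validator is exposed as attrs.validators.min_len:

import attrs

@attrs.define
class Tag:
    name: str = attrs.field(validator=attrs.validators.min_len(1))

Tag("ok")        # passes validation
try:
    Tag("")      # shorter than 1 character
except ValueError as exc:
    print(exc)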
40,371 | def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]],
edge_index: Tensor, edge_attr: Optional[Tensor] = None,
relabel_nodes: bool = False,
num_nodes: Tuple[int, int] = None,
return_edge_mask: bool = False):
r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)`
containing the nodes in :obj:`subset`, for a bipartite graph.
Args:
subset (PairTensor or tuple([int],[int])): The nodes to keep.
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (tuple, optional): The number of nodes.
(default: :obj:`None`)
return_edge_mask (bool, optional): If set to :obj:`True`, will return
the edge mask to filter out additional edge features.
(default: :obj:`False`)
:rtype: (:class:`LongTensor`, :class:`Tensor`)
"""
device = edge_index.device
if isinstance(subset[0], (list, tuple)):
subset = (torch.tensor(subset[0], dtype=torch.long, device=device),
torch.tensor(subset[1], dtype=torch.long, device=device))
if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8:
num_nodes = subset[0].size(0), subset[1].size(0)
else:
if num_nodes is None:
num_nodes = (maybe_num_nodes(edge_index[0]),
maybe_num_nodes(edge_index[1]))
subset = (index_to_mask(subset[0], size=num_nodes[0]),
index_to_mask(subset[1], size=num_nodes[1]))
node_mask_i, node_mask_j = subset[0], subset[1]
edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]]
edge_index = edge_index[:, edge_mask]
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
if relabel_nodes:
node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long,
device=device)
node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long,
device=device)
node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(),
device=device)
node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(),
device=device)
edge_index = torch.stack(
[node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]])
if return_edge_mask:
return edge_index, edge_attr, edge_mask
else:
return edge_index, edge_attr
| def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]],
edge_index: Tensor, edge_attr: Optional[Tensor] = None,
relabel_nodes: bool = False,
num_nodes: Tuple[int, int] = None,
return_edge_mask: bool = False):
r"""Returns the induced subgraph of the bipartite graph :obj:`(edge_index, edge_attr)`
containing the nodes in :obj:`subset`, for a bipartite graph.
Args:
subset (PairTensor or tuple([int],[int])): The nodes to keep.
edge_index (LongTensor): The edge indices.
edge_attr (Tensor, optional): Edge weights or multi-dimensional
edge features. (default: :obj:`None`)
relabel_nodes (bool, optional): If set to :obj:`True`, the resulting
:obj:`edge_index` will be relabeled to hold consecutive indices
starting from zero. (default: :obj:`False`)
num_nodes (tuple, optional): The number of nodes.
(default: :obj:`None`)
return_edge_mask (bool, optional): If set to :obj:`True`, will return
the edge mask to filter out additional edge features.
(default: :obj:`False`)
:rtype: (:class:`LongTensor`, :class:`Tensor`)
"""
device = edge_index.device
if isinstance(subset[0], (list, tuple)):
subset = (torch.tensor(subset[0], dtype=torch.long, device=device),
torch.tensor(subset[1], dtype=torch.long, device=device))
if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8:
num_nodes = subset[0].size(0), subset[1].size(0)
else:
if num_nodes is None:
num_nodes = (maybe_num_nodes(edge_index[0]),
maybe_num_nodes(edge_index[1]))
subset = (index_to_mask(subset[0], size=num_nodes[0]),
index_to_mask(subset[1], size=num_nodes[1]))
node_mask_i, node_mask_j = subset[0], subset[1]
edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]]
edge_index = edge_index[:, edge_mask]
edge_attr = edge_attr[edge_mask] if edge_attr is not None else None
if relabel_nodes:
node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long,
device=device)
node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long,
device=device)
node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(),
device=device)
node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(),
device=device)
edge_index = torch.stack(
[node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]])
if return_edge_mask:
return edge_index, edge_attr, edge_mask
else:
return edge_index, edge_attr
|
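The heart of the utility above is the edge mask built from the two node masks. A self-contained sketch of just that step, on a toy bipartite graph with 3 source and 4 destination nodes (this is not the PyG function itself):

import torch

edge_index = torch.tensor([[0, 0, 1, 2, 2],
                           [0, 1, 2, 2, 3]])
src_mask = torch.tensor([True, False, True])          # keep source nodes {0, 2}
dst_mask = torch.tensor([False, False, True, True])   # keep destination nodes {2, 3}

# An edge survives only if both of its endpoints are kept.
edge_mask = src_mask[edge_index[0]] & dst_mask[edge_index[1]]
print(edge_index[:, edge_mask])  # tensor([[2, 2], [2, 3]])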
29,678 | def get_backend(scheme: str, require: object = None) -> Backend:
"""
Get the Backend instance for the given *scheme*.
    It looks for a matching scheme in dask's internal cache and falls back to
package metadata for the group name ``distributed.comm.backends``
Parameters
----------
require : object
        Deprecated; previously verified that the backend's requirements are
properly installed.
"""
backend = backends.get(scheme)
if backend is not None:
return backend
for backend_class_ep in _entry_points(
name=scheme, group="distributed.comm.backends"
):
backend = backend_class_ep.load()()
backends[scheme] = backend
return backend
raise ValueError(
"unknown address scheme %r (known schemes: %s)" % (scheme, sorted(backends))
)
| def get_backend(scheme: str, require: object = None) -> Backend:
"""
Get the Backend instance for the given *scheme*.
    It looks for a matching scheme in dask's internal cache and falls back to
package metadata for the group name ``distributed.comm.backends``
Parameters
----------
require : object
        Deprecated; previously verified that the backend's requirements are
properly installed.
"""
backend = backends.get(scheme)
if backend is not None:
return backend
for backend_class_ep in _entry_points(
name=scheme, group="distributed.comm.backends"
):
backend = backend_class_ep.load()()
backends[scheme] = backend
return backend
raise ValueError(
f"unknown address scheme {scheme!r} (known schemes: {sorted(backends)})"
)
|
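The fallback path above is the standard entry-points plugin pattern. A minimal sketch of the same lookup with importlib.metadata alone (the group name is taken from the docstring; the caching and exact error text of the real function are omitted):

from importlib.metadata import entry_points

def load_backend(scheme, group="distributed.comm.backends"):
    eps = entry_points()
    if hasattr(eps, "select"):                 # Python 3.10+ API
        candidates = eps.select(group=group, name=scheme)
    else:                                      # older dict-style API
        candidates = [ep for ep in eps.get(group, []) if ep.name == scheme]
    for ep in candidates:
        return ep.load()()                     # instantiate the registered backend class
    raise ValueError("unknown address scheme {!r}".format(scheme))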
31,560 | def main():
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = demisto.params().get('timeout')
retries = demisto.params().get('retries') or 5
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout,
retries)
command = demisto.command()
args = demisto.args()
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if command == 'test-module':
test_function(aws_client)
elif command == 'aws-iam-create-user':
create_user(args, aws_client)
elif command == 'aws-iam-create-login-profile':
create_login_profile(args, aws_client)
elif command == 'aws-iam-get-user':
get_user(args, aws_client)
elif command == 'aws-iam-list-users':
list_users(args, aws_client)
elif command == 'aws-iam-update-user':
update_user(args, aws_client)
elif command == 'aws-iam-delete-user':
delete_user(args, aws_client)
elif command == 'aws-iam-update-login-profile':
update_login_profile(args, aws_client)
elif command == 'aws-iam-create-group':
create_group(args, aws_client)
elif command == 'aws-iam-list-groups':
list_groups(args, aws_client)
elif command == 'aws-iam-list-groups-for-user':
list_groups_for_user(args, aws_client)
elif command == 'aws-iam-create-access-key':
create_access_key(args, aws_client)
elif command == 'aws-iam-update-access-key':
update_access_key(args, aws_client)
elif command == 'aws-iam-list-access-keys-for-user':
list_access_key_for_user(args, aws_client)
elif command == 'aws-iam-list-policies':
list_policies(args, aws_client)
elif command == 'aws-iam-list-roles':
list_roles(args, aws_client)
elif command == 'aws-iam-attach-policy':
attach_policy(args, aws_client)
elif command == 'aws-iam-detach-policy':
detach_policy(args, aws_client)
elif command == 'aws-iam-delete-login-profile':
delete_login_profile(args, aws_client)
elif command == 'aws-iam-add-user-to-group':
add_user_to_group(args, aws_client)
elif command == 'aws-iam-delete-group':
delete_group(args, aws_client)
elif command == 'aws-iam-remove-user-from-group':
remove_user_from_group(args, aws_client)
elif command == 'aws-iam-delete-access-key':
delete_access_key(args, aws_client)
elif command == 'aws-iam-create-instance-profile':
create_instance_profile(args, aws_client)
elif command == 'aws-iam-delete-instance-profile':
delete_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles':
list_instance_profiles(args, aws_client)
elif command == 'aws-iam-add-role-to-instance-profile':
add_role_to_instance_profile(args, aws_client)
elif command == 'aws-iam-remove-role-from-instance-profile':
remove_role_from_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles-for-role':
list_instance_profiles_for_role(args, aws_client)
elif command == 'aws-iam-get-instance-profile':
get_instance_profile(args, aws_client)
elif command == 'aws-iam-get-role':
get_role(args, aws_client)
elif command == 'aws-iam-delete-role':
delete_role(args, aws_client)
elif command == 'aws-iam-create-role':
create_role(args, aws_client)
elif command == 'aws-iam-create-policy':
create_policy(args, aws_client)
elif command == 'aws-iam-delete-policy':
delete_policy(args, aws_client)
elif command == 'aws-iam-create-policy-version':
create_policy_version(args, aws_client)
elif command == 'aws-iam-delete-policy-version':
delete_policy_version(args, aws_client)
elif command == 'aws-iam-list-policy-versions':
list_policy_versions(args, aws_client)
elif command == 'aws-iam-get-policy-version':
get_policy_version(args, aws_client)
elif command == 'aws-iam-set-default-policy-version':
set_default_policy_version(args, aws_client)
elif command == 'aws-iam-create-account-alias':
create_account_alias(args, aws_client)
elif command == 'aws-iam-delete-account-alias':
delete_account_alias(args, aws_client)
elif command == 'aws-iam-get-account-password-policy':
get_account_password_policy(args, aws_client)
elif command == 'aws-iam-update-account-password-policy':
update_account_password_policy(args, aws_client)
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS IAM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
| def main():
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = demisto.params().get('timeout')
retries = demisto.params().get('retries') or 5
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate, timeout,
retries)
command = demisto.command()
args = demisto.args()
try:
LOG('Command being called is {command}'.format(command=command))
if command == 'test-module':
test_function(aws_client)
elif command == 'aws-iam-create-user':
create_user(args, aws_client)
elif command == 'aws-iam-create-login-profile':
create_login_profile(args, aws_client)
elif command == 'aws-iam-get-user':
get_user(args, aws_client)
elif command == 'aws-iam-list-users':
list_users(args, aws_client)
elif command == 'aws-iam-update-user':
update_user(args, aws_client)
elif command == 'aws-iam-delete-user':
delete_user(args, aws_client)
elif command == 'aws-iam-update-login-profile':
update_login_profile(args, aws_client)
elif command == 'aws-iam-create-group':
create_group(args, aws_client)
elif command == 'aws-iam-list-groups':
list_groups(args, aws_client)
elif command == 'aws-iam-list-groups-for-user':
list_groups_for_user(args, aws_client)
elif command == 'aws-iam-create-access-key':
create_access_key(args, aws_client)
elif command == 'aws-iam-update-access-key':
update_access_key(args, aws_client)
elif command == 'aws-iam-list-access-keys-for-user':
list_access_key_for_user(args, aws_client)
elif command == 'aws-iam-list-policies':
list_policies(args, aws_client)
elif command == 'aws-iam-list-roles':
list_roles(args, aws_client)
elif command == 'aws-iam-attach-policy':
attach_policy(args, aws_client)
elif command == 'aws-iam-detach-policy':
detach_policy(args, aws_client)
elif command == 'aws-iam-delete-login-profile':
delete_login_profile(args, aws_client)
elif command == 'aws-iam-add-user-to-group':
add_user_to_group(args, aws_client)
elif command == 'aws-iam-delete-group':
delete_group(args, aws_client)
elif command == 'aws-iam-remove-user-from-group':
remove_user_from_group(args, aws_client)
elif command == 'aws-iam-delete-access-key':
delete_access_key(args, aws_client)
elif command == 'aws-iam-create-instance-profile':
create_instance_profile(args, aws_client)
elif command == 'aws-iam-delete-instance-profile':
delete_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles':
list_instance_profiles(args, aws_client)
elif command == 'aws-iam-add-role-to-instance-profile':
add_role_to_instance_profile(args, aws_client)
elif command == 'aws-iam-remove-role-from-instance-profile':
remove_role_from_instance_profile(args, aws_client)
elif command == 'aws-iam-list-instance-profiles-for-role':
list_instance_profiles_for_role(args, aws_client)
elif command == 'aws-iam-get-instance-profile':
get_instance_profile(args, aws_client)
elif command == 'aws-iam-get-role':
get_role(args, aws_client)
elif command == 'aws-iam-delete-role':
delete_role(args, aws_client)
elif command == 'aws-iam-create-role':
create_role(args, aws_client)
elif command == 'aws-iam-create-policy':
create_policy(args, aws_client)
elif command == 'aws-iam-delete-policy':
delete_policy(args, aws_client)
elif command == 'aws-iam-create-policy-version':
create_policy_version(args, aws_client)
elif command == 'aws-iam-delete-policy-version':
delete_policy_version(args, aws_client)
elif command == 'aws-iam-list-policy-versions':
list_policy_versions(args, aws_client)
elif command == 'aws-iam-get-policy-version':
get_policy_version(args, aws_client)
elif command == 'aws-iam-set-default-policy-version':
set_default_policy_version(args, aws_client)
elif command == 'aws-iam-create-account-alias':
create_account_alias(args, aws_client)
elif command == 'aws-iam-delete-account-alias':
delete_account_alias(args, aws_client)
elif command == 'aws-iam-get-account-password-policy':
get_account_password_policy(args, aws_client)
elif command == 'aws-iam-update-account-password-policy':
update_account_password_policy(args, aws_client)
except Exception as e:
LOG(str(e))
return_error('Error has occurred in the AWS IAM Integration: {code}\n {message}'.format(
code=type(e), message=str(e)))
|
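A note on the design: the command routing above is a long if/elif ladder. A table-driven dispatch is an equivalent structure that keeps the mapping in one place; the sketch below uses toy handlers, since the integration's real create_user / get_user functions are not reproduced here.

def _create_user(args, client):
    return "create_user({})".format(args)

def _get_user(args, client):
    return "get_user({})".format(args)

COMMANDS = {
    'aws-iam-create-user': _create_user,
    'aws-iam-get-user': _get_user,
    # ... one entry per supported command ...
}

def dispatch(command, args, client):
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError('Command {} is not implemented.'.format(command))
    return handler(args, client)

print(dispatch('aws-iam-get-user', {'userName': 'alice'}, None))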
58,327 | def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
    scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
    additive noise as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method. Default: 4. When stages=1, this becomes
Euler / Euler-Maruyama.
s : float
        The diffusion coefficient for models with additive noise. Default: 0 for
deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
            W = np.sqrt(dt) * np.random.standard_normal((N_e, N_x))
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
| def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
This scheme has order-4 convergence for ODEs when `stages=4`.
For SDEs with additive noise as defined below, it has strong / weak convergence
of order 1.0 (for any value of `stages`). See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method. Default: 4. When stages=1, this becomes
Euler / Euler-Maruyama.
s : float
        The diffusion coefficient for models with additive noise. Default: 0 for
deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
            W = np.sqrt(dt) * np.random.standard_normal((N_e, N_x))
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
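As a quick sanity check of the deterministic four-stage branch (s=0), the classical RK4 update can be applied to dx/dt = -x and compared against the exact solution exp(-t); this standalone sketch mirrors the k1..k4 combination used above.

import numpy as np

def f(t, x):
    return -x          # dx/dt = -x, exact solution x(t) = exp(-t)

x, t, dt = 1.0, 0.0, 0.1
for _ in range(10):    # integrate to t = 1
    k1 = dt * f(t, x)
    k2 = dt * f(t + dt / 2.0, x + k1 / 2.0)
    k3 = dt * f(t + dt / 2.0, x + k2 / 2.0)
    k4 = dt * f(t + dt, x + k3)
    x += (k1 + 2.0 * (k2 + k3) + k4) / 6.0
    t += dt

print(x, np.exp(-1.0), abs(x - np.exp(-1.0)))  # error on the order of 1e-7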
57,606 | def fetch_consumption(zone_key="CA-QC", session=None, logger=None):
data = _fetch_quebec_consumption()
for elem in reversed(data["details"]):
if "demandeTotal" in elem["valeurs"]:
return {
"zoneKey": zone_key,
"datetime": elem["date"],
"consumption": elem["valeurs"]["demandeTotal"],
"source": "hydroquebec.com",
}
| def fetch_consumption(
zone_key="CA-QC", session=None, target_datetime=None, logger=None
):
data = _fetch_quebec_consumption()
for elem in reversed(data["details"]):
if "demandeTotal" in elem["valeurs"]:
return {
"zoneKey": zone_key,
"datetime": elem["date"],
"consumption": elem["valeurs"]["demandeTotal"],
"source": "hydroquebec.com",
}
|
10,826 | def _rewrite_function(function):
# Mark NRT usage
markpass = _MarkNrtCallVisitor()
markpass.visit_Function(function)
marked = markpass.marked
# Remove NRT usage
for bb in function.basic_blocks:
for inst in list(bb.instructions):
if inst in marked:
bb.instructions.remove(inst)
| def _rewrite_function(function):
# Mark NRT usage
markpass = _MarkNrtCallVisitor()
markpass.visit_Function(function)
marked = markpass.marked
# Remove NRT usage
for bb in function.basic_blocks:
for inst in list(bb.instructions):
if inst in markpass.marked:
bb.instructions.remove(inst)
|
32,583 | def test_MattermostAskUser(mocker):
"""
Given:
- The script args.
When:
- Running the MattermostAskUser command.
Then:
    - Validating the results after manipulating the data and executing the 'send-notification' command.
"""
from MattermostAskUser import main
mocker.patch.object(demisto, 'args', return_value={'message': 'message', 'persistent': 'persistent',
'replyEntriesTag': 'replyEntriesTag',
'option1': {'no'}, 'option2': {'yes'},
'task': 'none', 'user': {'emai'}})
execute_command_add_entitlement_res = [{'Type': 1, 'Contents': 'some-guid'}]
execute_command_send_notification_res = [{'Type': 1, 'HumanReadable': 'Message sent to Slack successfully.'
' \nThread ID is: 1660645689.649679'}]
execute_mock = mocker.patch.object(demisto, 'executeCommand', side_effect=[execute_command_add_entitlement_res,
execute_command_send_notification_res])
results_mock = mocker.patch.object(demisto, 'results')
main()
assert execute_mock.call_count == 2
results_mock.assert_called_once()
results = results_mock.call_args[0][0]
assert results == execute_command_send_notification_res
| def test_MattermostAskUser(mocker):
"""
Given:
- The script args.
When:
- Running the MattermostAskUser command.
Then:
    - Validating the results after manipulating the data and executing the 'send-notification' command.
"""
from MattermostAskUser import main
mocker.patch.object(demisto, 'args', return_value={'message': 'message', 'persistent': 'persistent',
'replyEntriesTag': 'replyEntriesTag',
'option1': {'no'}, 'option2': {'yes'},
'task': 'none', 'user': {'emai'}})
execute_command_add_entitlement_res = [{'Type': 1, 'Contents': 'some-guid'}]
execute_command_send_notification_res = [{'Type': EntryType.NOTE, 'HumanReadable': 'Message sent to Slack successfully.'
' \nThread ID is: 1660645689.649679'}]
execute_mock = mocker.patch.object(demisto, 'executeCommand', side_effect=[execute_command_add_entitlement_res,
execute_command_send_notification_res])
results_mock = mocker.patch.object(demisto, 'results')
main()
assert execute_mock.call_count == 2
results_mock.assert_called_once()
results = results_mock.call_args[0][0]
assert results == execute_command_send_notification_res
|
12,975 | def resolve_product_variants(
info, access_to_all, requestor, ids=None, channel_slug=None
) -> ChannelQsContext:
visible_products = models.Product.objects.visible_to_user(requestor, channel_slug)
if not access_to_all:
visible_products = visible_products.annotate_visible_in_listings(
channel_slug
).exclude(visible_in_listings=False)
qs = models.ProductVariant.objects.filter(product__id__in=visible_products)
if ids:
db_ids = [get_database_id(info, node_id, "ProductVariant") for node_id in ids]
qs = qs.filter(pk__in=db_ids)
return ChannelQsContext(qs=qs, channel_slug=channel_slug)
| def resolve_product_variants(
info, requestor_has_access_to_all, requestor, ids=None, channel_slug=None
) -> ChannelQsContext:
visible_products = models.Product.objects.visible_to_user(requestor, channel_slug)
    if not requestor_has_access_to_all:
visible_products = visible_products.annotate_visible_in_listings(
channel_slug
).exclude(visible_in_listings=False)
qs = models.ProductVariant.objects.filter(product__id__in=visible_products)
if ids:
db_ids = [get_database_id(info, node_id, "ProductVariant") for node_id in ids]
qs = qs.filter(pk__in=db_ids)
return ChannelQsContext(qs=qs, channel_slug=channel_slug)
|
14,016 | def clip_shp(shp, clip_obj):
"""Clip points, lines, or polygon geometries to the clip_obj extent.
Both layers must be in the same Coordinate Reference System (CRS) and will
be clipped to the full extent of the clip object.
If there are multiple polygons in clip_obj,
data from shp will be clipped to the total boundary of
all polygons in clip_obj.
Parameters
----------
shp : GeoDataFrame
Vector layer (point, line, polygon) to be clipped to clip_obj.
clip_obj : GeoDataFrame
Polygon vector layer used to clip shp.
The clip_obj's geometry is dissolved into one geometric feature
and intersected with shp.
Returns
-------
GeoDataFrame
Vector data (points, lines, polygons) from shp clipped to
polygon boundary from clip_obj.
Examples
--------
Clipping points (glacier locations in the state of Colorado) with
a polygon (the boundary of Rocky Mountain National Park):
>>> import geopandas as gpd
>>> import earthpy.clip as cl
>>> from earthpy.io import path_to_example
>>> rmnp = gpd.read_file(path_to_example('rmnp.shp'))
>>> glaciers = gpd.read_file(path_to_example('colorado-glaciers.geojson'))
>>> glaciers.shape
(134, 2)
>>> rmnp_glaciers = cl.clip_shp(glaciers, rmnp)
>>> rmnp_glaciers.shape
(36, 2)
Clipping a line (the Continental Divide Trail) with a
polygon (the boundary of Rocky Mountain National Park):
>>> cdt = gpd.read_file(path_to_example('continental-div-trail.geojson'))
>>> rmnp_cdt_section = cl.clip_shp(cdt, rmnp)
>>> cdt['geometry'].length > rmnp_cdt_section['geometry'].length
0 True
dtype: bool
Clipping a polygon (Colorado counties) with another polygon
(the boundary of Rocky Mountain National Park):
>>> counties = gpd.read_file(path_to_example('colorado-counties.geojson'))
>>> counties.shape
(64, 13)
>>> rmnp_counties = cl.clip_shp(counties, rmnp)
>>> rmnp_counties.shape
(4, 13)
"""
try:
shp.geometry
clip_obj.geometry
except AttributeError:
raise AttributeError(
"Please make sure that your input and clip GeoDataFrames have a"
" valid geometry column"
)
if not any(shp.intersects(clip_obj.unary_union)):
raise ValueError("Shape and crop extent do not overlap.")
if any(shp.geometry.type == "MultiPoint"):
return _clip_multi_point(shp, clip_obj)
elif any(shp.geometry.type == "Point"):
return _clip_points(shp, clip_obj)
elif any(shp.geometry.type == "MultiPolygon") or any(
shp.geometry.type == "MultiLineString"
):
return _clip_multi_poly_line(shp, clip_obj)
else:
return _clip_line_poly(shp, clip_obj)
| def clip(shp, clip_obj):
"""Clip points, lines, or polygon geometries to the clip_obj extent.
Both layers must be in the same Coordinate Reference System (CRS) and will
be clipped to the full extent of the clip object.
If there are multiple polygons in clip_obj,
data from shp will be clipped to the total boundary of
all polygons in clip_obj.
Parameters
----------
shp : GeoDataFrame
Vector layer (point, line, polygon) to be clipped to clip_obj.
clip_obj : GeoDataFrame
Polygon vector layer used to clip shp.
The clip_obj's geometry is dissolved into one geometric feature
and intersected with shp.
Returns
-------
GeoDataFrame
Vector data (points, lines, polygons) from shp clipped to
polygon boundary from clip_obj.
Examples
--------
Clipping points (glacier locations in the state of Colorado) with
a polygon (the boundary of Rocky Mountain National Park):
>>> import geopandas as gpd
>>> import earthpy.clip as cl
>>> from earthpy.io import path_to_example
>>> rmnp = gpd.read_file(path_to_example('rmnp.shp'))
>>> glaciers = gpd.read_file(path_to_example('colorado-glaciers.geojson'))
>>> glaciers.shape
(134, 2)
>>> rmnp_glaciers = cl.clip_shp(glaciers, rmnp)
>>> rmnp_glaciers.shape
(36, 2)
Clipping a line (the Continental Divide Trail) with a
polygon (the boundary of Rocky Mountain National Park):
>>> cdt = gpd.read_file(path_to_example('continental-div-trail.geojson'))
>>> rmnp_cdt_section = cl.clip_shp(cdt, rmnp)
>>> cdt['geometry'].length > rmnp_cdt_section['geometry'].length
0 True
dtype: bool
Clipping a polygon (Colorado counties) with another polygon
(the boundary of Rocky Mountain National Park):
>>> counties = gpd.read_file(path_to_example('colorado-counties.geojson'))
>>> counties.shape
(64, 13)
>>> rmnp_counties = cl.clip_shp(counties, rmnp)
>>> rmnp_counties.shape
(4, 13)
"""
try:
shp.geometry
clip_obj.geometry
except AttributeError:
raise AttributeError(
"Please make sure that your input and clip GeoDataFrames have a"
" valid geometry column"
)
if not any(shp.intersects(clip_obj.unary_union)):
raise ValueError("Shape and crop extent do not overlap.")
if any(shp.geometry.type == "MultiPoint"):
return _clip_multi_point(shp, clip_obj)
elif any(shp.geometry.type == "Point"):
return _clip_points(shp, clip_obj)
elif any(shp.geometry.type == "MultiPolygon") or any(
shp.geometry.type == "MultiLineString"
):
return _clip_multi_poly_line(shp, clip_obj)
else:
return _clip_line_poly(shp, clip_obj)
|
45,567 | def main(args):
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('matplotlib').setLevel(logging.WARN)
utils.configure_colored_logging(args.loglevel)
run(args.actions, args.port, args.cors,)
| def main(args):
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('matplotlib').setLevel(logging.WARN)
utils.configure_colored_logging(args.loglevel)
run(args.actions, args.port, args.cors)
|
45,638 | def create_data(pdb_path):
"""
Function to parse the protein data bank (PDB) file to generate
the input modelData
@param pdb_path
Name of the biomolecular structure file in PDB format
"""
# Create local copy of temp file
copy2(pdb_path, './tmp.pdb')
# Use parmed to read the bond information from temp file
top = pmd.load_file('tmp.pdb')
# Remove the created temp file
os.remove('tmp.pdb')
# Read pdb file to create atom/bond information
with open(pdb_path, 'r') as infile:
# store only non-empty lines
lines = [l.strip() for l in infile if l.strip()]
# Initialize all variables
varNchains = []
serial = []
atmName = []
resName = []
chain = []
resId = []
positions = []
occupancy = []
tempFactor = []
atomType = []
ct = 0
datb = {
'atoms': [],
'bonds': []
}
# Variables that store the character positions of different
# parameters from the molecule PDB file
serialpos = [6, 11]
atmNamepos = [12, 16]
rNamepos = [17, 20]
chainpos = [21, 22]
rIdpos = [22, 26]
xpos = [30, 38]
ypos = [38, 46]
zpos = [46, 54]
occupos = [54, 60]
Bfacpos = [60, 66]
atmTypepos = [77, 79]
for l in lines:
line = l.split()
if "ATOM" in line[0] or "HETATM" in line[0]:
serial.append(int(l[serialpos[0]:serialpos[1]]))
atmName.append(l[atmNamepos[0]:atmNamepos[1]].strip())
val_rName = l[rNamepos[0]:rNamepos[1]].strip()
resName.append(val_rName)
chain_val = l[chainpos[0]:chainpos[1]].strip()
chain.append(chain_val)
if chain_val not in varNchains:
varNchains.append(chain_val)
val_rId = int(l[rIdpos[0]:rIdpos[1]])
resId.append(val_rId)
x = float(l[xpos[0]:xpos[1]])
y = float(l[ypos[0]:ypos[1]])
z = float(l[zpos[0]:zpos[1]])
positions.append([x, y, z])
occupancy.append(l[occupos[0]:occupos[1]].strip())
tempFactor.append(l[Bfacpos[0]:Bfacpos[1]].strip())
atomType.append(l[atmTypepos[0]:atmTypepos[1]].strip())
ct += 1
# Create list of atoms
tmpRes = resId[0]
resct = 1
for i in range(len(chain)):
if tmpRes != resId[i]:
tmpRes = resId[i]
resct += 1
datb['atoms'].append({
"name": atmName[i],
"chain": chain[i],
"positions": positions[i],
"residue_index": resct,
"element": atomType[i],
"residue_name": resName[i]+str(resId[i]),
"serial": i,
})
# Create list of bonds using the parmed module
for i in range(len(top.bonds)):
bondpair = top.bonds[i].__dict__
atom1 = re.findall(r"\[(\d+)\]", str(bondpair['atom1']))
atom2 = re.findall(r"\[(\d+)\]", str(bondpair['atom2']))
datb['bonds'].append({
'atom2_index': int(atom1[0]),
'atom1_index': int(atom2[0])
})
return json.dumps(datb)
| def create_data(pdb_path):
"""
Function to parse the protein data bank (PDB) file to generate
the input modelData
@param pdb_path
# Read PDB file to create atom/bond information
"""
# Create local copy of temp file
copy2(pdb_path, './tmp.pdb')
# Use parmed to read the bond information from temp file
top = pmd.load_file('tmp.pdb')
# Remove the created temp file
os.remove('tmp.pdb')
# Read pdb file to create atom/bond information
with open(pdb_path, 'r') as infile:
# store only non-empty lines
lines = [l.strip() for l in infile if l.strip()]
# Initialize all variables
varNchains = []
serial = []
atmName = []
resName = []
chain = []
resId = []
positions = []
occupancy = []
tempFactor = []
atomType = []
ct = 0
datb = {
'atoms': [],
'bonds': []
}
# Variables that store the character positions of different
# parameters from the molecule PDB file
serialpos = [6, 11]
atmNamepos = [12, 16]
rNamepos = [17, 20]
chainpos = [21, 22]
rIdpos = [22, 26]
xpos = [30, 38]
ypos = [38, 46]
zpos = [46, 54]
occupos = [54, 60]
Bfacpos = [60, 66]
atmTypepos = [77, 79]
for l in lines:
line = l.split()
if "ATOM" in line[0] or "HETATM" in line[0]:
serial.append(int(l[serialpos[0]:serialpos[1]]))
atmName.append(l[atmNamepos[0]:atmNamepos[1]].strip())
val_rName = l[rNamepos[0]:rNamepos[1]].strip()
resName.append(val_rName)
chain_val = l[chainpos[0]:chainpos[1]].strip()
chain.append(chain_val)
if chain_val not in varNchains:
varNchains.append(chain_val)
val_rId = int(l[rIdpos[0]:rIdpos[1]])
resId.append(val_rId)
x = float(l[xpos[0]:xpos[1]])
y = float(l[ypos[0]:ypos[1]])
z = float(l[zpos[0]:zpos[1]])
positions.append([x, y, z])
occupancy.append(l[occupos[0]:occupos[1]].strip())
tempFactor.append(l[Bfacpos[0]:Bfacpos[1]].strip())
atomType.append(l[atmTypepos[0]:atmTypepos[1]].strip())
ct += 1
# Create list of atoms
tmpRes = resId[0]
resct = 1
for i in range(len(chain)):
if tmpRes != resId[i]:
tmpRes = resId[i]
resct += 1
datb['atoms'].append({
"name": atmName[i],
"chain": chain[i],
"positions": positions[i],
"residue_index": resct,
"element": atomType[i],
"residue_name": resName[i]+str(resId[i]),
"serial": i,
})
# Create list of bonds using the parmed module
for i in range(len(top.bonds)):
bondpair = top.bonds[i].__dict__
atom1 = re.findall(r"\[(\d+)\]", str(bondpair['atom1']))
atom2 = re.findall(r"\[(\d+)\]", str(bondpair['atom2']))
datb['bonds'].append({
'atom2_index': int(atom1[0]),
'atom1_index': int(atom2[0])
})
return json.dumps(datb)
|
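The parsing above relies on the fixed column layout of PDB ATOM/HETATM records. A self-contained sketch of the same slicing on a single hand-built record, using the same character ranges as serialpos, atmNamepos, and friends:

record = (
    "ATOM  "            # record name, columns 0-5
    + "    1" + " "      # serial (6-10) and a spacer
    + " N  " + " "       # atom name (12-15) and altLoc
    + "MET" + " " + "A"  # resName (17-19), spacer, chain (21)
    + "   1" + " " * 4   # resSeq (22-25) and padding
    + "  38.198" + "  19.582" + "  12.441"   # x, y, z (30-53)
    + "  1.00" + " 25.00"                    # occupancy, B-factor (54-65)
    + " " * 11 + " N"                        # padding, element (77-78)
)

serial = int(record[6:11])
name = record[12:16].strip()
resname = record[17:20].strip()
chain = record[21:22].strip()
resid = int(record[22:26])
x, y, z = float(record[30:38]), float(record[38:46]), float(record[46:54])
element = record[77:79].strip()
print(serial, name, resname, chain, resid, (x, y, z), element)
# -> 1 N MET A 1 (38.198, 19.582, 12.441) N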
1,303 | def test_missing_indicator_no_missing():
# check that all features are drop if there are no missing values when
# features='missing-only' (#13491)
X = np.array([[1, 1],
[1, 1]])
mi = MissingIndicator(features='missing-only', missing_values=-1)
Xt = mi.fit_transform(X)
assert Xt.shape[1] == 0
| def test_missing_indicator_no_missing():
# check that all features are dropped if there are no missing values when
# features='missing-only' (#13491)
X = np.array([[1, 1],
[1, 1]])
mi = MissingIndicator(features='missing-only', missing_values=-1)
Xt = mi.fit_transform(X)
assert Xt.shape[1] == 0
|
34,882 | def collect_device_annotation_ops(expr):
"""Collect the device annotation ops for the given expression.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
Returns
-------
ret : Dict[tvm.relay.expr, int]
A dictionary of tvm.relay.expr to device id mapping where the keys are
annotation expressions.
"""
return _ir_pass.CollectDeviceAnnotationOps(expr)
| def collect_device_annotation_ops(expr):
"""Collect the device annotation ops for the given expression.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
Returns
-------
ret : Dict[tvm.relay.expr, int]
A dictionary mapping tvm.relay.Expr to device id where the keys are
annotation expressions.
"""
return _ir_pass.CollectDeviceAnnotationOps(expr)
|
45,929 | def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
        image: the input image on which to draw the lines, with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
    # calculate coefficients A,B,C of the line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
    # make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
    # slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
| def draw_line(
image : torch.Tensor,
p1 : torch.Tensor, p2 : torch.Tensor,
color : torch.Tensor,
) -> torch.Tensor:
r"""Draw a single line into an image.
Args:
        image: the input image on which to draw the lines, with shape (C,H,W).
p1: the start point of the line with shape (2).
p2: the end point [x y] of the line with shape (2).
color: the color of the line with shape (3).
Return:
the image containing the line.
"""
# assign points
x1, y1 = p1
x2, y2 = p2
    # calculate coefficients A,B,C of the line
# from equation Ax + By + C = 0
A = y2 - y1
B = x1 - x2
C = x2 * y1 - x1 * y2
    # make sure A is positive to utilize the function properly
if (A < 0):
A = -A
B = -B
C = -C
# calculate the slope of the line
# check for division by zero
if (B != 0):
m = -A / B
# make sure you start drawing in the right direction
x1, x2 = min(x1, x2), max(x1, x2)
y1, y2 = min(y1, y2), max(y1, y2)
# line equation that determines the distance away from the line
def line_equation(x, y):
return A * x + B * y + C
# vertical line
if B == 0:
image[:, y1:y2 + 1, x1] = color
# horizontal line
elif A == 0:
image[:, y1, x1:x2 + 1] = color
# slope between 0 and 1
elif 0 < m < 1:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y1, color)
if line_equation(i + 1, y1 + 0.5) > 0:
y1 += 1
# slope greater than or equal to 1
elif m >= 1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x1, j, color)
if line_equation(x1 + 0.5, j + 1) < 0:
x1 += 1
    # slope less than -1
elif m <= -1:
for j in range(y1, y2 + 1):
_draw_pixel(image, x2, j, color)
if line_equation(x2 - 0.5, j + 1) > 0:
x2 -= 1
# slope between -1 and 0
elif -1 < m < 0:
for i in range(x1, x2 + 1):
_draw_pixel(image, i, y2, color)
if line_equation(i + 1, y2 - 0.5) > 0:
y2 -= 1
return image
|
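A condensed, self-contained sketch of the 0 < m < 1 branch of the midpoint rule above, drawing into a small NumPy grid instead of a torch image (endpoint ordering and the other slope branches are omitted):

import numpy as np

def draw_line_octant1(grid, x1, y1, x2, y2):
    # Ax + By + C = 0 with A forced positive, exactly as in the routine above.
    A, B, C = y2 - y1, x1 - x2, x2 * y1 - x1 * y2
    if A < 0:
        A, B, C = -A, -B, -C
    for i in range(x1, x2 + 1):
        grid[y1, i] = 1
        # Step up in y whenever the midpoint (i + 1, y1 + 0.5) lies below the line.
        if A * (i + 1) + B * (y1 + 0.5) + C > 0:
            y1 += 1

img = np.zeros((5, 10), dtype=int)
draw_line_octant1(img, 0, 0, 9, 4)
print(img[::-1])  # flipped so that the origin appears at the bottom-left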
48,970 | def _collapse_subpackage_variants(
list_of_metas, root_path, platform, arch, forge_config
):
"""Collapse all subpackage node variants into one aggregate collection of used variables
We get one node per output, but a given recipe can have multiple outputs. Each output
can have its own used_vars, and we must unify all of the used variables for all of the
outputs"""
# things we consider "top-level" are things that we loop over with CI jobs. We don't loop over
# outputs with CI jobs.
top_level_loop_vars = set()
all_used_vars = set()
all_variants = set()
is_noarch = True
for meta in list_of_metas:
all_used_vars.update(meta.get_used_vars())
all_variants.update(
conda_build.utils.HashableDict(v) for v in meta.config.variants
)
all_variants.add(conda_build.utils.HashableDict(meta.config.variant))
if not meta.noarch:
is_noarch = False
top_level_loop_vars = list_of_metas[0].get_used_loop_vars(
force_top_level=True
)
top_level_vars = list_of_metas[0].get_used_vars(force_top_level=True)
if "target_platform" in all_used_vars:
top_level_loop_vars.add("target_platform")
# this is the initial collection of all variants before we discard any. "Squishing"
# them is necessary because the input form is already broken out into one matrix
# configuration per item, and we want a single dict, with each key representing many values
squished_input_variants = (
conda_build.variants.list_of_dicts_to_dict_of_lists(
list_of_metas[0].config.input_variants
)
)
squished_used_variants = (
conda_build.variants.list_of_dicts_to_dict_of_lists(list(all_variants))
)
# these are variables that only occur in the top level, and thus won't show up as loops in the
# above collection of all variants. We need to transfer them from the input_variants.
preserve_top_level_loops = set(top_level_loop_vars) - set(all_used_vars)
# Add in some variables that should always be preserved
always_keep_keys = {
"zip_keys",
"pin_run_as_build",
"MACOSX_DEPLOYMENT_TARGET",
"MACOSX_SDK_VERSION",
"macos_min_version",
"macos_machine",
"channel_sources",
"channel_targets",
"docker_image",
"build_number_decrement",
# The following keys are required for some of our aarch64 builds
# Added in https://github.com/conda-forge/conda-forge-pinning-feedstock/pull/180
"cdt_arch",
"cdt_name",
"BUILD",
}
if not is_noarch:
always_keep_keys.add("target_platform")
all_used_vars.update(always_keep_keys)
all_used_vars.update(top_level_vars)
used_key_values = {
key: squished_input_variants[key]
for key in all_used_vars
if key in squished_input_variants
}
for k, v in squished_used_variants.items():
if k in all_used_vars:
used_key_values[k] = v
for k in preserve_top_level_loops:
used_key_values[k] = squished_input_variants[k]
_trim_unused_zip_keys(used_key_values)
_trim_unused_pin_run_as_build(used_key_values)
# to deduplicate potentially zipped keys, we blow out the collection of variables, then
# do a set operation, then collapse it again
used_key_values = conda_build.variants.dict_of_lists_to_list_of_dicts(
used_key_values
)
used_key_values = {
conda_build.utils.HashableDict(variant) for variant in used_key_values
}
used_key_values = conda_build.variants.list_of_dicts_to_dict_of_lists(
list(used_key_values)
)
_trim_unused_zip_keys(used_key_values)
_trim_unused_pin_run_as_build(used_key_values)
logger.info("top_level_loop_vars {}".format(top_level_loop_vars))
logger.info("used_key_values {}".format(used_key_values))
return (
break_up_top_level_values(top_level_loop_vars, used_key_values),
top_level_loop_vars,
)
| def _collapse_subpackage_variants(
list_of_metas, root_path, platform, arch, forge_config
):
"""Collapse all subpackage node variants into one aggregate collection of used variables
We get one node per output, but a given recipe can have multiple outputs. Each output
can have its own used_vars, and we must unify all of the used variables for all of the
outputs"""
# things we consider "top-level" are things that we loop over with CI jobs. We don't loop over
# outputs with CI jobs.
top_level_loop_vars = set()
all_used_vars = set()
all_variants = set()
is_noarch = True
for meta in list_of_metas:
all_used_vars.update(meta.get_used_vars())
all_variants.update(
conda_build.utils.HashableDict(v) for v in meta.config.variants
)
all_variants.add(conda_build.utils.HashableDict(meta.config.variant))
if not meta.noarch:
is_noarch = False
top_level_loop_vars = list_of_metas[0].get_used_loop_vars(
force_top_level=True
)
top_level_vars = list_of_metas[0].get_used_vars(force_top_level=True)
if "target_platform" in all_used_vars:
top_level_loop_vars.add("target_platform")
# this is the initial collection of all variants before we discard any. "Squishing"
# them is necessary because the input form is already broken out into one matrix
# configuration per item, and we want a single dict, with each key representing many values
squished_input_variants = (
conda_build.variants.list_of_dicts_to_dict_of_lists(
list_of_metas[0].config.input_variants
)
)
squished_used_variants = (
conda_build.variants.list_of_dicts_to_dict_of_lists(list(all_variants))
)
# these are variables that only occur in the top level, and thus won't show up as loops in the
# above collection of all variants. We need to transfer them from the input_variants.
preserve_top_level_loops = set(top_level_loop_vars) - set(all_used_vars)
# Add in some variables that should always be preserved
always_keep_keys = {
"zip_keys",
"pin_run_as_build",
"MACOSX_DEPLOYMENT_TARGET",
"MACOSX_SDK_VERSION",
"macos_min_version",
"macos_machine",
"channel_sources",
"channel_targets",
"docker_image",
"build_number_decrement",
# The following keys are required for some of our aarch64 builds
# Added in https://github.com/conda-forge/conda-forge-pinning-feedstock/pull/180
"cdt_arch",
"cdt_name",
"BUILD",
}
if not is_noarch:
always_keep_keys.add("target_platform")
all_used_vars.update(always_keep_keys)
all_used_vars.update(top_level_vars)
used_key_values = {
key: squished_input_variants[key]
for key in all_used_vars
if key in squished_input_variants
}
for k, v in squished_used_variants.items():
if k in all_used_vars:
used_key_values[k] = v
for k in preserve_top_level_loops:
used_key_values[k] = squished_input_variants[k]
_trim_unused_zip_keys(used_key_values)
_trim_unused_pin_run_as_build(used_key_values)
# to deduplicate potentially zipped keys, we blow out the collection of variables, then
# do a set operation, then collapse it again
used_key_values = conda_build.variants.dict_of_lists_to_list_of_dicts(
used_key_values
)
used_key_values = {
conda_build.utils.HashableDict(variant) for variant in used_key_values
}
used_key_values = conda_build.variants.list_of_dicts_to_dict_of_lists(
list(used_key_values)
)
_trim_unused_zip_keys(used_key_values)
_trim_unused_pin_run_as_build(used_key_values)
logger.debug("top_level_loop_vars {}".format(top_level_loop_vars))
logger.debug("used_key_values {}".format(used_key_values))
return (
break_up_top_level_values(top_level_loop_vars, used_key_values),
top_level_loop_vars,
)
|
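The "squishing" step above relies on conda-build's helpers for flipping between a list of per-job variant dicts and a single dict of value lists. The sketch below is a simplified, plain-Python illustration of that idea; it is not the real conda_build.variants implementation, which also preserves positional alignment for zipped keys.

from collections import defaultdict

def squish_variants(variants):
    """Collapse one-dict-per-matrix-entry into a single dict of value lists."""
    squished = defaultdict(list)
    for variant in variants:
        for key, value in variant.items():
            if value not in squished[key]:
                squished[key].append(value)
    return dict(squished)

variants = [
    {"python": "3.8", "numpy": "1.19"},
    {"python": "3.9", "numpy": "1.19"},
]
print(squish_variants(variants))
# {'python': ['3.8', '3.9'], 'numpy': ['1.19']}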
32,636 | def fetch_incidents(args: dict, params: dict):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = params.get('first_fetch')
attribute_key = params.get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = params.get('AttributeValue')
fetch_limit = int(params.get('fetch_limit'))
if fetch_limit > 50 or fetch_limit <= 0:
fetch_limit = 50
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug("Fetch start time")
demisto.debug(str(fetch_start_time))
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(PaginationConfig={'MaxItems': fetch_limit}, **kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.debug("Last fetch time")
demisto.debug(str(last_fetch))
demisto.incidents(incidents)
| def fetch_incidents(args: dict, params: dict):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = params.get('first_fetch')
attribute_key = params.get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = params.get('AttributeValue')
fetch_limit = int(params.get('fetch_limit'))
if fetch_limit > 50 or fetch_limit <= 0:
fetch_limit = 50
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug("Fetch start time")
demisto.debug(str(fetch_start_time))
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(PaginationConfig={'MaxItems': fetch_limit}, **kwargs):
for event in response.get('Events', []):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.debug("Last fetch time")
demisto.debug(str(last_fetch))
demisto.incidents(incidents)
|
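The pagination pattern used above comes straight from boto3's CloudTrail client. Outside of the XSOAR wrapper, an equivalent standalone sketch (assuming AWS credentials and region are resolvable from the environment) looks like this:

import boto3
from datetime import datetime, timedelta

client = boto3.client("cloudtrail")  # credentials/region resolved by boto3

kwargs = {
    "LookupAttributes": [{"AttributeKey": "EventName", "AttributeValue": "ConsoleLogin"}],
    "StartTime": datetime.utcnow() - timedelta(hours=1),
}

paginator = client.get_paginator("lookup_events")
for page in paginator.paginate(PaginationConfig={"MaxItems": 50}, **kwargs):
    for event in page.get("Events", []):
        print(event.get("EventId"), event.get("EventTime"))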
23,213 | def install_pakcages_for_ja(app: Sphinx) -> None:
"""Install packages for Japanese."""
if app.config.language == 'ja' and app.config.latex_engine in ('platex', 'uplatex'):
app.add_latex_package('pxjahyper', after_hyperref=True)
| def install_packages_for_ja(app: Sphinx) -> None:
"""Install packages for Japanese."""
if app.config.language == 'ja' and app.config.latex_engine in ('platex', 'uplatex'):
app.add_latex_package('pxjahyper', after_hyperref=True)
|
31,316 | def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
| def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response.get("current_dns")
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
|
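The record flattening above merges every dict in ``values`` into one dict before rendering, so repeated keys keep only the last entry, which is the caveat noted in the code comment. A small standalone sketch (the payload shape is an illustrative assumption modeled on the ``current_dns`` structure):

record_values = {
    "first_seen": "2020-01-01",
    "values": [{"ip": "1.1.1.1"}, {"ip": "2.2.2.2"}],
}

temp_values = {}
for entry in record_values["values"]:
    temp_values.update(**entry)

record_values.update(temp_values)
del record_values["values"]
print(record_values)  # {'first_seen': '2020-01-01', 'ip': '2.2.2.2'}  <- last value wins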
55,015 | def derivative(H, x, i, delta=0.005291772):
r"""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian
with respect to the :math:`i`-th nuclear coordinate using a central difference approximation.
.. math::
\frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2)
- \hat{H}(x_i-\delta/2)}{\delta}
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian of the molecule for a given set of nuclear coordinates ``x``
x (array[float]): 1D array with the nuclear coordinates given in Angstroms.
The size of the array should be ``3*N`` where ``N`` is the number of atoms
in the molecule.
i (int): index of the nuclear coordinate involved in the derivative
:math:`\partial \hat{H}(x)/\partial x_i`
delta (float): Step size in Angstroms used to displace the nuclear coordinate.
Its default value corresponds to 0.01 Bohr radius.
Returns:
pennylane.Hamiltonian: the derivative of the Hamiltonian
:math:`\partial \hat{H}(x)/\partial x_i`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> print(derivative(H, x, 2))
(-0.7763135743293005) [I0]
+ (-0.08534360840293387) [Z0]
+ (-0.08534360840293387) [Z1]
+ (0.2669341092545041) [Z2]
+ (0.26693410925450134) [Z3]
+ (-0.025233628744274508) [Z0 Z1]
+ (0.0072162443961340415) [Y0 X1 X2 Y3]
+ (-0.0072162443961340415) [Y0 Y1 X2 X3]
+ (-0.0072162443961340415) [X0 X1 Y2 Y3]
+ (0.0072162443961340415) [X0 Y1 Y2 X3]
+ (-0.030654287745411964) [Z0 Z2]
+ (-0.023438043349280003) [Z0 Z3]
+ (-0.023438043349280003) [Z1 Z2]
+ (-0.030654287745411964) [Z1 Z3]
+ (-0.02494407786332001) [Z2 Z3]
"""
to_bohr = 1.8897261254535
# plus
x_plus = x.copy()
x_plus[i] += delta * 0.5
# minus
x_minus = x.copy()
x_minus[i] -= delta * 0.5
return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
| def derivative(H, x, i, delta=0.005291772):
r"""Compute the derivative :math:`\partial \hat{H}(x)/\partial x_i` of the electronic Hamiltonian
with respect to the :math:`i`-th nuclear coordinate using a central difference approximation.
.. math::
\frac{\partial \hat{H}(x)}{\partial x_i} \approx \frac{\hat{H}(x_i+\delta/2)
- \hat{H}(x_i-\delta/2)}{\delta}
Args:
H (callable): function with signature ``H(x)`` that builds the electronic
Hamiltonian of the molecule for a given set of nuclear coordinates ``x``.
x (array[float]): 1D array with the nuclear coordinates given in Angstroms.
The size of the array should be ``3*N`` where ``N`` is the number of atoms
in the molecule.
i (int): index of the nuclear coordinate involved in the derivative
:math:`\partial \hat{H}(x)/\partial x_i`
delta (float): Step size in Angstroms used to displace the nuclear coordinate.
Its default value corresponds to 0.01 Bohr radius.
Returns:
pennylane.Hamiltonian: the derivative of the Hamiltonian
:math:`\partial \hat{H}(x)/\partial x_i`
**Example**
>>> def H(x):
... return qml.qchem.molecular_hamiltonian(['H', 'H'], x)[0]
>>> x = np.array([0., 0., 0.35, 0., 0., -0.35])
>>> print(derivative(H, x, 2))
(-0.7763135743293005) [I0]
+ (-0.08534360840293387) [Z0]
+ (-0.08534360840293387) [Z1]
+ (0.2669341092545041) [Z2]
+ (0.26693410925450134) [Z3]
+ (-0.025233628744274508) [Z0 Z1]
+ (0.0072162443961340415) [Y0 X1 X2 Y3]
+ (-0.0072162443961340415) [Y0 Y1 X2 X3]
+ (-0.0072162443961340415) [X0 X1 Y2 Y3]
+ (0.0072162443961340415) [X0 Y1 Y2 X3]
+ (-0.030654287745411964) [Z0 Z2]
+ (-0.023438043349280003) [Z0 Z3]
+ (-0.023438043349280003) [Z1 Z2]
+ (-0.030654287745411964) [Z1 Z3]
+ (-0.02494407786332001) [Z2 Z3]
"""
to_bohr = 1.8897261254535
# plus
x_plus = x.copy()
x_plus[i] += delta * 0.5
# minus
x_minus = x.copy()
x_minus[i] -= delta * 0.5
return (H(x_plus) - H(x_minus)) * (delta * to_bohr) ** -1
|
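The central-difference recipe used above is easy to sanity-check on an ordinary scalar function before applying it to Hamiltonians. The following is an illustrative numpy sketch of the same idea, not PennyLane code:

import numpy as np

def central_difference(f, x, i, delta=1e-3):
    """Approximate df/dx_i via (f(x + delta/2 e_i) - f(x - delta/2 e_i)) / delta."""
    x_plus = x.copy()
    x_plus[i] += delta * 0.5
    x_minus = x.copy()
    x_minus[i] -= delta * 0.5
    return (f(x_plus) - f(x_minus)) / delta

f = lambda x: np.sum(x ** 2)          # gradient component is 2 * x[i]
x = np.array([0.0, 0.0, 0.35, 0.0, 0.0, -0.35])
print(central_difference(f, x, 2))    # ~0.7, matching 2 * 0.35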
30,959 | def check_if_user_exists_by_samaccountname(default_base_dn, page_size, samaccountname):
query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
entries = search_with_paging(
query,
default_base_dn,
attributes=["samaccountname"],
size_limit=DEFAULT_LIMIT,
page_size=page_size
)
if entries.get('flat'):
return True
return False
| def check_if_user_exists_by_samaccountname(default_base_dn, page_size, samaccountname):
query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
entries = search_with_paging(
query,
default_base_dn,
attributes=["samaccountname"],
size_limit=1,
page_size=page_size
)
if entries.get('flat'):
return True
return False
|
33,707 | def detect_config_single(func):
"""Check if func({}) workers."""
func_sig = inspect.signature(func)
use_config_single = True
try:
func_sig.bind({})
except Exception as e:
logger.debug(str(e))
use_config_single = False
return use_config_single
| def detect_config_single(func):
"""Check if func({}) works."""
func_sig = inspect.signature(func)
use_config_single = True
try:
func_sig.bind({})
except Exception as e:
logger.debug(str(e))
use_config_single = False
return use_config_single
|
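The signature check above can be exercised directly; the sketch below is a simplified check along the same lines, showing how ``inspect.signature(...).bind({})`` distinguishes a function that accepts a single config dict from one that needs more arguments (it catches TypeError rather than the broad Exception used above):

import inspect

def single_config(config):
    return config

def needs_two(config, extra):
    return config, extra

def accepts_single_dict(func):
    # succeed only if func({}) would bind against the signature
    try:
        inspect.signature(func).bind({})
        return True
    except TypeError:
        return False

print(accepts_single_dict(single_config))  # True
print(accepts_single_dict(needs_two))      # False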
5,406 | def wait_for_event(name, id_list, event_id="id", timeout=300, node="master"):
"""
Watch Salt's event bus and block until a condition is met
.. versionadded:: 2014.7.0
name
An event tag to watch for; supports Reactor-style globbing.
id_list
A list of event identifiers to watch for -- usually the minion ID. Each
time an event tag is matched the event data is inspected for
``event_id``, if found it is removed from ``id_list``. When ``id_list``
is empty this function returns success.
event_id : id
The name of a key in the event data. Default is ``id`` for the minion
ID, another common value is ``name`` for use with orchestrating
salt-cloud events.
timeout : 300
The maximum time in seconds to wait before failing.
The following example blocks until all the listed minions complete a
restart and reconnect to the Salt master:
.. code-block:: yaml
reboot_all_minions:
salt.function:
- name: system.reboot
- tgt: '*'
wait_for_reboots:
salt.wait_for_event:
- name: salt/minion/*/start
- id_list:
- jerry
- stuart
- dave
- phil
- kevin
- mike
- require:
- salt: reboot_all_minions
"""
ret = {"name": name, "changes": {}, "comment": "", "result": False}
if __opts__.get("test"):
ret["comment"] = "Orchestration would wait for event '{}'".format(name)
ret["result"] = None
return ret
with salt.utils.event.get_event(
node, __opts__["sock_dir"], __opts__["transport"], opts=__opts__, listen=True
) as sevent:
del_counter = 0
starttime = time.time()
timelimit = starttime + timeout
while True:
event = sevent.get_event(full=True)
is_timedout = time.time() > timelimit
if event is None and not is_timedout:
log.trace("wait_for_event: No event data; waiting.")
continue
elif event is None and is_timedout:
ret["comment"] = "Timeout value reached."
return ret
if fnmatch.fnmatch(event["tag"], name):
val = event["data"].get(event_id)
if val is None and "data" in event["data"]:
val = event["data"]["data"].get(event_id)
if val is not None:
if isinstance(val, list):
val_list = [id for id in id_list if id in val]
if len(val_list) == 0:
log.trace(
"wait_for_event: Event identifier '%s' not in "
"id_list; skipping",
event_id,
)
elif len(val_list) > 0:
minions_seen = ret["changes"].setdefault("minions_seen", [])
for found_val in val_list:
id_list.remove(found_val)
del_counter += 1
minions_seen.append(found_val)
log.debug(
"wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
found_val,
len(id_list),
)
else:
try:
val_idx = id_list.index(val)
except ValueError:
log.trace(
"wait_for_event: Event identifier '%s' not in "
"id_list; skipping.",
event_id,
)
else:
del id_list[val_idx]
del_counter += 1
minions_seen = ret["changes"].setdefault("minions_seen", [])
minions_seen.append(val)
log.debug(
"wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
val,
len(id_list),
)
else:
log.trace(
"wait_for_event: Event identifier '%s' not in event "
"'%s'; skipping.",
event_id,
event["tag"],
)
else:
log.debug("wait_for_event: Skipping unmatched event '%s'", event["tag"])
if len(id_list) == 0:
ret["result"] = True
ret["comment"] = "All events seen in {} seconds.".format(
time.time() - starttime
)
return ret
if is_timedout:
ret["comment"] = "Timeout value reached."
return ret
| def wait_for_event(name, id_list, event_id="id", timeout=300, node="master"):
"""
Watch Salt's event bus and block until a condition is met
.. versionadded:: 2014.7.0
name
An event tag to watch for; supports Reactor-style globbing.
id_list
A list of event identifiers to watch for -- usually the minion ID. Each
time an event tag is matched the event data is inspected for
``event_id``, if found it is removed from ``id_list``. When ``id_list``
is empty this function returns success.
event_id : id
The name of a key in the event data. Default is ``id`` for the minion
ID, another common value is ``name`` for use with orchestrating
salt-cloud events.
timeout : 300
The maximum time in seconds to wait before failing.
The following example blocks until all the listed minions complete a
restart and reconnect to the Salt master:
.. code-block:: yaml
reboot_all_minions:
salt.function:
- name: system.reboot
- tgt: '*'
wait_for_reboots:
salt.wait_for_event:
- name: salt/minion/*/start
- id_list:
- jerry
- stuart
- dave
- phil
- kevin
- mike
- require:
- salt: reboot_all_minions
"""
ret = {"name": name, "changes": {}, "comment": "", "result": False}
if __opts__.get("test"):
ret["comment"] = "Orchestration would wait for event '{}'".format(name)
ret["result"] = None
return ret
with salt.utils.event.get_event(
node, __opts__["sock_dir"], __opts__["transport"], opts=__opts__, listen=True
) as sevent:
del_counter = 0
starttime = time.time()
timelimit = starttime + timeout
while True:
event = sevent.get_event(full=True)
is_timedout = time.time() > timelimit
if event is None and not is_timedout:
log.trace("wait_for_event: No event data; waiting.")
continue
elif event is None and is_timedout:
ret["comment"] = "Timeout value reached."
return ret
if fnmatch.fnmatch(event["tag"], name):
val = event["data"].get(event_id)
if val is None and "data" in event["data"]:
val = event["data"]["data"].get(event_id)
if val is not None:
if isinstance(val, list):
val_list = [id for id in id_list if id in val]
if len(val_list) == 0:
log.trace(
"wait_for_event: Event identifier '%s' not in "
"id_list; skipping",
event_id,
)
else:
minions_seen = ret["changes"].setdefault("minions_seen", [])
for found_val in val_list:
id_list.remove(found_val)
del_counter += 1
minions_seen.append(found_val)
log.debug(
"wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
found_val,
len(id_list),
)
else:
try:
val_idx = id_list.index(val)
except ValueError:
log.trace(
"wait_for_event: Event identifier '%s' not in "
"id_list; skipping.",
event_id,
)
else:
del id_list[val_idx]
del_counter += 1
minions_seen = ret["changes"].setdefault("minions_seen", [])
minions_seen.append(val)
log.debug(
"wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
val,
len(id_list),
)
else:
log.trace(
"wait_for_event: Event identifier '%s' not in event "
"'%s'; skipping.",
event_id,
event["tag"],
)
else:
log.debug("wait_for_event: Skipping unmatched event '%s'", event["tag"])
if len(id_list) == 0:
ret["result"] = True
ret["comment"] = "All events seen in {} seconds.".format(
time.time() - starttime
)
return ret
if is_timedout:
ret["comment"] = "Timeout value reached."
return ret
|
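The tag matching in the loop above is plain ``fnmatch`` globbing, the same style used for Reactor tags. A quick standalone sketch with made-up tags:

import fnmatch

tags = [
    "salt/minion/web01/start",
    "salt/minion/db01/start",
    "salt/job/2024/ret/web01",
]
pattern = "salt/minion/*/start"

matching = [tag for tag in tags if fnmatch.fnmatch(tag, pattern)]
print(matching)  # ['salt/minion/web01/start', 'salt/minion/db01/start']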
44,910 | def test_azure_aws_creds_not_serialized():
azure = storage.Azure(
container="container", connection_string="conn", blob_name="name"
)
serialized = AzureSchema().dump(azure)
assert serialized
assert serialized["__version__"] == prefect.__version__
assert serialized["container"] == "container"
assert serialized["blob_name"] == "name"
assert serialized.get("connection_string") is None
| def test_azure_creds_not_serialized():
azure = storage.Azure(
container="container", connection_string="conn", blob_name="name"
)
serialized = AzureSchema().dump(azure)
assert serialized
assert serialized["__version__"] == prefect.__version__
assert serialized["container"] == "container"
assert serialized["blob_name"] == "name"
assert serialized.get("connection_string") is None
|
7,426 | def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbor set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern.
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'nri_uniform': non rotation-invariant uniform patterns variant
which is only gray scale invariant [2]_, [3]_.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] T. Ojala, M. Pietikainen, T. Maenpaa, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, no. 7, pp. 971-987, July 2002
:DOI:`10.1109/TPAMI.2002.1017623`
.. [2] T. Ahonen, A. Hadid and M. Pietikainen. "Face recognition with
local binary patterns", in Proc. Eighth European Conf. Computer
Vision, Prague, Czech Republic, May 11-14, 2004, pp. 469-481, 2004.
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851
:DOI:`10.1007/978-3-540-24670-1_36`
.. [3] T. Ahonen, A. Hadid and M. Pietikainen, "Face Description with
Local Binary Patterns: Application to Face Recognition",
IEEE Transactions on Pattern Analysis and Machine Intelligence,
vol. 28, no. 12, pp. 2037-2041, Dec. 2006
:DOI:`10.1109/TPAMI.2006.244.`
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
if np.issubdtype(image.dtype, np.floating):
warnings.warn(
"Applying `local_binary_pattern` to floating point images may "
"give unexpected results when small numerical differences between "
"adjacent pixels are present. It is recommended to use this "
"function with images having an integer dtype.")
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
| def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbor set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern.
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'nri_uniform': non rotation-invariant uniform patterns variant
which is only gray scale invariant [2]_, [3]_.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] T. Ojala, M. Pietikainen, T. Maenpaa, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, no. 7, pp. 971-987, July 2002
:DOI:`10.1109/TPAMI.2002.1017623`
.. [2] T. Ahonen, A. Hadid and M. Pietikainen. "Face recognition with
local binary patterns", in Proc. Eighth European Conf. Computer
Vision, Prague, Czech Republic, May 11-14, 2004, pp. 469-481, 2004.
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851
:DOI:`10.1007/978-3-540-24670-1_36`
.. [3] T. Ahonen, A. Hadid and M. Pietikainen, "Face Description with
Local Binary Patterns: Application to Face Recognition",
IEEE Transactions on Pattern Analysis and Machine Intelligence,
vol. 28, no. 12, pp. 2037-2041, Dec. 2006
:DOI:`10.1109/TPAMI.2006.244.`
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
if np.issubdtype(image.dtype, np.floating):
warnings.warn(
"Applying `local_binary_pattern` to floating point images may "
"give unexpected results when small numerical differences between "
"adjacent pixels are present. It is recommended to use this "
"function with images of integer dtype.")
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
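Typical usage of the descriptor above for texture features computes a histogram of the uniform pattern codes; the sketch below runs against scikit-image's public API on one of its bundled test images:

import numpy as np
from skimage import data
from skimage.feature import local_binary_pattern

image = data.camera()                 # 8-bit grayscale test image
P, R = 8, 1.0
lbp = local_binary_pattern(image, P, R, method="uniform")

# 'uniform' yields P + 2 distinct codes; a normalized histogram of them
# is a common texture feature vector
hist, _ = np.histogram(lbp, bins=np.arange(P + 3), density=True)
print(hist.shape)                     # (10,)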
559 | def sync_usercases_if_applicable(user, spawn_task):
if settings.UNIT_TESTING and not user.project:
return
if (user.project.call_center_config.enabled or user.project.usercase_enabled):
if spawn_task:
sync_usercases_task.delay(user._id, user.project)
else:
sync_usercases_task(user._id, user.project)
| def sync_usercases_if_applicable(user, spawn_task):
if settings.UNIT_TESTING and not user.project:
return
if (user.project.call_center_config.enabled or user.project.usercase_enabled):
if spawn_task:
sync_usercases_task.delay(user._id, user.domain)
else:
sync_usercases_task(user._id, user.project)
|
14,071 | def check_equality(left, right, check_less_precise):
if check_less_precise:
if not geom_almost_equals(left, right):
unequal_geoms = left[left.geom_almost_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
else:
if not geom_equals(left, right):
unequal_geoms = left[left.geom_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
| def check_equality(left, right, check_less_precise):
if check_less_precise:
if not geom_almost_equals(left, right):
unequal_geoms = left[left.geom_almost_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: {unequal_geoms}"
)
else:
if not geom_equals(left, right):
unequal_geoms = left[left.geom_equals(right)]
raise AssertionError(
f"{len(unequal_geoms)} out of {len(left)} geometries"
f" are not almost equal. These geometries are "
f"not almost equal: \n{unequal_geoms}"
)
|
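For context, the element-wise comparisons used above come from GeoPandas. A small standalone sketch of the underlying call follows; note that selecting the rows that differ requires negating the boolean mask:

import geopandas as gpd
from shapely.geometry import Point

left = gpd.GeoSeries([Point(0, 0), Point(1, 1)])
right = gpd.GeoSeries([Point(0, 0), Point(1, 2)])

mask = left.geom_equals(right)        # element-wise exact comparison
unequal = left[~mask]                 # negate the mask to keep the differing geometries
print(unequal)                        # only the geometry at index 1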
35,738 | def make_segmentation_masks(
image_sizes=((32, 32), (32, 42)),
dtypes=(torch.long,),
extra_dims=((), (4,)),
):
for image_size, dtype, extra_dims_ in itertools.product(image_sizes, dtypes, extra_dims):
yield make_segmentation_mask(size=image_size, dtype=dtype, extra_dims=extra_dims_)
| def make_segmentation_masks(
image_sizes=((32, 32), (32, 42)),
dtypes=(torch.long,),
extra_dims=((), (4,), (2, 3)),
):
for image_size, dtype, extra_dims_ in itertools.product(image_sizes, dtypes, extra_dims):
yield make_segmentation_mask(size=image_size, dtype=dtype, extra_dims=extra_dims_)
|
38,395 | def _setup_polar_coordinates(registry, axis_id):
f1, f2 = _get_coord_fields(axis_id["r"])
registry.add_field(
("index", "dr"),
sampling_type="cell",
function=f1,
display_field=False,
units="code_length",
)
registry.add_field(
("index", "r"),
sampling_type="cell",
function=f2,
display_field=False,
units="code_length",
)
f1, f2 = _get_coord_fields(axis_id["theta"], "")
registry.add_field(
("index", "dtheta"),
sampling_type="cell",
function=f1,
display_field=False,
units="",
)
registry.add_field(
("index", "theta"),
sampling_type="cell",
function=f2,
display_field=False,
units="",
)
def _path_r(field, data):
return data["index", "dr"]
registry.add_field(
("index", "path_element_r"),
sampling_type="cell",
function=_path_r,
units="code_length",
)
def _path_theta(field, data):
# Note: this already assumes cell-centered
return data["index", "r"] * data["index", "dtheta"]
registry.add_field(
("index", "path_element_theta"),
sampling_type="cell",
function=_path_theta,
units="code_length",
)
| def _setup_polar_coordinates(registry, axis_id):
f1, f2 = _get_coord_fields(axis_id["r"])
registry.add_field(
("index", "dr"),
sampling_type="cell",
function=f1,
display_field=False,
units="code_length",
)
registry.add_field(
("index", "r"),
sampling_type="cell",
function=f2,
display_field=False,
units="code_length",
)
f1, f2 = _get_coord_fields(axis_id["theta"], "")
registry.add_field(
("index", "dtheta"),
sampling_type="cell",
function=f1,
display_field=False,
units="",
)
registry.add_field(
("index", "theta"),
sampling_type="cell",
function=f2,
display_field=False,
units="dimensionless",
)
def _path_r(field, data):
return data["index", "dr"]
registry.add_field(
("index", "path_element_r"),
sampling_type="cell",
function=_path_r,
units="code_length",
)
def _path_theta(field, data):
# Note: this already assumes cell-centered
return data["index", "r"] * data["index", "dtheta"]
registry.add_field(
("index", "path_element_theta"),
sampling_type="cell",
function=_path_theta,
units="code_length",
)
|
41,236 | def _get_random_placement(
problem_topology: 'cirq.NamedTopology',
device: 'cirq.Device',
rs: np.random.RandomState,
topo_node_to_qubit_func: Callable[[Any], 'cirq.Qid'] = default_topo_node_to_qubit,
) -> Dict['cirq.Qid', 'cirq.Qid']:
"""Place `problem_topology` randomly onto a device.
This is a helper function used by `RandomDevicePlacer.place_circuit`.
"""
placements = _cached_get_placements(problem_topology, device)
if len(placements) == 0:
raise CouldNotPlaceError
random_i = int(rs.random_integers(0, len(placements) - 1, size=1))
placement = placements[random_i]
placement_gq = {topo_node_to_qubit_func(k): v for k, v in placement.items()}
return placement_gq
| def _get_random_placement(
problem_topology: 'cirq.NamedTopology',
device: 'cirq.Device',
rs: np.random.RandomState,
topo_node_to_qubit_func: Callable[[Any], 'cirq.Qid'] = default_topo_node_to_qubit,
) -> Dict['cirq.Qid', 'cirq.Qid']:
"""Place `problem_topology` randomly onto a device.
This is a helper function used by `RandomDevicePlacer.place_circuit`.
"""
placements = _cached_get_placements(problem_topology, device)
if len(placements) == 0:
raise CouldNotPlaceError
random_i = rs.randint(len(placements))
placement = placements[random_i]
placement_gq = {topo_node_to_qubit_func(k): v for k, v in placement.items()}
return placement_gq
|
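The change above swaps the deprecated ``RandomState.random_integers`` for ``randint``; both pick a valid index into the placement list, as this small numpy sketch shows (the placement values are placeholders):

import numpy as np

placements = ["placement_a", "placement_b", "placement_c"]
rs = np.random.RandomState(seed=1234)

# randint draws from the half-open range [0, len(placements)),
# so every index is reachable and no off-by-one bound is needed
random_i = rs.randint(len(placements))
print(placements[random_i])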
55,054 | def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
    Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
    With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
| def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
.. note::
        With backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
    With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
    By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
49,570 | def set_index(
df,
index,
npartitions=None,
shuffle=None,
compute=False,
drop=True,
upsample=1.0,
divisions=None,
partition_size=128e6,
**kwargs,
):
""" See _Frame.set_index for docstring """
if isinstance(index, Series) and index._name == df.index._name:
return df
if isinstance(index, (DataFrame, tuple, list)):
# Accept ["a"], but not [["a"]]
if (
isinstance(index, list)
and len(index) == 1
and not isinstance(index[0], list) # if index = [["a"]], leave it that way
):
index = index[0]
else:
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
if divisions is None:
if repartition:
index2, df = base.optimize(index2, df)
parts = df.to_delayed(optimize_graph=False)
sizes = [delayed(sizeof)(part) for part in parts]
else:
(index2,) = base.optimize(index2)
sizes = []
divisions = index2._repartition_quantiles(npartitions, upsample=upsample)
mins = index2.map_partitions(operator.methodcaller("min"))
maxes = index2.map_partitions(operator.methodcaller("max"))
sizes, mins, maxes = base.optimize(sizes, mins, maxes)
divisions, sizes, mins, maxes = base.compute(
divisions, sizes, mins, maxes, optimize_graph=False
)
divisions = methods.tolist(divisions)
mins = methods.tolist(mins)
maxes = methods.tolist(maxes)
empty_dataframe_detected = pd.isnull(divisions).all()
if repartition or empty_dataframe_detected:
total = sum(sizes)
npartitions = max(math.ceil(total / partition_size), 1)
npartitions = min(npartitions, df.npartitions)
n = len(divisions)
try:
divisions = np.interp(
x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions,
).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = [divisions[i] for i in indexes]
mins = remove_nans(mins)
maxes = remove_nans(maxes)
if pd.api.types.is_categorical_dtype(index2.dtype):
dtype = index2.dtype
mins = pd.Categorical(mins, dtype=dtype).codes.tolist()
maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
):
divisions = mins + [maxes[-1]]
result = set_sorted_index(df, index, drop=drop, divisions=divisions)
return result.map_partitions(M.sort_index)
return set_partition(
df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs
)
| def set_index(
df,
index,
npartitions=None,
shuffle=None,
compute=False,
drop=True,
upsample=1.0,
divisions=None,
partition_size=128e6,
**kwargs,
):
""" See _Frame.set_index for docstring """
if isinstance(index, Series) and index._name == df.index._name:
return df
if isinstance(index, (DataFrame, tuple, list)):
# Accept ["a"], but not [["a"]]
if (
isinstance(index, list)
and len(index) == 1
and not isinstance(index[0], list) # if index = [["a"]], leave it that way
):
index = index[0]
else:
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
if divisions is None:
if repartition:
index2, df = base.optimize(index2, df)
parts = df.to_delayed(optimize_graph=False)
sizes = [delayed(sizeof)(part) for part in parts]
else:
(index2,) = base.optimize(index2)
sizes = []
divisions = index2._repartition_quantiles(npartitions, upsample=upsample)
mins = index2.map_partitions(M.min)
maxes = index2.map_partitions(M.max)
sizes, mins, maxes = base.optimize(sizes, mins, maxes)
divisions, sizes, mins, maxes = base.compute(
divisions, sizes, mins, maxes, optimize_graph=False
)
divisions = methods.tolist(divisions)
mins = methods.tolist(mins)
maxes = methods.tolist(maxes)
empty_dataframe_detected = pd.isnull(divisions).all()
if repartition or empty_dataframe_detected:
total = sum(sizes)
npartitions = max(math.ceil(total / partition_size), 1)
npartitions = min(npartitions, df.npartitions)
n = len(divisions)
try:
divisions = np.interp(
x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions,
).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = [divisions[i] for i in indexes]
mins = remove_nans(mins)
maxes = remove_nans(maxes)
if pd.api.types.is_categorical_dtype(index2.dtype):
dtype = index2.dtype
mins = pd.Categorical(mins, dtype=dtype).codes.tolist()
maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
):
divisions = mins + [maxes[-1]]
result = set_sorted_index(df, index, drop=drop, divisions=divisions)
return result.map_partitions(M.sort_index)
return set_partition(
df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs
)
|
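The ``np.interp`` call above is what downsamples the oversampled division list when ``npartitions='auto'``; the standalone numpy sketch below reproduces just that step on illustrative division values:

import numpy as np

# an oversampled, sorted list of candidate divisions (illustrative values)
divisions = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
n = len(divisions)
npartitions = 4  # target partition count

new_divisions = np.interp(
    x=np.linspace(0, n - 1, npartitions + 1),   # where we want division points
    xp=np.linspace(0, n - 1, n),                # positions of the existing points
    fp=divisions,                               # the existing division values
).tolist()
print(new_divisions)  # [0.0, 25.0, 50.0, 75.0, 100.0]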
20,403 | def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=False, revert=False, accept_disclaimer=False):
"""
Perform migrations
    targets A list of migrations to run (all pending ones by default)
    --skip Skip specified migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --auto Automatic mode, won't run manual migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --force-rerun Re-run already-run migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --revert Attempt to revert already-run migrations (you must explicitly specify which migrations)
--accept-disclaimer Accept disclaimers of migrations (please read them before using this option) (only valid for one migration)
"""
all_migrations = _get_migrations_list()
    # Small utility that allows us to get a migration given a name, id or number later
def get_matching_migration(target):
for m in all_migrations:
if m.id == target or m.name == target or m.id.split("_")[0] == target:
return m
raise YunohostError("migrations_no_such_migration", id=target)
# auto, skip, revert and force are exclusive options
if auto + skip + revert + force_rerun > 1:
raise YunohostError("migrations_exclusive_options")
# If no target specified
if not targets:
# skip, revert or force require explicit targets
if (revert or force_rerun):
raise YunohostError("migrations_must_provide_explicit_targets")
# Otherwise, targets are all pending migrations
targets = [m for m in all_migrations if m.state == "pending"]
# If explicit targets are provided, we shall validate them
else:
targets = [get_matching_migration(t) for t in targets]
done = [t.id for t in targets if t.state != "pending"]
pending = [t.id for t in targets if t.state == "pending"]
if skip and done:
raise YunohostError("migrations_not_pending_cant_skip", ids=', '.join(done))
if (revert or force_rerun) and pending:
raise YunohostError("migrations_pending_cant_revert_or_rerun", ids=', '.join(pending))
if not (skip or revert or force_rerun) and done:
raise YunohostError("migrations_already_ran", ids=', '.join(done))
# So, is there actually something to do ?
if not targets:
logger.info(m18n.n('migrations_no_migrations_to_run'))
return
# Actually run selected migrations
for migration in targets:
# If we are migrating in "automatic mode" (i.e. from debian configure
# during an upgrade of the package) but we are asked to run migrations
        # to be run manually by the user, stop there and ask the user to
# run the migration manually.
if auto and migration.mode == "manual":
logger.warn(m18n.n('migrations_to_be_ran_manually', id=migration.id))
# We go to the next migration
continue
# Check for migration dependencies
if not revert and not skip:
dependencies = [get_matching_migration(dep) for dep in migration.dependencies]
pending_dependencies = [dep.id for dep in dependencies if dep.state == "pending"]
if pending_dependencies:
logger.error(m18n.n('migrations_dependencies_not_satisfied',
id=migration.id,
dependencies_id=', '.join(pending_dependencies)))
continue
# If some migrations have disclaimers (and we're not trying to skip them)
if migration.disclaimer and not skip and not revert:
# require the --accept-disclaimer option.
# Otherwise, go to the next migration
if not accept_disclaimer:
logger.warn(m18n.n('migrations_need_to_accept_disclaimer',
id=migration.id,
disclaimer=migration.disclaimer))
continue
# --accept-disclaimer will only work for the first migration
else:
accept_disclaimer = False
# Start register change on system
mode = "backward" if revert else "forward"
operation_logger = OperationLogger('tools_migrations_migrate_' + mode)
operation_logger.start()
if skip:
logger.warn(m18n.n('migrations_skip_migration', id=migration.id))
migration.state = "skipped"
_write_migration_state(migration.id, "skipped")
operation_logger.success()
else:
try:
migration.operation_logger = operation_logger
if revert:
logger.info(m18n.n('migrations_running_backward', id=migration.id))
migration.backward()
else:
logger.info(m18n.n('migrations_running_forward', id=migration.id))
migration.migrate()
except Exception as e:
# migration failed, let's stop here but still update state because
# we managed to run the previous ones
msg = m18n.n('migrations_migration_has_failed',
exception=e, id=migration.id)
logger.error(msg, exc_info=1)
operation_logger.error(msg)
else:
if revert:
logger.success(m18n.n('migrations_success_revert', id=migration.id))
migration.state = "pending"
_write_migration_state(migration.id, "pending")
else:
logger.success(m18n.n('migrations_success_forward', id=migration.id))
migration.state = "done"
_write_migration_state(migration.id, "done")
operation_logger.success()
| def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=False, revert=False, accept_disclaimer=False):
"""
Perform migrations
    targets A list of migrations to run (all pending ones by default)
    --skip Skip specified migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --auto Automatic mode, won't run manual migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --force-rerun Re-run already-run migrations (to be used only if you know what you are doing) (you must explicitly specify which migrations)
    --revert Attempt to revert already-run migrations (you must explicitly specify which migrations)
--accept-disclaimer Accept disclaimers of migrations (please read them before using this option) (only valid for one migration)
"""
all_migrations = _get_migrations_list()
    # Small utility that allows us to get a migration given a name, id or number later
def get_matching_migration(target):
for m in all_migrations:
if m.id == target or m.name == target or m.id.split("_")[0] == target:
return m
raise YunohostError("migrations_no_such_migration", id=target)
# auto, skip, revert and force are exclusive options
if auto + skip + revert + force_rerun > 1:
raise YunohostError("migrations_exclusive_options")
# If no target specified
if not targets:
# skip, revert or force require explicit targets
if (revert or force_rerun):
raise YunohostError("migrations_must_provide_explicit_targets")
# Otherwise, targets are all pending migrations
targets = [m for m in all_migrations if m.state == "pending"]
# If explicit targets are provided, we shall validate them
else:
targets = [get_matching_migration(t) for t in targets]
done = [t.id for t in targets if t.state != "done"]
pending = [t.id for t in targets if t.state == "pending"]
if skip and done:
raise YunohostError("migrations_not_pending_cant_skip", ids=', '.join(done))
if (revert or force_rerun) and pending:
raise YunohostError("migrations_pending_cant_revert_or_rerun", ids=', '.join(pending))
if not (skip or revert or force_rerun) and done:
raise YunohostError("migrations_already_ran", ids=', '.join(done))
# So, is there actually something to do ?
if not targets:
logger.info(m18n.n('migrations_no_migrations_to_run'))
return
# Actually run selected migrations
for migration in targets:
# If we are migrating in "automatic mode" (i.e. from debian configure
# during an upgrade of the package) but we are asked to run migrations
        # to be run manually by the user, stop there and ask the user to
# run the migration manually.
if auto and migration.mode == "manual":
logger.warn(m18n.n('migrations_to_be_ran_manually', id=migration.id))
# We go to the next migration
continue
# Check for migration dependencies
if not revert and not skip:
dependencies = [get_matching_migration(dep) for dep in migration.dependencies]
pending_dependencies = [dep.id for dep in dependencies if dep.state == "pending"]
if pending_dependencies:
logger.error(m18n.n('migrations_dependencies_not_satisfied',
id=migration.id,
dependencies_id=', '.join(pending_dependencies)))
continue
# If some migrations have disclaimers (and we're not trying to skip them)
if migration.disclaimer and not skip and not revert:
# require the --accept-disclaimer option.
# Otherwise, go to the next migration
if not accept_disclaimer:
logger.warn(m18n.n('migrations_need_to_accept_disclaimer',
id=migration.id,
disclaimer=migration.disclaimer))
continue
# --accept-disclaimer will only work for the first migration
else:
accept_disclaimer = False
# Start register change on system
mode = "backward" if revert else "forward"
operation_logger = OperationLogger('tools_migrations_migrate_' + mode)
operation_logger.start()
if skip:
logger.warn(m18n.n('migrations_skip_migration', id=migration.id))
migration.state = "skipped"
_write_migration_state(migration.id, "skipped")
operation_logger.success()
else:
try:
migration.operation_logger = operation_logger
if revert:
logger.info(m18n.n('migrations_running_backward', id=migration.id))
migration.backward()
else:
logger.info(m18n.n('migrations_running_forward', id=migration.id))
migration.migrate()
except Exception as e:
# migration failed, let's stop here but still update state because
# we managed to run the previous ones
msg = m18n.n('migrations_migration_has_failed',
exception=e, id=migration.id)
logger.error(msg, exc_info=1)
operation_logger.error(msg)
else:
if revert:
logger.success(m18n.n('migrations_success_revert', id=migration.id))
migration.state = "pending"
_write_migration_state(migration.id, "pending")
else:
logger.success(m18n.n('migrations_success_forward', id=migration.id))
migration.state = "done"
_write_migration_state(migration.id, "done")
operation_logger.success()
|
49,888 | def haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith=None, solar_azimuth=None, projection_ratio=None):
r'''
Determine diffuse irradiance from the sky on a tilted surface using
Hay & Davies' 1980 model
.. math::
I_{d} = DHI ( A R_b + (1 - A) (\frac{1 + \cos\beta}{2}) )
Hay and Davies' 1980 model determines the diffuse irradiance from
the sky (ground reflected irradiance is not included in this
algorithm) on a tilted surface using the surface tilt angle, surface
azimuth angle, diffuse horizontal irradiance, direct normal
irradiance, extraterrestrial irradiance, sun zenith angle, and sun
azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. The tilt angle is
defined as degrees from horizontal (e.g. surface facing up = 0,
surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. The azimuth
convention is defined as degrees east of north (e.g. North=0,
South=180, East=90, West=270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2.
dni : numeric
Direct normal irradiance in W/m^2.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : None or numeric, default None
Solar apparent (refraction-corrected) zenith angles in decimal
degrees. Must supply ``solar_zenith`` and ``solar_azimuth`` or
supply ``projection_ratio``.
solar_azimuth : None or numeric, default None
Solar azimuth angles in decimal degrees. Must supply
``solar_zenith`` and ``solar_azimuth`` or supply
``projection_ratio``.
projection_ratio : None or numeric, default None
Ratio of angle of incidence projection to solar zenith angle
projection. Must supply ``solar_zenith`` and ``solar_azimuth``
or supply ``projection_ratio``.
Returns
--------
sky_diffuse : numeric
The sky diffuse component of the solar radiation.
Notes
------
When supplying projection_ratio, consider constraining its values
when zenith angle approaches 90 degrees. See code for details.
References
-----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Hay, J.E., Davies, J.A., 1980. Calculations of the solar
radiation incident on an inclined surface. In: Hay, J.E., Won, T.K.
(Eds.), Proc. of First Canadian Solar Radiation Data Workshop, 59.
Ministry of Supply and Services, Canada.
'''
    # if necessary, calculate ratio of tilted and horizontal beam irradiance
if projection_ratio is None:
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0) # GH 526
cos_solar_zenith = tools.cosd(solar_zenith)
Rb = cos_tt / np.maximum(cos_solar_zenith, 0.01745) # GH 432
else:
Rb = projection_ratio
# Anisotropy Index
AI = dni / dni_extra
# these are the () and [] sub-terms of the second term of eqn 7
term1 = 1 - AI
term2 = 0.5 * (1 + tools.cosd(surface_tilt))
sky_diffuse = dhi * (AI * Rb + term1 * term2)
sky_diffuse = np.maximum(sky_diffuse, 0)
return sky_diffuse
| def haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith=None, solar_azimuth=None, projection_ratio=None):
r'''
Determine diffuse irradiance from the sky on a tilted surface using
Hay & Davies' 1980 model
.. math::
I_{d} = DHI ( A R_b + (1 - A) (\frac{1 + \cos\beta}{2}) )
Hay and Davies' 1980 model determines the diffuse irradiance from
the sky (ground reflected irradiance is not included in this
algorithm) on a tilted surface using the surface tilt angle, surface
azimuth angle, diffuse horizontal irradiance, direct normal
irradiance, extraterrestrial irradiance, sun zenith angle, and sun
azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. The tilt angle is
defined as degrees from horizontal (e.g. surface facing up = 0,
surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. The azimuth
convention is defined as degrees east of north (e.g. North=0,
South=180, East=90, West=270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2.
dni : numeric
Direct normal irradiance in W/m^2.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : None or numeric, default None
Solar apparent (refraction-corrected) zenith angles in decimal
degrees. Must supply ``solar_zenith`` and ``solar_azimuth`` or
supply ``projection_ratio``.
solar_azimuth : None or numeric, default None
Solar azimuth angles in decimal degrees. Must supply
``solar_zenith`` and ``solar_azimuth`` or supply
``projection_ratio``.
projection_ratio : None or numeric, default None
Ratio of angle of incidence projection to solar zenith angle
projection. Must supply ``solar_zenith`` and ``solar_azimuth``
or supply ``projection_ratio``.
Returns
--------
sky_diffuse : numeric
The sky diffuse component of the solar radiation.
Notes
------
When supplying ``projection_ratio``, consider constraining its values
when zenith angle approaches 90 degrees or angle of incidence
projection is negative. See code for details.
References
-----------
    .. [1] Loutzenhiser P.G. et al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Hay, J.E., Davies, J.A., 1980. Calculations of the solar
radiation incident on an inclined surface. In: Hay, J.E., Won, T.K.
(Eds.), Proc. of First Canadian Solar Radiation Data Workshop, 59.
Ministry of Supply and Services, Canada.
'''
    # if necessary, calculate ratio of tilted and horizontal beam irradiance
if projection_ratio is None:
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0) # GH 526
cos_solar_zenith = tools.cosd(solar_zenith)
Rb = cos_tt / np.maximum(cos_solar_zenith, 0.01745) # GH 432
else:
Rb = projection_ratio
# Anisotropy Index
AI = dni / dni_extra
# these are the () and [] sub-terms of the second term of eqn 7
term1 = 1 - AI
term2 = 0.5 * (1 + tools.cosd(surface_tilt))
sky_diffuse = dhi * (AI * Rb + term1 * term2)
sky_diffuse = np.maximum(sky_diffuse, 0)
return sky_diffuse
|
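As a quick numeric check of the formula quoted in the docstring, the Hay & Davies sky-diffuse term can be evaluated directly with numpy, independently of the pvlib helpers (aoi_projection, tools.cosd) that the function itself uses; the input values below are purely illustrative.

import numpy as np

def hd_sky_diffuse(surface_tilt, dhi, dni, dni_extra, cos_aoi, cos_zenith):
    rb = max(cos_aoi, 0) / max(cos_zenith, 0.01745)      # beam projection ratio
    ai = dni / dni_extra                                 # anisotropy index
    iso = 0.5 * (1 + np.cos(np.radians(surface_tilt)))   # isotropic view factor
    return max(dhi * (ai * rb + (1 - ai) * iso), 0)

print(hd_sky_diffuse(surface_tilt=30, dhi=100, dni=800,
                     dni_extra=1400, cos_aoi=0.9, cos_zenith=0.8))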
46,406 | def train_test_split(
data: Union[TimeSeries, Sequence[TimeSeries]],
test_size: Optional[Union[float, int]] = 0.25,
axis: Optional[int] = 0,
input_size: Optional[int] = 0,
horizon: Optional[int] = 0,
vertical_split_type: Optional[str] = 'simple',
lazy: bool = False
) -> Union[Tuple[TimeSeries, TimeSeries], Tuple[Sequence[TimeSeries], Sequence[TimeSeries]]]:
"""
Splits the dataset into training and test dataset. Supports splitting along the sample axis and time axis.
    If the input type is a single TimeSeries, then only splitting over the time axis is available, thus ``n`` and
    ``horizon`` have to be provided.
    When splitting over the time axis, the splitter tries to greedily satisfy the requested test set size, i.e. when one
    of the timeseries in the sequence is too small, all of its samples will go to the test set and a warning will be issued.
Parameters
----------
data
original dataset to split into training and test
test_size
        size of the test set. If the value is between 0 and 1, the parameter is treated as a split proportion. Otherwise
        it is treated as an absolute number of samples from each timeseries that will be in the test set. [default = 0.25]
axis
Axis to split the dataset on. When 0 (default) it is split on samples. Otherwise, if ``axis = 1``,
timeseries are split along time axis (columns). Note that for single timeseries the default option is 1 (0 makes
no sense). [default: 0 for sequence of timeseries, 1 for timeseries]
input_size
size of the input [default: 0]
horizon
forecast horizon [default: 0]
vertical_split_type
        can be either ``simple``, where the exact number given by the test size will be deducted from each timeseries
        for the test set and the remainder will go to the training set; or ``model-aware``, where ``input_size`` and
        ``horizon`` have to be provided as well. Note that the second option is more efficient timestep-wise, since the
        training and test sets will be partially overlapping. [default: ``simple``]
lazy
        by default, train and test datasets are returned as sequences of timeseries. However, this may be memory
        inefficient if the dataset is large. Warning: turning ``lazy`` on disables some sanity checks on the datasets,
        which may result in exceptions during sample generation. [default: False]
Returns
-------
tuple of two Sequence[TimeSeries], or tuple of two Timeseries
Training and test datasets tuple.
"""
return SplitTimeSeriesSequence.make_splitter(data, test_size, axis, input_size, horizon, vertical_split_type, lazy) | def train_test_split(
data: Union[TimeSeries, Sequence[TimeSeries]],
test_size: Optional[Union[float, int]] = 0.25,
axis: Optional[int] = 0,
input_size: Optional[int] = 0,
horizon: Optional[int] = 0,
vertical_split_type: Optional[str] = 'simple',
lazy: bool = False
) -> Union[Tuple[TimeSeries, TimeSeries], Tuple[Sequence[TimeSeries], Sequence[TimeSeries]]]:
"""
Splits the dataset into training and test dataset. Supports splitting along the sample axis and time axis.
    If the input type is a single TimeSeries, then only splitting over the time axis is available, thus ``n`` and
    ``horizon`` have to be provided.
    When splitting over the time axis, the splitter tries to greedily satisfy the requested test set size, i.e. when one
    of the timeseries in the sequence is too small, all of its samples will go to the test set and a warning will be issued.
Parameters
----------
data
original dataset to split into training and test
test_size
        size of the test set. If the value is between 0 and 1, the parameter is treated as a split proportion. Otherwise
        it is treated as an absolute number of samples from each timeseries that will be in the test set. [default = 0.25]
axis
Axis to split the dataset on. When 0 (default) it is split on samples. Otherwise, if ``axis = 1``,
timeseries are split along time axis. Note that for single timeseries the default option is 1 (0 makes
no sense). [default: 0 for sequence of timeseries, 1 for timeseries]
input_size
size of the input [default: 0]
horizon
forecast horizon [default: 0]
vertical_split_type
        can be either ``simple``, where the exact number given by the test size will be deducted from each timeseries
        for the test set and the remainder will go to the training set; or ``model-aware``, where ``input_size`` and
        ``horizon`` have to be provided as well. Note that the second option is more efficient timestep-wise, since the
        training and test sets will be partially overlapping. [default: ``simple``]
lazy
        by default, train and test datasets are returned as sequences of timeseries. However, this may be memory
        inefficient if the dataset is large. Warning: turning ``lazy`` on disables some sanity checks on the datasets,
        which may result in exceptions during sample generation. [default: False]
Returns
-------
tuple of two Sequence[TimeSeries], or tuple of two Timeseries
Training and test datasets tuple.
"""
return SplitTimeSeriesSequence.make_splitter(data, test_size, axis, input_size, horizon, vertical_split_type, lazy) |
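The difference between the ``simple`` and ``model-aware`` vertical splits described in the docstring can be illustrated on a plain Python list. This is only a rough sketch of the idea; the real splitter delegates to SplitTimeSeriesSequence, and its exact overlap arithmetic may differ.

def vertical_split(series, test_size=0.25, split_type="simple",
                   input_size=0, horizon=0):
    # Number of timesteps reserved for the test set.
    n_test = int(len(series) * test_size) if 0 < test_size < 1 else int(test_size)
    if split_type == "simple":
        return series[:-n_test], series[-n_test:]
    # "model-aware": test samples reuse the last `input_size` training points,
    # so the two parts overlap by roughly input_size + horizon - 1 timesteps.
    overlap = input_size + horizon - 1
    return series[:-n_test], series[-(n_test + overlap):]


train, test = vertical_split(list(range(20)), test_size=0.25,
                             split_type="model-aware", input_size=3, horizon=2)
print(len(train), len(test))  # 15 9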
39,155 | def wav2vec2_model(
extractor_mode: str,
extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
extractor_conv_bias: bool,
encoder_embed_dim: int,
encoder_projection_dropout: float,
encoder_pos_conv_kernel: int,
encoder_pos_conv_groups: int,
encoder_num_layers: int,
encoder_num_heads: int,
encoder_attention_dropout: float,
encoder_ff_interm_features: int,
encoder_ff_interm_dropout: float,
encoder_dropout: float,
encoder_layer_norm_first: bool,
encoder_layer_drop: float,
aux_num_out: Optional[int],
) -> Wav2Vec2Model:
"""Build a custom Wav2Vec2Model
Note:
The "feature extractor" below corresponds to
`ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
in the original ``fairseq`` implementation.
        This is referred to as "(convolutional) feature encoder" in the *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] paper.
The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
        and this is referred to as "Transformer" in the paper.
Args:
extractor_mode (str): Operation mode of feature extractor.
Valid values are ``"group_norm"`` or ``"layer_norm"``.
If ``"group_norm"``, then a single normalization is applied
in the first convolution block. Otherwise, all the convolution
blocks will have layer normalization.
This option corresponds to ``extractor_mode`` from ``fairseq``.
extractor_conv_layer_config (list of integer tuples or None, optional):
Configuration of convolution layers in feature extractor.
List of convolution configuration,
i.e. ``[(output_channel, kernel_size, stride), ...]``
If ``None`` is provided, then the following default value is used.
.. code-block:: python
[
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
]
This option corresponds to ``conv_feature_layers`` from ``fairseq``.
extractor_conv_bias (bool):
Whether to include bias term to each convolution operation.
This option corresponds to ``conv_bias`` from ``fairseq``.
encoder_embed_dim (int):
The dimension of embedding in encoder.
This option corresponds to ``encoder_embed_dim`` from ``fairseq``.
encoder_projection_dropout (float):
The dropout probability applied after the input feature is projected
to ``encoder_embed_dim``.
This option corresponds to ``dropout_input`` from ``fairseq``.
encoder_pos_conv_kernel (int):
The kernel size of convolutional positional embeddings.
This option corresponds to ``conv_pos`` from ``fairseq``.
encoder_pos_conv_groups (int):
The number of groups of convolutional positional embeddings.
This option corresponds to ``conv_pos_groups`` from ``fairseq``.
encoder_num_layers (int):
The number of self attention layers in transformer block.
This option corresponds to ``encoder_layers`` from ``fairseq``.
encoder_num_heads (int):
The number of heads in self attention layers.
This option corresponds to ``encoder_attention_heads`` from ``fairseq``.
encoder_attention_dropout (float):
The dropout probability applied after softmax in self-attention layer.
This option corresponds to ``attention_dropout`` from ``fairseq``.
encoder_ff_interm_features (int):
The dimension of hidden features in feed forward layer.
This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.
encoder_ff_interm_dropout (float):
The dropout probability applied in feedforward layer.
            This option corresponds to ``activation_dropout`` from ``fairseq``.
encoder_dropout (float):
The dropout probability applied at the end of feed forward layer.
This option corresponds to ``dropout`` from ``fairseq``.
encoder_layer_norm_first (bool):
Control the order of layer norm in transformer layer and each encoder layer.
If True, in transformer layer, layer norm is applied before features are fed
to encoder layers. In encoder layer, two layer norms are applied before and after
self attention.
If False, in transformer layer, layer norm is applied after features are fed
to encoder layers. In encoder layer, two layer norms are applied after self
attention, before and after feed forward.
This option corresponds to ``layer_norm_first`` from ``fairseq``.
encoder_layer_drop (float):
Probability to drop each encoder layer during training.
This option corresponds to ``layerdrop`` from ``fairseq``.
aux_num_out (int or None, optional):
            When provided, attach an extra linear layer on top of the encoder, which can be
            used for fine-tuning.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
if extractor_conv_layer_config is None:
extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
feature_extractor = components._get_feature_extractor(
extractor_mode, extractor_conv_layer_config, extractor_conv_bias)
encoder = components._get_encoder(
in_features=extractor_conv_layer_config[-1][0],
embed_dim=encoder_embed_dim,
dropout_input=encoder_projection_dropout,
pos_conv_kernel=encoder_pos_conv_kernel,
pos_conv_groups=encoder_pos_conv_groups,
num_layers=encoder_num_layers,
num_heads=encoder_num_heads,
attention_dropout=encoder_attention_dropout,
ff_interm_features=encoder_ff_interm_features,
ff_interm_dropout=encoder_ff_interm_dropout,
dropout=encoder_dropout,
layer_norm_first=encoder_layer_norm_first,
layer_drop=encoder_layer_drop,
)
aux = None
if aux_num_out is not None:
aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
return Wav2Vec2Model(feature_extractor, encoder, aux)
| def wav2vec2_model(
extractor_mode: str,
extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
extractor_conv_bias: bool,
encoder_embed_dim: int,
encoder_projection_dropout: float,
encoder_pos_conv_kernel: int,
encoder_pos_conv_groups: int,
encoder_num_layers: int,
encoder_num_heads: int,
encoder_attention_dropout: float,
encoder_ff_interm_features: int,
encoder_ff_interm_dropout: float,
encoder_dropout: float,
encoder_layer_norm_first: bool,
encoder_layer_drop: float,
aux_num_out: Optional[int],
) -> Wav2Vec2Model:
"""Build a custom Wav2Vec2Model
Note:
The "feature extractor" below corresponds to
`ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
in the original ``fairseq`` implementation.
        This is referred to as "(convolutional) feature encoder" in the *wav2vec 2.0*
[:footcite:`baevski2020wav2vec`] paper.
The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
        and this is referred to as "Transformer" in the paper.
Args:
extractor_mode (str): Operation mode of feature extractor.
Valid values are ``"group_norm"`` or ``"layer_norm"``.
If ``"group_norm"``, then a single normalization is applied
in the first convolution block. Otherwise, all the convolution
blocks will have layer normalization.
This option corresponds to ``extractor_mode`` from ``fairseq``.
extractor_conv_layer_config (list of integer tuples or None, optional):
Configuration of convolution layers in feature extractor.
List of convolution configuration,
i.e. ``[(output_channel, kernel_size, stride), ...]``
If ``None`` is provided, then the following default value is used.
.. code-block:: python
[
(512, 10, 5),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 3, 2),
(512, 2, 2),
(512, 2, 2),
]
This option corresponds to ``conv_feature_layers`` from ``fairseq``.
extractor_conv_bias (bool):
Whether to include bias term to each convolution operation.
This option corresponds to ``conv_bias`` from ``fairseq``.
encoder_embed_dim (int):
The dimension of embedding in encoder.
This option corresponds to ``encoder_embed_dim`` from ``fairseq``.
encoder_projection_dropout (float):
The dropout probability applied after the input feature is projected
to ``encoder_embed_dim``.
This option corresponds to ``dropout_input`` from ``fairseq``.
encoder_pos_conv_kernel (int):
The kernel size of convolutional positional embeddings.
This option corresponds to ``conv_pos`` from ``fairseq``.
encoder_pos_conv_groups (int):
The number of groups of convolutional positional embeddings.
This option corresponds to ``conv_pos_groups`` from ``fairseq``.
encoder_num_layers (int):
The number of self attention layers in transformer block.
This option corresponds to ``encoder_layers`` from ``fairseq``.
encoder_num_heads (int):
The number of heads in self attention layers.
This option corresponds to ``encoder_attention_heads`` from ``fairseq``.
encoder_attention_dropout (float):
The dropout probability applied after softmax in self-attention layer.
This option corresponds to ``attention_dropout`` from ``fairseq``.
encoder_ff_interm_features (int):
The dimension of hidden features in feed forward layer.
This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.
encoder_ff_interm_dropout (float):
The dropout probability applied in feedforward layer.
            This option corresponds to ``activation_dropout`` from ``fairseq``.
encoder_dropout (float):
The dropout probability applied at the end of feed forward layer.
This option corresponds to ``dropout`` from ``fairseq``.
encoder_layer_norm_first (bool):
Control the order of layer norm in transformer layer and each encoder layer.
If True, in transformer layer, layer norm is applied before features are fed
to encoder layers. In encoder layer, two layer norms are applied before and after
self attention.
If False, in transformer layer, layer norm is applied after features are fed
to encoder layers. In encoder layer, two layer norms are applied after self
attention, before and after feed forward.
This option corresponds to ``layer_norm_first`` from ``fairseq``.
encoder_layer_drop (float):
Probability to drop each encoder layer during training.
This option corresponds to ``layerdrop`` from ``fairseq``.
aux_num_out (int or None):
            When provided, attach an extra linear layer on top of the encoder, which can be
            used for fine-tuning.
Returns:
Wav2Vec2Model:
The resulting model.
""" # noqa: E501
if extractor_conv_layer_config is None:
extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
feature_extractor = components._get_feature_extractor(
extractor_mode, extractor_conv_layer_config, extractor_conv_bias)
encoder = components._get_encoder(
in_features=extractor_conv_layer_config[-1][0],
embed_dim=encoder_embed_dim,
dropout_input=encoder_projection_dropout,
pos_conv_kernel=encoder_pos_conv_kernel,
pos_conv_groups=encoder_pos_conv_groups,
num_layers=encoder_num_layers,
num_heads=encoder_num_heads,
attention_dropout=encoder_attention_dropout,
ff_interm_features=encoder_ff_interm_features,
ff_interm_dropout=encoder_ff_interm_dropout,
dropout=encoder_dropout,
layer_norm_first=encoder_layer_norm_first,
layer_drop=encoder_layer_drop,
)
aux = None
if aux_num_out is not None:
aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
return Wav2Vec2Model(feature_extractor, encoder, aux)
|
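Assuming a recent torchaudio release where this factory is exposed as torchaudio.models.wav2vec2_model, a "base"-sized model could be built and run on dummy audio as follows; the hyperparameter values roughly follow the wav2vec 2.0 base configuration and are not prescribed by the code above.

import torch
import torchaudio

model = torchaudio.models.wav2vec2_model(
    extractor_mode="group_norm",
    extractor_conv_layer_config=None,   # fall back to the default 7-layer config
    extractor_conv_bias=False,
    encoder_embed_dim=768,
    encoder_projection_dropout=0.1,
    encoder_pos_conv_kernel=128,
    encoder_pos_conv_groups=16,
    encoder_num_layers=12,
    encoder_num_heads=12,
    encoder_attention_dropout=0.1,
    encoder_ff_interm_features=3072,
    encoder_ff_interm_dropout=0.0,
    encoder_dropout=0.1,
    encoder_layer_norm_first=False,
    encoder_layer_drop=0.05,
    aux_num_out=None,                   # no fine-tuning head
)

waveform = torch.randn(1, 16000)        # one second of 16 kHz audio
features, lengths = model.extract_features(waveform)
print(len(features), features[-1].shape)  # per-layer outputs from the encoder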
32,796 | def main():
parser = argparse.ArgumentParser(
description=USAGE,
prog="ddtrace-run",
usage="ddtrace-run <your usual python command>",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("command", nargs=argparse.REMAINDER, type=str, help="Command string to execute.")
parser.add_argument("-d", "--debug", help="enable debug mode (disabled by default)", action="store_true")
parser.add_argument("-i", "--info", help="print library info useful for debugging", action="store_true")
parser.add_argument("-p", "--profiling", help="enable profiling (disabled by default)", action="store_true")
parser.add_argument("-v", "--version", action="version", version="%(prog)s " + ddtrace.__version__)
args = parser.parse_args()
if args.profiling:
os.environ["DD_PROFILING_ENABLED"] = "true"
if args.debug:
os.environ["DD_TRACE_DEBUG"] = "true"
if args.info:
# Inline imports for performance.
import pprint
from ddtrace.internal.debug import collect
pprint.pprint(collect(ddtrace.tracer))
sys.exit(0)
root_dir = os.path.dirname(ddtrace.__file__)
log.debug("ddtrace root: %s", root_dir)
bootstrap_dir = os.path.join(root_dir, "bootstrap")
log.debug("ddtrace bootstrap: %s", bootstrap_dir)
_add_bootstrap_to_pythonpath(bootstrap_dir)
log.debug("PYTHONPATH: %s", os.environ["PYTHONPATH"])
log.debug("sys.path: %s", sys.path)
if not args.command:
parser.print_help()
sys.exit(1)
# Find the executable path
executable = spawn.find_executable(args.command[0])
if not executable:
print("ddtrace-run: failed to find executable '%s'.\n" % args.command[0])
parser.print_usage()
sys.exit(1)
log.debug("program executable: %s", executable)
try:
# Raises OSError for permissions errors in Python 2
# PermissionError for Python 3
os.execl(executable, executable, *args.command[1:])
except (OSError, PermissionError):
print("ddtrace-run: executable '%s' does not have executable permissions.\n" % executable)
parser.print_usage()
sys.exit(1)
sys.exit(0)
| def main():
parser = argparse.ArgumentParser(
description=USAGE,
prog="ddtrace-run",
usage="ddtrace-run <script>",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("command", nargs=argparse.REMAINDER, type=str, help="Command string to execute.")
parser.add_argument("-d", "--debug", help="enable debug mode (disabled by default)", action="store_true")
parser.add_argument("-i", "--info", help="print library info useful for debugging", action="store_true")
parser.add_argument("-p", "--profiling", help="enable profiling (disabled by default)", action="store_true")
parser.add_argument("-v", "--version", action="version", version="%(prog)s " + ddtrace.__version__)
args = parser.parse_args()
if args.profiling:
os.environ["DD_PROFILING_ENABLED"] = "true"
if args.debug:
os.environ["DD_TRACE_DEBUG"] = "true"
if args.info:
# Inline imports for performance.
import pprint
from ddtrace.internal.debug import collect
pprint.pprint(collect(ddtrace.tracer))
sys.exit(0)
root_dir = os.path.dirname(ddtrace.__file__)
log.debug("ddtrace root: %s", root_dir)
bootstrap_dir = os.path.join(root_dir, "bootstrap")
log.debug("ddtrace bootstrap: %s", bootstrap_dir)
_add_bootstrap_to_pythonpath(bootstrap_dir)
log.debug("PYTHONPATH: %s", os.environ["PYTHONPATH"])
log.debug("sys.path: %s", sys.path)
if not args.command:
parser.print_help()
sys.exit(1)
# Find the executable path
executable = spawn.find_executable(args.command[0])
if not executable:
print("ddtrace-run: failed to find executable '%s'.\n" % args.command[0])
parser.print_usage()
sys.exit(1)
log.debug("program executable: %s", executable)
try:
# Raises OSError for permissions errors in Python 2
# PermissionError for Python 3
os.execl(executable, executable, *args.command[1:])
except (OSError, PermissionError):
print("ddtrace-run: executable '%s' does not have executable permissions.\n" % executable)
parser.print_usage()
sys.exit(1)
sys.exit(0)
|
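The core of this entry point is a bootstrap-then-exec pattern: prepend a bootstrap directory to PYTHONPATH so the child interpreter auto-loads instrumentation, then replace the current process with the user's command. A generic sketch of that pattern, using shutil.which rather than ddtrace's own helpers, is:

import os
import sys
from shutil import which

def exec_with_bootstrap(bootstrap_dir, command):
    # Prepend the bootstrap dir so sitecustomize-style hooks load in the child process.
    os.environ["PYTHONPATH"] = os.pathsep.join(
        p for p in (bootstrap_dir, os.environ.get("PYTHONPATH", "")) if p
    )
    executable = which(command[0])
    if executable is None:
        sys.exit("failed to find executable %r" % command[0])
    # execl replaces the current process and does not return on success.
    os.execl(executable, executable, *command[1:])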
31,063 | def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance)
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
| def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance)
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
|
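A more self-contained sketch of the remote-log download step, using subprocess.run so the command's stderr is captured for logging; the user, host, and destination values are placeholders rather than anything taken from the build environment above.

import logging
import subprocess

def fetch_server_log(user, host, dest_path):
    cmd = (
        "scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
        f"{user}@{host}:/var/log/demisto/server.log {dest_path}"
    )
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        logging.error("scp failed: %s", result.stderr.strip())
    return result.returncode == 0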
12,148 | def createFileSec(
job,
directoryPath,
parentDiv,
baseDirectoryPath,
sipUUID,
directories,
state,
includeAmdSec=True,
):
"""Creates fileSec and structMap entries for files on disk recursively.
:param directoryPath: Path to recursively traverse and create METS entries for
:param parentDiv: structMap div to attach created children to
:param baseDirectoryPath: SIP path
:param sipUUID: SIP UUID
:param includeAmdSec: If True, creates amdSecs for the files
"""
filesInThisDirectory = []
dspaceMetsDMDID = None
try:
directoryContents = sorted(os.listdir(directoryPath))
except os.error:
# Directory doesn't exist
job.pyprint(directoryPath, "doesn't exist", file=sys.stderr)
return
# Create the <mets:div> element for the directory that this file is in.
# If this directory has been assigned a UUID during transfer, retrieve that
# UUID based on the directory's relative path and document it in its own
# <mets:dmdSec> element.
directoryName = os.path.basename(directoryPath)
relativeDirectoryPath = "%SIPDirectory%" + os.path.join(
directoryPath.replace(baseDirectoryPath, "", 1), ""
)
dir_mdl = directories.get(
relativeDirectoryPath, directories.get(relativeDirectoryPath.rstrip("/"))
)
dir_dmd_id = None
if dir_mdl:
dirDmdSec = getDirDmdSec(dir_mdl, relativeDirectoryPath)
state.globalDmdSecCounter += 1
state.dmdSecs.append(dirDmdSec)
dir_dmd_id = "dmdSec_" + state.globalDmdSecCounter.__str__()
dirDmdSec.set("ID", dir_dmd_id)
structMapDiv = etree.SubElement(
parentDiv, ns.metsBNS + "div", TYPE="Directory", LABEL=directoryName
)
DMDIDS = createDMDIDsFromCSVMetadata(
job, directoryPath.replace(baseDirectoryPath, "", 1), state
)
if DMDIDS or dir_dmd_id:
if DMDIDS and dir_dmd_id:
structMapDiv.set("DMDID", dir_dmd_id + " " + DMDIDS)
elif DMDIDS:
structMapDiv.set("DMDID", DMDIDS)
else:
structMapDiv.set("DMDID", dir_dmd_id)
for item in directoryContents:
itemdirectoryPath = os.path.join(directoryPath, item)
if os.path.isdir(itemdirectoryPath):
createFileSec(
job,
itemdirectoryPath,
structMapDiv,
baseDirectoryPath,
sipUUID,
directories,
state,
includeAmdSec=includeAmdSec,
)
elif os.path.isfile(itemdirectoryPath):
# Setup variables for creating file metadata
DMDIDS = ""
directoryPathSTR = itemdirectoryPath.replace(
baseDirectoryPath, SIP_DIR_VAR, 1
)
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"currentlocation": directoryPathSTR,
}
try:
f = File.objects.get(**kwargs)
except File.DoesNotExist:
job.pyprint(
'No uuid for file: "', directoryPathSTR, '"', file=sys.stderr
)
state.error_accumulator.error_count += 1
continue
use = f.filegrpuse
label = f.label
typeOfTransfer = f.transfer.type if f.transfer else None
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, "", 1)
# Special TRIM processing
if typeOfTransfer == "TRIM" and state.trimStructMap is None:
state.trimStructMap = etree.Element(
ns.metsBNS + "structMap",
attrib={
"TYPE": "logical",
"ID": "structMap_2",
"LABEL": "Hierarchical arrangement",
},
)
state.trimStructMapObjects = etree.SubElement(
state.trimStructMap,
ns.metsBNS + "div",
attrib={"TYPE": "File", "LABEL": "objects"},
)
trimDmdSec = getTrimDmdSec(job, baseDirectoryPath, sipUUID)
state.globalDmdSecCounter += 1
state.dmdSecs.append(trimDmdSec)
ID = "dmdSec_" + state.globalDmdSecCounter.__str__()
trimDmdSec.set("ID", ID)
state.trimStructMapObjects.set("DMDID", ID)
trimAmdSec = etree.Element(ns.metsBNS + "amdSec")
state.globalAmdSecCounter += 1
state.amdSecs.append(trimAmdSec)
ID = "amdSec_" + state.globalAmdSecCounter.__str__()
trimAmdSec.set("ID", ID)
digiprovMD = getTrimAmdSec(job, baseDirectoryPath, sipUUID)
state.globalDigiprovMDCounter += 1
digiprovMD.set("ID", "digiprovMD_" + str(state.globalDigiprovMDCounter))
trimAmdSec.append(digiprovMD)
state.trimStructMapObjects.set("ADMID", ID)
# Create <div TYPE="Item"> and child <fptr>
# <fptr FILEID="file-<UUID>" LABEL="filename.ext">
fileId = "file-{}".format(f.uuid)
label = item if not label else label
fileDiv = etree.SubElement(
structMapDiv, ns.metsBNS + "div", LABEL=label, TYPE="Item"
)
etree.SubElement(fileDiv, ns.metsBNS + "fptr", FILEID=fileId)
# Pair items listed in custom structmaps. Strip leading path
# separator if it exists.
state.fileNameToFileID[directoryPathSTR] = fileId
# Determine fileGrp @GROUPID based on the file's fileGrpUse and transfer type
GROUPID = ""
if f.filegrpuuid:
# GROUPID was determined elsewhere
GROUPID = "Group-%s" % (f.filegrpuuid)
if use == "TRIM file metadata":
use = "metadata"
elif use in (
"original",
"submissionDocumentation",
"metadata",
"maildirFile",
):
# These files are in a group defined by themselves
GROUPID = "Group-%s" % (f.uuid)
if use == "maildirFile":
use = "original"
# Check for CSV-based Dublincore dmdSec
if use == "original":
DMDIDS = createDMDIDsFromCSVMetadata(
job,
f.originallocation.replace("%transferDirectory%", "", 1),
state,
)
XMLDMDID = create_dmd_from_xml(
job,
f.originallocation.replace("%transferDirectory%", "", 1),
baseDirectoryPath,
state,
)
if DMDIDS and XMLDMDID:
DMDIDS += " " + XMLDMDID
elif XMLDMDID:
DMDIDS = XMLDMDID
if DMDIDS:
fileDiv.set("DMDID", DMDIDS)
# More special TRIM processing
if typeOfTransfer == "TRIM":
trimFileDiv = etree.SubElement(
state.trimStructMapObjects,
ns.metsBNS + "div",
attrib={"TYPE": "Item"},
)
trimFileDmdSec = getTrimFileDmdSec(
job, baseDirectoryPath, sipUUID, f.uuid
)
state.globalDmdSecCounter += 1
state.dmdSecs.append(trimFileDmdSec)
ID = "dmdSec_" + state.globalDmdSecCounter.__str__()
trimFileDmdSec.set("ID", ID)
trimFileDiv.set("DMDID", ID)
etree.SubElement(
trimFileDiv, ns.metsBNS + "fptr", FILEID=fileId
)
elif typeOfTransfer == "Dspace" and (
use in ("license", "text/ocr", "DSPACEMETS")
):
# Dspace transfers are treated specially, but some of these fileGrpUses may be encountered in other types
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"filegrpuse": "original",
"originallocation__startswith": os.path.dirname(f.originallocation),
}
original_file = File.objects.filter(**kwargs).first()
if original_file is not None:
GROUPID = "Group-" + original_file.uuid
elif use in ("preservation", "text/ocr", "derivative"):
# Derived files should be in the original file's group
try:
d = Derivation.objects.get(derived_file_id=f.uuid)
except Derivation.DoesNotExist:
job.pyprint(
"Fatal error: unable to locate a Derivation object"
" where the derived file is {}".format(f.uuid)
)
raise
GROUPID = "Group-" + d.source_file_id
elif use == "service":
# Service files are in the original file's group
fileFileIDPath = itemdirectoryPath.replace(
baseDirectoryPath + "objects/service/",
SIP_DIR_VAR + "objects/",
)
objectNameExtensionIndex = fileFileIDPath.rfind(".")
fileFileIDPath = fileFileIDPath[: objectNameExtensionIndex + 1]
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"filegrpuse": "original",
"currentlocation__startswith": fileFileIDPath,
}
original_file = File.objects.get(**kwargs)
GROUPID = "Group-" + original_file.uuid
elif use == "TRIM container metadata":
GROUPID = "Group-%s" % (f.uuid)
use = "metadata"
# Special DSPACEMETS processing
if f.transfer and f.transfer.type == "Dspace" and use == "DSPACEMETS":
use = "submissionDocumentation"
admidApplyTo = None
if GROUPID == "": # is an AIP identifier
GROUPID = f.uuid
admidApplyTo = structMapDiv.getparent()
label = "mets.xml-%s" % (GROUPID)
dspace_dmdsecs = createDSpaceDMDSec(
job, label, itemdirectoryPath, directoryPathSTR, state
)
if dspace_dmdsecs:
state.dmdSecs.extend(list(dspace_dmdsecs.values()))
ids = " ".join(list(dspace_dmdsecs.keys()))
if admidApplyTo is not None:
admidApplyTo.set("DMDID", ids)
else:
dspaceMetsDMDID = ids
# Special Dataverse processing. If there's .tab file, check if
# there's a Dataverse METS with additional metadata.
if f.originallocation.endswith(".tab"):
dv_metadata = create_dataverse_tabfile_dmdsec(
job, baseDirectoryPath, os.path.basename(f.originallocation)
)
state.dmdSecs.extend(dv_metadata)
ids = " ".join([ds.get("ID") for ds in dv_metadata])
if ids != "":
fileDiv.attrib["DMDID"] = fileDiv.attrib.get("DMDID", "") + ids
if GROUPID == "":
state.error_accumulator.error_count += 1
job.pyprint(
'No groupID for file: "', directoryPathSTR, '"', file=sys.stderr
)
if use not in state.globalFileGrps:
job.pyprint('Invalid use: "%s"' % (use), file=sys.stderr)
state.error_accumulator.error_count += 1
else:
file_elem = etree.SubElement(
state.globalFileGrps[use],
ns.metsBNS + "file",
ID=fileId,
GROUPID=GROUPID,
)
if use == "original":
filesInThisDirectory.append(file_elem)
# <Flocat xlink:href="objects/file1-UUID" locType="other" otherLocType="system"/>
newChild(
file_elem,
ns.metsBNS + "FLocat",
sets=[
(ns.xlinkBNS + "href", directoryPathSTR),
("LOCTYPE", "OTHER"),
("OTHERLOCTYPE", "SYSTEM"),
],
)
if includeAmdSec:
AMD, ADMID = getAMDSec(
job,
f.uuid,
directoryPathSTR,
use,
sipUUID,
f.transfer_id,
itemdirectoryPath,
typeOfTransfer,
baseDirectoryPath,
state,
)
state.amdSecs.append(AMD)
file_elem.set("ADMID", ADMID)
if dspaceMetsDMDID is not None:
for file_elem in filesInThisDirectory:
file_elem.set("DMDID", dspaceMetsDMDID)
return structMapDiv
| def createFileSec(
job,
directoryPath,
parentDiv,
baseDirectoryPath,
sipUUID,
directories,
state,
includeAmdSec=True,
):
"""Creates fileSec and structMap entries for files on disk recursively.
:param directoryPath: Path to recursively traverse and create METS entries for
:param parentDiv: structMap div to attach created children to
:param baseDirectoryPath: SIP path
:param sipUUID: SIP UUID
:param includeAmdSec: If True, creates amdSecs for the files
"""
filesInThisDirectory = []
dspaceMetsDMDID = None
try:
directoryContents = sorted(os.listdir(directoryPath))
except os.error:
# Directory doesn't exist
job.pyprint(directoryPath, "doesn't exist", file=sys.stderr)
return
# Create the <mets:div> element for the directory that this file is in.
# If this directory has been assigned a UUID during transfer, retrieve that
# UUID based on the directory's relative path and document it in its own
# <mets:dmdSec> element.
directoryName = os.path.basename(directoryPath)
relativeDirectoryPath = "%SIPDirectory%" + os.path.join(
directoryPath.replace(baseDirectoryPath, "", 1), ""
)
dir_mdl = directories.get(
relativeDirectoryPath, directories.get(relativeDirectoryPath.rstrip("/"))
)
dir_dmd_id = None
if dir_mdl:
dirDmdSec = getDirDmdSec(dir_mdl, relativeDirectoryPath)
state.globalDmdSecCounter += 1
state.dmdSecs.append(dirDmdSec)
dir_dmd_id = "dmdSec_" + state.globalDmdSecCounter.__str__()
dirDmdSec.set("ID", dir_dmd_id)
structMapDiv = etree.SubElement(
parentDiv, ns.metsBNS + "div", TYPE="Directory", LABEL=directoryName
)
DMDIDS = createDMDIDsFromCSVMetadata(
job, directoryPath.replace(baseDirectoryPath, "", 1), state
)
if DMDIDS or dir_dmd_id:
if DMDIDS and dir_dmd_id:
structMapDiv.set("DMDID", dir_dmd_id + " " + DMDIDS)
elif DMDIDS:
structMapDiv.set("DMDID", DMDIDS)
else:
structMapDiv.set("DMDID", dir_dmd_id)
for item in directoryContents:
itemdirectoryPath = os.path.join(directoryPath, item)
if os.path.isdir(itemdirectoryPath):
createFileSec(
job,
itemdirectoryPath,
structMapDiv,
baseDirectoryPath,
sipUUID,
directories,
state,
includeAmdSec=includeAmdSec,
)
elif os.path.isfile(itemdirectoryPath):
# Setup variables for creating file metadata
DMDIDS = ""
directoryPathSTR = itemdirectoryPath.replace(
baseDirectoryPath, SIP_DIR_VAR, 1
)
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"currentlocation": directoryPathSTR,
}
try:
f = File.objects.get(**kwargs)
except File.DoesNotExist:
job.pyprint(
'No uuid for file: "', directoryPathSTR, '"', file=sys.stderr
)
state.error_accumulator.error_count += 1
continue
use = f.filegrpuse
label = f.label
typeOfTransfer = f.transfer.type if f.transfer else None
directoryPathSTR = itemdirectoryPath.replace(baseDirectoryPath, "", 1)
# Special TRIM processing
if typeOfTransfer == "TRIM" and state.trimStructMap is None:
state.trimStructMap = etree.Element(
ns.metsBNS + "structMap",
attrib={
"TYPE": "logical",
"ID": "structMap_2",
"LABEL": "Hierarchical arrangement",
},
)
state.trimStructMapObjects = etree.SubElement(
state.trimStructMap,
ns.metsBNS + "div",
attrib={"TYPE": "File", "LABEL": "objects"},
)
trimDmdSec = getTrimDmdSec(job, baseDirectoryPath, sipUUID)
state.globalDmdSecCounter += 1
state.dmdSecs.append(trimDmdSec)
ID = "dmdSec_" + state.globalDmdSecCounter.__str__()
trimDmdSec.set("ID", ID)
state.trimStructMapObjects.set("DMDID", ID)
trimAmdSec = etree.Element(ns.metsBNS + "amdSec")
state.globalAmdSecCounter += 1
state.amdSecs.append(trimAmdSec)
ID = "amdSec_" + state.globalAmdSecCounter.__str__()
trimAmdSec.set("ID", ID)
digiprovMD = getTrimAmdSec(job, baseDirectoryPath, sipUUID)
state.globalDigiprovMDCounter += 1
digiprovMD.set("ID", "digiprovMD_" + str(state.globalDigiprovMDCounter))
trimAmdSec.append(digiprovMD)
state.trimStructMapObjects.set("ADMID", ID)
# Create <div TYPE="Item"> and child <fptr>
# <fptr FILEID="file-<UUID>" LABEL="filename.ext">
fileId = "file-{}".format(f.uuid)
label = item if not label else label
fileDiv = etree.SubElement(
structMapDiv, ns.metsBNS + "div", LABEL=label, TYPE="Item"
)
etree.SubElement(fileDiv, ns.metsBNS + "fptr", FILEID=fileId)
# Pair items listed in custom structmaps. Strip leading path
# separator if it exists.
state.fileNameToFileID[directoryPathSTR] = fileId
# Determine fileGrp @GROUPID based on the file's fileGrpUse and transfer type
GROUPID = ""
if f.filegrpuuid:
# GROUPID was determined elsewhere
GROUPID = "Group-%s" % (f.filegrpuuid)
if use == "TRIM file metadata":
use = "metadata"
elif use in (
"original",
"submissionDocumentation",
"metadata",
"maildirFile",
):
# These files are in a group defined by themselves
GROUPID = "Group-%s" % (f.uuid)
if use == "maildirFile":
use = "original"
# Check for CSV-based Dublincore dmdSec
if use == "original":
DMDIDS = createDMDIDsFromCSVMetadata(
job,
f.originallocation.replace("%transferDirectory%", "", 1),
state,
)
XMLDMDID = create_dmdid_from_xml(
job,
f.originallocation.replace("%transferDirectory%", "", 1),
baseDirectoryPath,
state,
)
if DMDIDS and XMLDMDID:
DMDIDS += " " + XMLDMDID
elif XMLDMDID:
DMDIDS = XMLDMDID
if DMDIDS:
fileDiv.set("DMDID", DMDIDS)
# More special TRIM processing
if typeOfTransfer == "TRIM":
trimFileDiv = etree.SubElement(
state.trimStructMapObjects,
ns.metsBNS + "div",
attrib={"TYPE": "Item"},
)
trimFileDmdSec = getTrimFileDmdSec(
job, baseDirectoryPath, sipUUID, f.uuid
)
state.globalDmdSecCounter += 1
state.dmdSecs.append(trimFileDmdSec)
ID = "dmdSec_" + state.globalDmdSecCounter.__str__()
trimFileDmdSec.set("ID", ID)
trimFileDiv.set("DMDID", ID)
etree.SubElement(
trimFileDiv, ns.metsBNS + "fptr", FILEID=fileId
)
elif typeOfTransfer == "Dspace" and (
use in ("license", "text/ocr", "DSPACEMETS")
):
# Dspace transfers are treated specially, but some of these fileGrpUses may be encountered in other types
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"filegrpuse": "original",
"originallocation__startswith": os.path.dirname(f.originallocation),
}
original_file = File.objects.filter(**kwargs).first()
if original_file is not None:
GROUPID = "Group-" + original_file.uuid
elif use in ("preservation", "text/ocr", "derivative"):
# Derived files should be in the original file's group
try:
d = Derivation.objects.get(derived_file_id=f.uuid)
except Derivation.DoesNotExist:
job.pyprint(
"Fatal error: unable to locate a Derivation object"
" where the derived file is {}".format(f.uuid)
)
raise
GROUPID = "Group-" + d.source_file_id
elif use == "service":
# Service files are in the original file's group
fileFileIDPath = itemdirectoryPath.replace(
baseDirectoryPath + "objects/service/",
SIP_DIR_VAR + "objects/",
)
objectNameExtensionIndex = fileFileIDPath.rfind(".")
fileFileIDPath = fileFileIDPath[: objectNameExtensionIndex + 1]
kwargs = {
"removedtime__isnull": True,
"sip_id": sipUUID,
"filegrpuse": "original",
"currentlocation__startswith": fileFileIDPath,
}
original_file = File.objects.get(**kwargs)
GROUPID = "Group-" + original_file.uuid
elif use == "TRIM container metadata":
GROUPID = "Group-%s" % (f.uuid)
use = "metadata"
# Special DSPACEMETS processing
if f.transfer and f.transfer.type == "Dspace" and use == "DSPACEMETS":
use = "submissionDocumentation"
admidApplyTo = None
if GROUPID == "": # is an AIP identifier
GROUPID = f.uuid
admidApplyTo = structMapDiv.getparent()
label = "mets.xml-%s" % (GROUPID)
dspace_dmdsecs = createDSpaceDMDSec(
job, label, itemdirectoryPath, directoryPathSTR, state
)
if dspace_dmdsecs:
state.dmdSecs.extend(list(dspace_dmdsecs.values()))
ids = " ".join(list(dspace_dmdsecs.keys()))
if admidApplyTo is not None:
admidApplyTo.set("DMDID", ids)
else:
dspaceMetsDMDID = ids
# Special Dataverse processing. If there's .tab file, check if
# there's a Dataverse METS with additional metadata.
if f.originallocation.endswith(".tab"):
dv_metadata = create_dataverse_tabfile_dmdsec(
job, baseDirectoryPath, os.path.basename(f.originallocation)
)
state.dmdSecs.extend(dv_metadata)
ids = " ".join([ds.get("ID") for ds in dv_metadata])
if ids != "":
fileDiv.attrib["DMDID"] = fileDiv.attrib.get("DMDID", "") + ids
if GROUPID == "":
state.error_accumulator.error_count += 1
job.pyprint(
'No groupID for file: "', directoryPathSTR, '"', file=sys.stderr
)
if use not in state.globalFileGrps:
job.pyprint('Invalid use: "%s"' % (use), file=sys.stderr)
state.error_accumulator.error_count += 1
else:
file_elem = etree.SubElement(
state.globalFileGrps[use],
ns.metsBNS + "file",
ID=fileId,
GROUPID=GROUPID,
)
if use == "original":
filesInThisDirectory.append(file_elem)
# <Flocat xlink:href="objects/file1-UUID" locType="other" otherLocType="system"/>
newChild(
file_elem,
ns.metsBNS + "FLocat",
sets=[
(ns.xlinkBNS + "href", directoryPathSTR),
("LOCTYPE", "OTHER"),
("OTHERLOCTYPE", "SYSTEM"),
],
)
if includeAmdSec:
AMD, ADMID = getAMDSec(
job,
f.uuid,
directoryPathSTR,
use,
sipUUID,
f.transfer_id,
itemdirectoryPath,
typeOfTransfer,
baseDirectoryPath,
state,
)
state.amdSecs.append(AMD)
file_elem.set("ADMID", ADMID)
if dspaceMetsDMDID is not None:
for file_elem in filesInThisDirectory:
file_elem.set("DMDID", dspaceMetsDMDID)
return structMapDiv
|
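The fileSec/structMap wiring above boils down to a few lxml element constructions per file: a <mets:file> with an xlink FLocat, plus a matching <mets:div>/<mets:fptr> in the structMap. A toy illustration with a hand-written namespace map (standing in for Archivematica's ns module) is:

from lxml import etree

METS = "http://www.loc.gov/METS/"
XLINK = "http://www.w3.org/1999/xlink"

# One <mets:file> in a fileGrp, located via an xlink:href FLocat ...
file_grp = etree.Element("{%s}fileGrp" % METS, USE="original")
file_elem = etree.SubElement(file_grp, "{%s}file" % METS,
                             ID="file-1234", GROUPID="Group-1234")
etree.SubElement(file_elem, "{%s}FLocat" % METS,
                 {"{%s}href" % XLINK: "objects/file1.tif",
                  "LOCTYPE": "OTHER", "OTHERLOCTYPE": "SYSTEM"})

# ... and the structMap side: an Item div pointing back at the file by ID.
item_div = etree.Element("{%s}div" % METS, TYPE="Item", LABEL="file1.tif")
etree.SubElement(item_div, "{%s}fptr" % METS, FILEID="file-1234")

print(etree.tostring(file_grp, pretty_print=True).decode())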
54,978 | def ph_pphh_wires(ph_confs, pphh_confs, wires=None):
r"""Map the indices representing the particle-hole configurations
generated by the Coupled-Cluster excitation operator to the wires that
the Unitary Coupled-Cluster Singles and Doubles (UCCSD) template will act on.
**Example**
>>> ph_confs = [[0, 2], [1, 3]]
>>> pphh_confs = [[0, 1, 2, 3]]
>>> ph, pphh = ph_pphh_wires(ph_confs, pphh_confs)
>>> print(ph)
>>> print(pphh)
[[0, 1, 2], [1, 2, 3]]
[[[0, 1], [2, 3]]]
>>> wires=['a0', 'b1', 'c2', 'd3']
>>> ph, pphh = ph_pphh_wires(ph_confs, pphh_confs, wires=wires)
>>> print(ph)
>>> print(pphh)
[['a0', 'b1', 'c2'], ['b1', 'c2', 'd3']]
[[['a0', 'b1'], ['c2', 'd3']]]
Args:
ph_confs (list[list[int]]): list of indices of the two qubits representing
the 1particle-1hole (ph) configuration
:math:`\vert ph \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`.
pphh_confs (list[list[int]]): list of indices of the four qubits representing
the 2particle-2hole (pphh) configuration
:math:`\vert pphh \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`. The indices :math:`r, s`
and :math:`p, q` run over the occupied and virtual Hartree-Fock (HF)
orbitals, respectively.
wires (Iterable[Any]): Wires of the quantum device. If None, will use consecutive wires.
Returns:
tuple(list[list[Any]], list[list[list[Any]]]): lists with the sequence of wires
the Unitary Coupled-Cluster Singles and Doubles (UCCSD) template will act on.
"""
if (not ph_confs) and (not pphh_confs):
raise ValueError(
"'ph_confs' and 'pphh_confs' lists can not be both empty;\
got ph_confs = {}, pphh_confs = {}".
format(ph_confs, pphh_confs)
)
expected_shape = (2,)
for ph_confs_ in ph_confs:
if np.array(ph_confs_).shape != expected_shape:
raise ValueError(
"expected entries of 'ph_confs' to be of shape (2,); got {}".
format(np.array(ph_confs_).shape)
)
expected_shape = (4,)
for pphh_confs_ in pphh_confs:
if np.array(pphh_confs_).shape != expected_shape:
raise ValueError(
"expected entries of 'pphh_confs' to be of shape (4,); got {}".
format(np.array(pphh_confs_).shape)
)
max_idx = 0
if ph_confs:
max_idx = np.max(ph_confs)
if pphh_confs:
max_idx = max(np.max(pphh_confs), max_idx)
if wires is None:
wires = range(max_idx + 1)
elif len(wires) != max_idx + 1:
raise ValueError("Expected number of wires is {}; got {}".format(max_idx + 1, len(wires)))
ph = []
for r, p in ph_confs:
ph_wires = [wires[i] for i in range(r, p + 1)]
ph.append(ph_wires)
pphh = []
for s, r, q, p in pphh_confs:
pphh1_wires = [wires[i] for i in range(s, r + 1)]
pphh2_wires = [wires[i] for i in range(q, p + 1)]
pphh.append([pphh1_wires, pphh2_wires])
return ph, pphh
| def ph_pphh_wires(ph_confs, pphh_confs, wires=None):
r"""Map the indices representing the particle-hole configurations
generated by the Coupled-Cluster excitation operator to the wires that
the Unitary Coupled-Cluster Singles and Doubles (UCCSD) template will act on.
**Example**
>>> ph_confs = [[0, 2], [1, 3]]
>>> pphh_confs = [[0, 1, 2, 3]]
>>> ph, pphh = ph_pphh_wires(ph_confs, pphh_confs)
>>> print(ph)
[[0, 1, 2], [1, 2, 3]]
>>> print(pphh)
[[[0, 1], [2, 3]]]
>>> wires=['a0', 'b1', 'c2', 'd3']
>>> ph, pphh = ph_pphh_wires(ph_confs, pphh_confs, wires=wires)
>>> print(ph)
[['a0', 'b1', 'c2'], ['b1', 'c2', 'd3']]
>>> print(pphh)
[[['a0', 'b1'], ['c2', 'd3']]]
Args:
ph_confs (list[list[int]]): list of indices of the two qubits representing
the 1particle-1hole (ph) configuration
:math:`\vert ph \rangle = \hat{c}_p^\dagger \hat{c}_r \vert \mathrm{HF}\rangle`.
pphh_confs (list[list[int]]): list of indices of the four qubits representing
the 2particle-2hole (pphh) configuration
:math:`\vert pphh \rangle = \hat{c}_p^\dagger \hat{c}_q^\dagger
\hat{c}_r \hat{c}_s \vert \mathrm{HF}\rangle`. The indices :math:`r, s`
and :math:`p, q` run over the occupied and virtual Hartree-Fock (HF)
orbitals, respectively.
wires (Iterable[Any]): Wires of the quantum device. If None, will use consecutive wires.
Returns:
tuple(list[list[Any]], list[list[list[Any]]]): lists with the sequence of wires
the Unitary Coupled-Cluster Singles and Doubles (UCCSD) template will act on.
"""
if (not ph_confs) and (not pphh_confs):
raise ValueError(
"'ph_confs' and 'pphh_confs' lists can not be both empty;\
got ph_confs = {}, pphh_confs = {}".
format(ph_confs, pphh_confs)
)
expected_shape = (2,)
for ph_confs_ in ph_confs:
if np.array(ph_confs_).shape != expected_shape:
raise ValueError(
"expected entries of 'ph_confs' to be of shape (2,); got {}".
format(np.array(ph_confs_).shape)
)
expected_shape = (4,)
for pphh_confs_ in pphh_confs:
if np.array(pphh_confs_).shape != expected_shape:
raise ValueError(
"expected entries of 'pphh_confs' to be of shape (4,); got {}".
format(np.array(pphh_confs_).shape)
)
max_idx = 0
if ph_confs:
max_idx = np.max(ph_confs)
if pphh_confs:
max_idx = max(np.max(pphh_confs), max_idx)
if wires is None:
wires = range(max_idx + 1)
elif len(wires) != max_idx + 1:
raise ValueError("Expected number of wires is {}; got {}".format(max_idx + 1, len(wires)))
ph = []
for r, p in ph_confs:
ph_wires = [wires[i] for i in range(r, p + 1)]
ph.append(ph_wires)
pphh = []
for s, r, q, p in pphh_confs:
pphh1_wires = [wires[i] for i in range(s, r + 1)]
pphh2_wires = [wires[i] for i in range(q, p + 1)]
pphh.append([pphh1_wires, pphh2_wires])
return ph, pphh
|
725 | def get_retry_request(
request,
*,
reason,
spider,
max_retry_times=None,
priority_adjust=None,
):
settings = spider.crawler.settings
stats = spider.crawler.stats
retry_times = request.meta.get('retry_times', 0) + 1
request_max_retry_times = request.meta.get(
'max_retry_times',
max_retry_times,
)
if request_max_retry_times is None:
request_max_retry_times = settings.getint('RETRY_TIMES')
if retry_times <= request_max_retry_times:
logger.debug(
"Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider}
)
new_request = request.copy()
new_request.meta['retry_times'] = retry_times
new_request.dont_filter = True
if priority_adjust is None:
priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
new_request.priority = request.priority + priority_adjust
if isinstance(reason, Exception):
reason = global_object_name(reason.__class__)
stats.inc_value('retry/count')
stats.inc_value(f'retry/reason_count/{reason}')
return new_request
else:
stats.inc_value('retry/max_reached')
logger.error("Gave up retrying %(request)s (failed %(retry_times)d times): %(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider})
return None
| def get_retry_request(
request,
*,
reason,
spider,
max_retry_times=None,
priority_adjust=None,
):
settings = spider.crawler.settings
stats = spider.crawler.stats
retry_times = request.meta.get('retry_times', 0) + 1
request_max_retry_times = request.meta.get(
'max_retry_times',
max_retry_times,
)
if request_max_retry_times is None:
request_max_retry_times = settings.getint('RETRY_TIMES')
if retry_times <= request_max_retry_times:
logger.debug(
"Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider}
)
new_request = request.copy()
new_request.meta['retry_times'] = retry_times
new_request.dont_filter = True
if priority_adjust is None:
priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
new_request.priority = request.priority + priority_adjust
if isinstance(reason, Exception):
reason = global_object_name(reason.__class__)
stats.inc_value('retry/count')
stats.inc_value(f'retry/reason_count/{reason}')
return new_request
else:
stats.inc_value('retry/max_reached')
logger.error("Gave up retrying %(request)s (failed %(retry_times)d times): %(reason)s",
{'request': request, 'retry_times': retry_times, 'reason': reason},
extra={'spider': spider})
return
|
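A usage sketch from inside a Scrapy spider callback (Scrapy 2.5 or later, where this helper is importable from scrapy.downloadermiddlewares.retry): retry a throttled response, and give up silently once retries are exhausted.

from scrapy import Spider
from scrapy.downloadermiddlewares.retry import get_retry_request

class ThrottleAwareSpider(Spider):
    name = "throttle_aware"
    start_urls = ["https://example.com"]

    def parse(self, response):
        if response.status == 429:
            # Build a retry copy of the request; returns None once retries are exhausted.
            new_request = get_retry_request(
                response.request, spider=self, reason="throttled"
            )
            if new_request:
                yield new_request
            return
        yield {"url": response.url}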
7,273 | def correlate_sparse(image, kernel, mode='reflect'):
"""Compute valid cross-correlation of `padded_array` and `kernel`.
This function is *fast* when `kernel` is large with many zeros.
See ``scipy.ndimage.correlate`` for a description of cross-correlation.
Parameters
----------
image : ndarray, dtype float, shape (M, N,[ ...,] P)
The input array. If mode is 'valid', this array should already be
padded, as a margin of the same shape as kernel will be stripped
off.
kernel : ndarray, dtype float shape (Q, R,[ ...,] S)
The kernel to be correlated. Must have the same number of
dimensions as `padded_array`. For high performance, it should
be sparse (few nonzero entries).
mode : string, optional
See `scipy.ndimage.correlate` for valid modes.
Additionally, mode 'valid' is accepted, in which case no padding is
applied and the result is the result for the smaller image for which
the kernel is entirely inside the original data.
Returns
-------
result : array of float, shape (M, N,[ ...,] P)
The result of cross-correlating `image` with `kernel`. If mode
'valid' is used, the resulting shape is (M-Q+1, N-R+1,[ ...,] P-S+1).
"""
kernel = np.asarray(kernel)
if mode == 'valid':
padded_image = image
else:
np_mode = _to_np_mode(mode)
_validate_window_size(kernel.shape)
padded_image = np.pad(
image,
[(w // 2, w // 2) for w in kernel.shape],
mode=np_mode,
)
indices = np.nonzero(kernel)
values = kernel[indices].astype(padded_image.dtype, copy=False)
indices = list(zip(*indices))
kernel_indices_and_values = [(idx, v) for idx, v in zip(indices, values)]
if (0, ) * kernel.ndim not in indices:
kernel_indices_and_values = \
[((0,) * kernel.ndim, 0.0)] + kernel_indices_and_values
out = _correlate_sparse(
padded_image, kernel.shape, kernel_indices_and_values
)
return out
| def correlate_sparse(image, kernel, mode='reflect'):
"""Compute valid cross-correlation of `padded_array` and `kernel`.
This function is *fast* when `kernel` is large with many zeros.
See ``scipy.ndimage.correlate`` for a description of cross-correlation.
Parameters
----------
image : ndarray, dtype float, shape (M, N,[ ...,] P)
The input array. If mode is 'valid', this array should already be
padded, as a margin of the same shape as kernel will be stripped
off.
    kernel : ndarray, dtype float, shape (Q, R,[ ...,] S)
        The kernel to be correlated. Must have the same number of
        dimensions as `image`. For high performance, it should
be sparse (few nonzero entries).
mode : string, optional
See `scipy.ndimage.correlate` for valid modes.
Additionally, mode 'valid' is accepted, in which case no padding is
applied and the result is the result for the smaller image for which
the kernel is entirely inside the original data.
Returns
-------
result : array of float, shape (M, N,[ ...,] P)
The result of cross-correlating `image` with `kernel`. If mode
'valid' is used, the resulting shape is (M-Q+1, N-R+1,[ ...,] P-S+1).
"""
kernel = np.asarray(kernel)
if mode == 'valid':
padded_image = image
else:
np_mode = _to_np_mode(mode)
_validate_window_size(kernel.shape)
padded_image = np.pad(
image,
[(w // 2, w // 2) for w in kernel.shape],
mode=np_mode,
)
indices = np.nonzero(kernel)
values = kernel[indices].astype(padded_image.dtype, copy=False)
indices = list(zip(*indices))
kernel_indices_and_values = [(idx, v) for idx, v in zip(indices, values)]
if kernel.reshape(-1)[0] == 0:
kernel_indices_and_values = \
[((0,) * kernel.ndim, 0.0)] + kernel_indices_and_values
out = _correlate_sparse(
padded_image, kernel.shape, kernel_indices_and_values
)
return out
|
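A numpy-only sketch of the idea behind the sparse path (not the actual `_correlate_sparse` helper): each nonzero kernel entry contributes one shifted, weighted slice of the padded image, so only the nonzero entries cost anything.

import numpy as np

def sparse_correlate_sketch(padded, kernel):
    # output shape of a 'valid' correlation
    out_shape = tuple(p - k + 1 for p, k in zip(padded.shape, kernel.shape))
    out = np.zeros(out_shape, dtype=padded.dtype)
    for idx in zip(*np.nonzero(kernel)):
        # the slice of `padded` that this kernel entry sees for every output pixel
        window = tuple(slice(i, i + s) for i, s in zip(idx, out_shape))
        out += kernel[idx] * padded[window]
    return out

rng = np.random.default_rng(0)
img = rng.random((8, 8))
k = np.zeros((3, 3))
k[0, 0] = k[2, 2] = 1.0   # sparse kernel: only two nonzero entries
expected = np.array([[np.sum(img[i:i + 3, j:j + 3] * k) for j in range(6)]
                     for i in range(6)])
assert np.allclose(sparse_correlate_sketch(img, k), expected)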
45,515 | def is_dynmodb_configured(f):
def inner(*args, **kwargs):
if not dynamo_identity_table:
return
else:
return f(*args, **kwargs)
return inner
| def is_dynamodb_configured(f):
def inner(*args, **kwargs):
if not dynamo_identity_table:
return
else:
return f(*args, **kwargs)
return inner
|
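A hedged sketch of how such a guard decorator behaves; `dynamo_identity_table` is just a module-level placeholder here, not the real configuration object.

dynamo_identity_table = None  # placeholder for the real table handle

def is_dynamodb_configured(f):
    def inner(*args, **kwargs):
        if not dynamo_identity_table:
            return  # silently no-op when DynamoDB is not configured
        return f(*args, **kwargs)
    return inner

@is_dynamodb_configured
def record_identity(user_id):
    return f"stored {user_id}"

print(record_identity("abc"))      # None: table not configured yet
dynamo_identity_table = object()   # pretend configuration happened
print(record_identity("abc"))      # "stored abc"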
41,362 | def format_data(df, index, **kwargs):
"""Convert a pandas.Dataframe or pandas.Series to the required format"""
if isinstance(df, pd.Series):
df.name = df.name or "value"
df = df.to_frame()
# check for R-style year columns, converting where necessary
def convert_r_columns(c):
try:
first = c[0]
second = c[1:]
if first == "X":
try:
# bingo! was X2015 R-style, return the integer
return int(second)
except:
# nope, not an int, fall down to final return statement
pass
except:
# not a string/iterable/etc, fall down to final return statement
pass
return c
df.columns = df.columns.map(convert_r_columns)
# if `value` is given but not `variable`,
# melt value columns and use column name as `variable`
if "value" in kwargs and "variable" not in kwargs:
value = kwargs.pop("value")
value = value if islistable(value) else [value]
_df = df.set_index(list(set(df.columns) - set(value)))
dfs = []
for v in value:
if v not in df.columns:
raise ValueError("column `{}` does not exist!".format(v))
vdf = _df[v].to_frame().rename(columns={v: "value"})
vdf["variable"] = v
dfs.append(vdf.reset_index())
df = pd.concat(dfs).reset_index(drop=True)
# otherwise, rename columns or concat to IAMC-style or do a fill-by-value
for col, value in kwargs.items():
if col in df:
raise ValueError(
"conflict of kwarg with column `{}` in dataframe!".format(col)
)
if isstr(value) and value in df:
df.rename(columns={value: col}, inplace=True)
elif islistable(value) and all([c in df.columns for c in value]):
df[col] = df.apply(lambda x: concat_with_pipe(x, value), axis=1)
df.drop(value, axis=1, inplace=True)
elif isstr(value):
df[col] = value
else:
raise ValueError("invalid argument for casting `{}: {}`".format(col, value))
# all lower case
str_cols = [c for c in df.columns if isstr(c)]
df.rename(columns={c: str(c).lower() for c in str_cols}, inplace=True)
if "notes" in df.columns: # this came from the database
logger.info("Ignoring notes column in dataframe")
df.drop(columns="notes", inplace=True)
col = df.columns[0] # first column has database copyright notice
df = df[~df[col].str.contains("database", case=False)]
if "scenario" in df.columns and "model" not in df.columns:
# model and scenario are jammed together in RCP data
scen = df["scenario"]
df.loc[:, "model"] = scen.apply(lambda s: s.split("-")[0].strip())
df.loc[:, "scenario"] = scen.apply(
lambda s: "-".join(s.split("-")[1:]).strip()
)
# reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# check that there is no column in the timeseries data with reserved names
conflict_cols = [i for i in df.columns if i in ILLEGAL_COLS]
if conflict_cols:
msg = f"Column name {conflict_cols} is illegal for timeseries data.\n"
_args = ", ".join([f"{i}_1='{i}'" for i in conflict_cols])
msg += f"Use `IamDataFrame(..., {_args})` to rename at initialization."
raise ValueError(msg)
# check that index and required columns exist
missing_index = [c for c in index if c not in df.columns]
if missing_index:
raise ValueError(f"Missing index columns: {missing_index}")
missing_required_col = [c for c in REQUIRED_COLS if c not in df.columns]
if missing_required_col:
raise ValueError(f"Missing required columns: {missing_required_col}")
# check whether data in wide format (IAMC) or long format (`value` column)
if "value" in df.columns:
# check if time column is given as `year` (int) or `time` (datetime)
if "year" in df.columns:
time_col = "year"
elif "time" in df.columns:
time_col = "time"
else:
raise ValueError("Invalid time format, must have either `year` or `time`!")
extra_cols = [
c
for c in df.columns
if c not in index + REQUIRED_COLS + [time_col, "value"]
]
else:
# if in wide format, check if columns are years (int) or datetime
cols = [c for c in df.columns if c not in index + REQUIRED_COLS]
year_cols, time_cols, extra_cols = [], [], []
for i in cols:
try:
int(i) # this is a year
year_cols.append(i)
except (ValueError, TypeError):
try:
dateutil.parser.parse(str(i)) # this is datetime
time_cols.append(i)
except ValueError:
extra_cols.append(i) # some other string
if year_cols and not time_cols:
time_col = "year"
melt_cols = year_cols
elif not year_cols and time_cols:
time_col = "time"
melt_cols = time_cols
else:
raise ValueError("Invalid time format, must be either years or `datetime`!")
cols = index + REQUIRED_COLS + extra_cols
df = pd.melt(
df,
id_vars=cols,
var_name=time_col,
value_vars=sorted(melt_cols),
value_name="value",
)
# cast value column to numeric and drop nan
df["value"] = df["value"].astype("float64")
df.dropna(inplace=True, subset=["value"])
# replace missing units by an empty string for user-friendly filtering
df.loc[df.unit.isnull(), "unit"] = ""
# verify that there are no nan's left (in columns)
null_rows = df.isnull().T.any()
if null_rows.any():
_raise_data_error("Empty cells in `data`", df.loc[null_rows])
del null_rows
# format the time-column
df = format_time_col(df, time_col)
# cast to pd.Series, check for duplicates
idx_cols = index + REQUIRED_COLS + [time_col] + extra_cols
df = df.set_index(idx_cols).value
rows = df.index.duplicated()
if any(rows):
_raise_data_error(
"Duplicate rows in `data`", df[rows].index.to_frame(index=False)
)
if df.empty:
logger.warning("Formatted data is empty!")
return df.sort_index(), index, time_col, extra_cols
| def format_data(df, index, **kwargs):
"""Convert a pandas.Dataframe or pandas.Series to the required format"""
if isinstance(df, pd.Series):
df.name = df.name or "value"
df = df.to_frame()
# check for R-style year columns, converting where necessary
def convert_r_columns(c):
try:
first = c[0]
second = c[1:]
if first == "X":
try:
# bingo! was X2015 R-style, return the integer
return int(second)
except:
# nope, not an int, fall down to final return statement
pass
except:
# not a string/iterable/etc, fall down to final return statement
pass
return c
df.columns = df.columns.map(convert_r_columns)
# if `value` is given but not `variable`,
# melt value columns and use column name as `variable`
if "value" in kwargs and "variable" not in kwargs:
value = kwargs.pop("value")
value = value if islistable(value) else [value]
_df = df.set_index(list(set(df.columns) - set(value)))
dfs = []
for v in value:
if v not in df.columns:
raise ValueError("column `{}` does not exist!".format(v))
vdf = _df[v].to_frame().rename(columns={v: "value"})
vdf["variable"] = v
dfs.append(vdf.reset_index())
df = pd.concat(dfs).reset_index(drop=True)
# otherwise, rename columns or concat to IAMC-style or do a fill-by-value
for col, value in kwargs.items():
if col in df:
raise ValueError(
"conflict of kwarg with column `{}` in dataframe!".format(col)
)
if isstr(value) and value in df:
df.rename(columns={value: col}, inplace=True)
elif islistable(value) and all([c in df.columns for c in value]):
df[col] = df.apply(lambda x: concat_with_pipe(x, value), axis=1)
df.drop(value, axis=1, inplace=True)
elif isstr(value):
df[col] = value
else:
raise ValueError("invalid argument for casting `{}: {}`".format(col, value))
# all lower case
str_cols = [c for c in df.columns if isstr(c)]
df.rename(columns={c: str(c).lower() for c in str_cols}, inplace=True)
if "notes" in df.columns: # this came from the database
logger.info("Ignoring notes column in dataframe")
df.drop(columns="notes", inplace=True)
col = df.columns[0] # first column has database copyright notice
df = df[~df[col].str.contains("database", case=False)]
if "scenario" in df.columns and "model" not in df.columns:
# model and scenario are jammed together in RCP data
scen = df["scenario"]
df.loc[:, "model"] = scen.apply(lambda s: s.split("-")[0].strip())
df.loc[:, "scenario"] = scen.apply(
lambda s: "-".join(s.split("-")[1:]).strip()
)
# reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# check that there is no column in the timeseries data with reserved names
conflict_cols = [i for i in df.columns if i in ILLEGAL_COLS]
if conflict_cols:
msg = f"Column name {conflict_cols} is illegal for timeseries data.\n"
_args = ", ".join([f"{i}_1='{i}'" for i in conflict_cols])
msg += f"Use `IamDataFrame(..., {_args})` to rename at initialization."
raise ValueError(msg)
# check that index and required columns exist
missing_index = [c for c in index if c not in df.columns]
if missing_index:
raise ValueError(f"Missing index columns: {missing_index}")
missing_required_col = [c for c in REQUIRED_COLS if c not in df.columns]
if missing_required_col:
raise ValueError(f"Missing required columns: {missing_required_col}")
# check whether data in wide format (IAMC) or long format (`value` column)
if "value" in df.columns:
# check if time column is given as `year` (int) or `time` (datetime)
if "year" in df.columns:
time_col = "year"
elif "time" in df.columns:
time_col = "time"
else:
raise ValueError("Invalid time format, must have either `year` or `time`!")
extra_cols = [
c
for c in df.columns
if c not in index + REQUIRED_COLS + [time_col, "value"]
]
else:
# if in wide format, check if columns are years (int) or datetime
cols = [c for c in df.columns if c not in index + REQUIRED_COLS]
year_cols, time_cols, extra_cols = [], [], []
for i in cols:
try:
int(i) # this is a year
year_cols.append(i)
except (ValueError, TypeError):
try:
dateutil.parser.parse(str(i)) # this is datetime
time_cols.append(i)
except ValueError:
extra_cols.append(i) # some other string
if year_cols and not time_cols:
time_col = "year"
melt_cols = year_cols
elif not year_cols and time_cols:
time_col = "time"
melt_cols = time_cols
else:
raise ValueError("Invalid time format, must be either years or `datetime`!")
cols = index + REQUIRED_COLS + extra_cols
df = pd.melt(
df,
id_vars=cols,
var_name=time_col,
value_vars=sorted(melt_cols),
value_name="value",
)
# cast value column to numeric and drop nan
df["value"] = df["value"].astype("float64")
df.dropna(inplace=True, subset=["value"])
# replace missing units by an empty string for user-friendly filtering
df.loc[df.unit.isnull(), "unit"] = ""
# verify that there are no nan's left (in columns)
null_rows = df.isnull().T.any()
if null_rows.any():
_raise_data_error("Empty cells in `data`", df.loc[null_rows])
del null_rows
# format the time-column
df = format_time_col(df, time_col)
# cast to pd.Series, check for duplicates
idx_cols = index + REQUIRED_COLS + [time_col] + extra_cols
df = df.set_index(idx_cols).value
rows = df.index.duplicated()
if any(rows):
_raise_data_error(
"Duplicate rows in `data`", df[rows].index.to_frame(index=False)
)
del rows
if df.empty:
logger.warning("Formatted data is empty!")
return df.sort_index(), index, time_col, extra_cols
|
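The wide-to-long branch is essentially a `pd.melt` over the year columns; a standalone pandas sketch with made-up IAMC-style data (the column names are illustrative only, not the real REQUIRED_COLS constant).

import pandas as pd

wide = pd.DataFrame({
    "model": ["m"], "scenario": ["s"], "region": ["World"],
    "variable": ["Primary Energy"], "unit": ["EJ/yr"],
    2005: [450.0], 2010: [500.0],
})
id_cols = ["model", "scenario", "region", "variable", "unit"]
long = pd.melt(wide, id_vars=id_cols, var_name="year", value_name="value")
print(long[["year", "value"]])
#    year  value
# 0  2005  450.0
# 1  2010  500.0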
2,047 | def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSC or CSR sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : ndarray of float of shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_row_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
| def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSC or CSR sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : ndarray of shape (n_features,), dtype=float
Array of precomputed feature-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_row_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
|
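For CSR input the operation amounts to scaling every stored value by the scale of its column; a hedged numpy/scipy sketch of that idea (not the scikit-learn helper itself).

import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[1.0, 0.0, 2.0],
                            [0.0, 3.0, 4.0]]))
scale = np.array([10.0, 100.0, 1000.0])

# X.indices[k] is the column of the k-th stored value, so this scales columns in place
X.data *= scale[X.indices]
print(X.toarray())
# [[  10.    0. 2000.]
#  [   0.  300. 4000.]]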
37,893 | def args_in_kwargs(args, kwargs):
"""
Take a list and a dictionary, and determine if any entries in the list are
keys in the dictionary.
This function is used to determine if one of the required arguments is
passed to raise a GMTInvalidInput Error.
Parameters
----------
args : list
List of required arguments, using the GMT short aliases.
kwargs : dict
The dictionary of kwargs is the format returned by the _preprocess
function in BasePlotting in base_plotting.py. The keys are the GMT
short aliases of the parameters.
"""
for arg in args:
if arg in list(kwargs.keys()):
return True
return False
| def args_in_kwargs(args, kwargs):
"""
Take a list and a dictionary, and determine if any entries in the list are
keys in the dictionary.
This function is used to determine if one of the required arguments is
passed to raise a GMTInvalidInput Error.
Parameters
----------
args : list
List of required arguments, using the GMT short aliases.
kwargs : dict
The dictionary of kwargs is the format returned by the _preprocess
function in BasePlotting in base_plotting.py. The keys are the GMT
short aliases of the parameters.
"""
for arg in args:
if arg in kwargs.keys():
return True
return False
|
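An equivalent condensed form and a quick call with hypothetical GMT aliases, for illustration only.

def args_in_kwargs(args, kwargs):
    return any(arg in kwargs for arg in args)

print(args_in_kwargs(["B", "frame"], {"B": "af", "R": "0/10/0/10"}))  # True
print(args_in_kwargs(["C", "G"], {"R": "0/10/0/10"}))                 # False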
5,918 | def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(
Path('scratch') / 'target' / 'simple'
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(
Path('scratch') / 'target' / 'simple'
)
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(
Path('scratch') / 'target' / 'simple'
)
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
| def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(
Path('scratch') / 'target' / 'simple'
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(Path('scratch') / 'target' / 'simple')
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(
Path('scratch') / 'target' / 'simple'
)
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
|
2,158 | def multiclass_brier_score_loss(y_true, y_prob, sample_weight=None,
labels=None):
r"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome.
For :math:`N` samples with :math:`C` different classes, the multi-class
Brier score is defined as:
.. math::
\frac{1}{N}\sum_{i=1}^{N}\sum_{c=1}^{C}(y_{ic} - \bar{y}_{ic})^{2}
where :math:`y_{ic}` is 1 if observation `i` belongs to class `c`,
otherwise 0 and :math:`\bar{y}_{ic}` is the predicted probability of
observation `i` for class `c`. The probabilities for `c` classes for
observation `i` should sum to 1.
The Brier score always takes on a value between [0, 2]. For the
binary case however, there is a more common definition of Brier score
implemented in :func:`brier_score_loss` that is exactly half of the value
returned by this function, thereby having a range between [0, 1].
It can be decomposed as the sum of refinement loss and calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another).
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array-like of float, shape=(n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_prob`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> from sklearn.metrics import multiclass_brier_score_loss
>>> multiclass_brier_score_loss([0, 1, 1, 0],
... [0.1, 0.9, 0.8, 0.3])
0.074...
>>> multiclass_brier_score_loss(['eggs', 'ham', 'spam'], [[.8, .1, .1],
... [.2, .7, .1],
... [.2, .2, .6]])
0.146...
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = check_array(y_prob, ensure_2d=False)
check_consistent_length(y_prob, y_true, sample_weight)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
lb = LabelBinarizer()
if labels is not None:
lb = lb.fit(labels)
else:
lb = lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(f'y_true contains only one label: '
f'{lb.classes_[0]}. Please provide the true '
f'labels explicitly through the labels argument.')
else:
raise ValueError(f'The labels array needs to contain at least two '
f'labels, got {lb.classes_}.')
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1-transformed_labels,
transformed_labels, axis=1)
# If y_prob is of single dimension, assume y_true to be binary
if y_prob.ndim == 1:
y_prob = y_prob[:, np.newaxis]
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_prob.shape[1]:
if labels is None:
raise ValueError(f"y_true and y_prob contain different number of "
f"classes {transformed_labels.shape[1]}, "
f"{y_prob.shape[1]}. Please provide the true "
f"labels explicitly through the labels argument. "
f"Classes found in y_true: {lb.classes_}")
else:
raise ValueError(f'The number of classes in labels is different '
f'from that in y_prob. Classes found in '
f'labels: {lb.classes_}')
return np.average(np.sum((transformed_labels - y_prob) ** 2, axis=1),
weights=sample_weight)
| def multiclass_brier_score_loss(y_true, y_prob, sample_weight=None,
labels=None):
r"""Compute the Brier score loss.
The smaller the Brier score loss, the better, hence the naming with "loss".
The Brier score measures the mean squared difference between the predicted
probability and the actual outcome.
For :math:`N` samples with :math:`C` different classes, the multi-class
Brier score is defined as:
.. math::
\frac{1}{N}\sum_{i=1}^{N}\sum_{c=1}^{C}(y_{ic} - \bar{y}_{ic})^{2}
where :math:`y_{ic}` is 1 if observation `i` belongs to class `c`,
otherwise 0 and :math:`\bar{y}_{ic}` is the predicted probability of
observation `i` for class `c`. The probabilities for `c` classes for
observation `i` should sum to 1.
The Brier score always takes on a value between [0, 2]. For the
binary case however, there is a more common definition of Brier score
implemented in :func:`brier_score_loss` that is exactly half of the value
returned by this function, thereby having a range between [0, 1].
It can be decomposed as the sum of refinement loss and calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another).
Read more in the :ref:`User Guide <brier_score_loss>`.
Parameters
----------
y_true : array of shape (n_samples,)
True targets.
y_prob : array-like of float, shape=(n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_prob.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_prob`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
Returns
-------
score : float
Brier score loss.
Examples
--------
>>> from sklearn.metrics import multiclass_brier_score_loss
>>> multiclass_brier_score_loss([0, 1, 1, 0],
... [0.1, 0.9, 0.8, 0.3])
0.074...
>>> multiclass_brier_score_loss(['eggs', 'ham', 'spam'], [[.8, .1, .1],
... [.2, .7, .1],
... [.2, .2, .6]])
0.146...
References
----------
.. [1] `Wikipedia entry for the Brier score
<https://en.wikipedia.org/wiki/Brier_score>`_.
"""
y_true = column_or_1d(y_true)
y_prob = check_array(y_prob, ensure_2d=False)
check_consistent_length(y_prob, y_true, sample_weight)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
lb = LabelBinarizer()
if labels is not None:
lb = lb.fit(labels)
else:
lb = lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(f'y_true contains only one label: '
f'{lb.classes_[0]}. Please provide the true '
f'labels explicitly through the labels argument.')
else:
raise ValueError(f'The labels array needs to contain at least two '
f'labels, got {lb.classes_}.')
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1-transformed_labels,
transformed_labels, axis=1)
# If y_prob is of single dimension, assume y_true to be binary
if y_prob.ndim == 1:
y_prob = y_prob[:, np.newaxis]
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_prob.shape[1]:
if labels is None:
raise ValueError(f"y_true and y_prob contain different number of "
f"classes {transformed_labels.shape[1]}, "
f"{y_prob.shape[1]}. Please provide the true "
f"labels explicitly through the labels argument. "
f"Classes found in y_true: {lb.classes_}")
else:
raise ValueError(f'The number of classes in labels is different '
f'from that in y_prob. Classes found in '
f'labels: {lb.classes_}')
return np.average(np.sum((transformed_labels - y_prob) ** 2, axis=1),
weights=sample_weight)
|
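The second docstring example can be verified by hand against the formula above; a small numpy check, with one-hot encoding in alphabetical label order as `LabelBinarizer` would produce.

import numpy as np

# one-hot for ['eggs', 'ham', 'spam'] in alphabetical order
y_onehot = np.eye(3)
y_prob = np.array([[0.8, 0.1, 0.1],
                   [0.2, 0.7, 0.1],
                   [0.2, 0.2, 0.6]])
score = np.mean(np.sum((y_onehot - y_prob) ** 2, axis=1))
print(round(score, 3))  # 0.147, matching the 0.146... shown in the docstring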
15,276 | def setup(hass, config):
"""Set up the eBusd component."""
_LOGGER.debug("Ebusd integration setup started")
conf = config[DOMAIN]
name = conf[CONF_NAME]
circuit = conf[CONF_CIRCUIT]
monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
try:
ebusdpy.init(server_address)
hass.data[DOMAIN] = EbusdData(server_address, circuit)
sensor_config = {
CONF_MONITORED_CONDITIONS: monitored_conditions,
"client_name": name,
"sensor_types": SENSOR_TYPES[circuit],
}
load_platform(hass, "sensor", DOMAIN, sensor_config, config)
hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
_LOGGER.debug("Ebusd integration setup completed")
return True
except (socket.timeout, OSError):
return False
| def setup(hass, config):
"""Set up the eBusd component."""
_LOGGER.debug("Integration setup has started")
conf = config[DOMAIN]
name = conf[CONF_NAME]
circuit = conf[CONF_CIRCUIT]
monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
try:
ebusdpy.init(server_address)
hass.data[DOMAIN] = EbusdData(server_address, circuit)
sensor_config = {
CONF_MONITORED_CONDITIONS: monitored_conditions,
"client_name": name,
"sensor_types": SENSOR_TYPES[circuit],
}
load_platform(hass, "sensor", DOMAIN, sensor_config, config)
hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
_LOGGER.debug("Ebusd integration setup completed")
return True
except (socket.timeout, OSError):
return False
|
15,821 | def as_timestamp(dt_value: dt.datetime | str) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: dt.datetime | None = cast(dt.datetime, dt_value)
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
| def as_timestamp(dt_value: dt.datetime | str) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
|
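A minimal standard-library sketch of the two branches; `datetime.fromisoformat` stands in for the `parse_datetime` helper, which is an assumption for illustration.

from datetime import datetime, timezone

dt = datetime(2021, 1, 1, tzinfo=timezone.utc)
print(dt.timestamp())                                       # 1609459200.0

parsed = datetime.fromisoformat("2021-01-01T00:00:00+00:00")
print(parsed.timestamp())                                   # 1609459200.0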
57,695 | def set_group_state_command():
groupID = args.get('id')
on = args.get('on')
if on == "true" or on == "True":
on = True
elif on == "false" or on == "False":
on = False
hue = int(args.get('hue'))
bri = int(args.get('bri'))
sat = int(args.get('sat'))
lightID = args.get('id')
header = {
'Content-Type': 'application/json'
}
payload = {"hue": hue, "on": on, "sat": sat, "bri": bri}
page = 'groups/'
URL = baseURL + page + groupID + "/" + "action"
response = requests.request("PUT", URL, data=json.dumps(payload), headers=header)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
        raise Exception(error)
data = response.json()
return data
| def set_group_state_command():
groupID = args.get('id')
on = args.get('on')
if on == "true" or on == "True":
on = True
elif on == "false" or on == "False":
on = False
hue = int(args.get('hue', 0))
bri = int(args.get('bri', 0))
sat = int(args.get('sat', 0))
lightID = args.get('id')
header = {
'Content-Type': 'application/json'
}
payload = {"hue": hue, "on": on, "sat": sat, "bri": bri}
page = 'groups/'
URL = baseURL + page + groupID + "/" + "action"
response = requests.request("PUT", URL, data=json.dumps(payload), headers=header)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
        raise Exception(error)
data = response.json()
return data
|
48,525 | def apply_transformation_to_points(transformation, points, inplace=False):
"""Apply a given transformation matrix (3x3 or 4x4) to a set of points."""
if transformation.shape not in ((3, 3), (4, 4)):
raise RuntimeError('`transformation` must be of shape (3, 3) or (4, 4)')
if transformation.shape[1] == 4:
# a stack is a copy
points_2 = np.hstack((points, np.ones((len(points), 1))))
else:
points_2 = points
# Paged matrix multiplication. For arrays with ndim > 2, matmul assumes
# that the matrices to be multiplied lie in the last two dimensions.
points_2 = np.matmul(transformation[np.newaxis, :, :],
points_2.T)[0, :3, :].T
# If inplace, set the points
if inplace:
points[:] = points_2
else:
# otherwise return the new points
return points_2
| def apply_transformation_to_points(transformation, points, inplace=False):
"""Apply a given transformation matrix (3x3 or 4x4) to a set of points."""
if transformation.shape not in ((3, 3), (4, 4)):
raise RuntimeError('`transformation` must be of shape (3, 3) or (4, 4)')
if transformation.shape[1] == 4:
# a stack is a copy
        points_2 = np.empty((len(points), 4), dtype=points.dtype)
        points_2[:, :3] = points
        points_2[:, -1] = 1
else:
points_2 = points
# Paged matrix multiplication. For arrays with ndim > 2, matmul assumes
# that the matrices to be multiplied lie in the last two dimensions.
points_2 = np.matmul(transformation[np.newaxis, :, :],
points_2.T)[0, :3, :].T
# If inplace, set the points
if inplace:
points[:] = points_2
else:
# otherwise return the new points
return points_2
|
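A self-contained numpy check of the homogeneous-coordinate path: a 4x4 translation applied to two points, with the appended coordinate set to 1 so the translation column takes effect.

import numpy as np

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 2.0, 3.0]])
T = np.eye(4)
T[:3, 3] = [10.0, 20.0, 30.0]          # translate by (10, 20, 30)

homo = np.hstack((points, np.ones((len(points), 1))))   # append w = 1
moved = (T @ homo.T)[:3, :].T
print(moved)
# [[10. 20. 30.]
#  [11. 22. 33.]]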
38,976 | def test_advanced_exclude_nested_lists():
class SubSubModel(BaseModel):
i: int
j: int
class SubModel(BaseModel):
k: int
subsubs: List[SubSubModel]
class Model(BaseModel):
subs: List[SubModel]
m = Model(
subs=[
SubModel(k=1, subsubs=[SubSubModel(i=1, j=1), SubSubModel(i=2, j=2)]),
SubModel(k=2, subsubs=[SubSubModel(i=3, j=3)]),
]
)
# Normal nested __all__
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'j': 1}, {'j': 2}]}, {'k': 2, 'subsubs': [{'j': 3}]}]
}
# Merge sub dicts
assert m.dict(
exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}, 0: {'subsubs': {'__all__': {'j'}}}}}
) == {'subs': [{'k': 1, 'subsubs': [{}, {}]}, {'k': 2, 'subsubs': [{'j': 3}]}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': ...}, 0: {'subsubs': {'__all__': {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: {'subsubs': ...}}}) == {
'subs': [{'k': 1}, {'k': 2, 'subsubs': [{'i': 3}]}]
}
# Merge sub sets
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {0}}, 0: {'subsubs': {1}}}}) == {
'subs': [{'k': 1, 'subsubs': []}, {'k': 2, 'subsubs': []}]
}
# Merge sub dict-set
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {0: {'i'}}}, 0: {'subsubs': {1}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'j': 1}]}, {'k': 2, 'subsubs': [{'j': 3}]}]
}
# Different keys
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'k'}}}) == {'subs': [{}, {'k': 2}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': ...}, 0: {'k'}}}) == {'subs': [{}, {'k': 2}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'k': ...}}}) == {'subs': [{}, {'k': 2}]}
# Nested different keys
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i': ...}, 0: {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j': ...}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
# Ignore __all__ for index with defined exclude.
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'subsubs': {'__all__': {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: ...}}) == {
'subs': [{'k': 2, 'subsubs': [{'i': 3}]}]
}
assert m.dict(exclude={'subs': {'__all__': ..., 0: {'subsubs'}}}) == {'subs': [{'k': 1}]}
| def test_advanced_exclude_nested_lists():
class SubSubModel(BaseModel):
i: int
j: int
class SubModel(BaseModel):
k: int
subsubs: List[SubSubModel]
class Model(BaseModel):
subs: List[SubModel]
m = Model(
subs=[
dict(k=1, subsubs=[dict(i=1, j=1), dict(i=2, j=2)]),
SubModel(k=2, subsubs=[SubSubModel(i=3, j=3)]),
]
)
# Normal nested __all__
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'j': 1}, {'j': 2}]}, {'k': 2, 'subsubs': [{'j': 3}]}]
}
# Merge sub dicts
assert m.dict(
exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}, 0: {'subsubs': {'__all__': {'j'}}}}}
) == {'subs': [{'k': 1, 'subsubs': [{}, {}]}, {'k': 2, 'subsubs': [{'j': 3}]}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': ...}, 0: {'subsubs': {'__all__': {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: {'subsubs': ...}}}) == {
'subs': [{'k': 1}, {'k': 2, 'subsubs': [{'i': 3}]}]
}
# Merge sub sets
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {0}}, 0: {'subsubs': {1}}}}) == {
'subs': [{'k': 1, 'subsubs': []}, {'k': 2, 'subsubs': []}]
}
# Merge sub dict-set
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {0: {'i'}}}, 0: {'subsubs': {1}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'j': 1}]}, {'k': 2, 'subsubs': [{'j': 3}]}]
}
# Different keys
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'k'}}}) == {'subs': [{}, {'k': 2}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': ...}, 0: {'k'}}}) == {'subs': [{}, {'k': 2}]}
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'k': ...}}}) == {'subs': [{}, {'k': 2}]}
# Nested different keys
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i': ...}, 0: {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j': ...}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]
}
# Ignore __all__ for index with defined exclude.
assert m.dict(exclude={'subs': {'__all__': {'subsubs'}, 0: {'subsubs': {'__all__': {'j'}}}}}) == {
'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]
}
assert m.dict(exclude={'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: ...}}) == {
'subs': [{'k': 2, 'subsubs': [{'i': 3}]}]
}
assert m.dict(exclude={'subs': {'__all__': ..., 0: {'subsubs'}}}) == {'subs': [{'k': 1}]}
|
24,849 | def my_func(self): # [missing-return-type-doc]
"""This is a docstring.
Returns:
Always False
"""
return False
| def my_func(self): # [missing-return-type-doc]
"""Warn partial google returns
Returns:
Always False
"""
return False
|
30,714 | def list_accounts(client, args):
title = f'{INTEGRATION_NAME} - List of the Accounts'
raws = []
cyberark_ec = []
raw_response = client.get_accounts(offset=args['offset'], limit=args['limit'])['value']
if raw_response:
for item in raw_response:
raws.append(item)
cyberark_ec.append({
'AccountName': item['name'],
'UserName': item['userName'],
'PlatformID': item['platformId'],
'SafeName': item['safeName'],
'AccountID': item['id'],
'CreatedTime': item['createdTime']
})
if not raws:
return f'{INTEGRATION_NAME} - Could not find any Accounts'
context_entry = {
"CyberArk.Accounts": cyberark_ec
}
human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
return [human_readable, context_entry, raws]
| def list_accounts(client, args):
title = f'{INTEGRATION_NAME} - List of the Accounts'
raws = []
cyberark_ec = []
raw_response = client.get_accounts(offset=args.get('offset', '0'), limit=args.get('limit', '50')).get('value')
if raw_response:
for item in raw_response:
raws.append(item)
cyberark_ec.append({
'AccountName': item['name'],
'UserName': item['userName'],
'PlatformID': item['platformId'],
'SafeName': item['safeName'],
'AccountID': item['id'],
'CreatedTime': item['createdTime']
})
if not raws:
return f'{INTEGRATION_NAME} - Could not find any Accounts'
context_entry = {
"CyberArk.Accounts": cyberark_ec
}
human_readable = tableToMarkdown(t=context_entry.get('CyberArk.Accounts'), name=title)
return [human_readable, context_entry, raws]
|
44,487 | def get_canonical_type_struct_for_type(typ) -> str:
try:
return type_to_type_name.get(typ, None)
except:
return None
| def get_canonical_type_struct_for_type(t) -> str:
try:
        return type_to_type_name.get(t, None)
except:
return None
|
46,048 | def get_torchvision_models():
model_urls = dict()
# Since torchvision reconstruct its weight loading logic, some model keys
# and urls in `model_urls` have been changed. If you want to experiment
# based on old weights, please use torchvision lower than 13.0. See more
# details at https://github.com/open-mmlab/mmcv/issues/1848.
if digit_version(torchvision.__version__) <= digit_version('0.12.1'):
model_zoo_path = osp.join(
osp.dirname(__file__), '..', 'model_zoo',
'torchvision_before0.13.json')
return mmcv.load(model_zoo_path)
else:
warnings.warn(
'Checkpoints loaded from torchvision have been changed '
'since torchvision 0.13.0. If you want to experiment based on old '
'weights, please use torchvision lower than 13.0')
for cls_name, cls in torchvision.models.__dict__.items():
cls_key = cls_name.replace('_Weights', '').lower()
if cls_name.endswith('_Weights') and hasattr(cls, 'DEFAULT'):
model_urls[cls_key] = cls.DEFAULT.url
return model_urls
| def get_torchvision_models():
model_urls = dict()
# Since torchvision reconstruct its weight loading logic, some model keys
# and urls in `model_urls` have been changed. If you want to experiment
# based on old weights, please use torchvision lower than 13.0. See more
# details at https://github.com/open-mmlab/mmcv/issues/1848.
if digit_version(torchvision.__version__) <= digit_version('0.12.1'):
json_path = osp.join(mmcv.__path__[0], 'model_zoo/torchvision_before0.13.json')
        return mmcv.load(json_path)
else:
warnings.warn(
'Checkpoints loaded from torchvision have been changed '
'since torchvision 0.13.0. If you want to experiment based on old '
'weights, please use torchvision lower than 13.0')
for cls_name, cls in torchvision.models.__dict__.items():
cls_key = cls_name.replace('_Weights', '').lower()
if cls_name.endswith('_Weights') and hasattr(cls, 'DEFAULT'):
model_urls[cls_key] = cls.DEFAULT.url
return model_urls
|
8,367 | def _isophote_list_to_table(isophote_list, key_properties=['main']):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
key_properties : A list of properties to export from the isophote_list
If key_properties = ['all'] or ['main'], it will pick all or few
of the main properties.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
if 'all' in key_properties:
properties = _get_properties(isophote_list)
properties = __rename_properties(properties)
elif 'main' in key_properties:
properties = __rename_properties(properties)
else:
for an_item in key_properties:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
| def _isophote_list_to_table(isophote_list, key_properties=['main']):
"""
Convert an `~photutils.isophote.IsophoteList` instance to
a `~astropy.table.QTable`.
Parameters
----------
isophote_list : list of `~photutils.isophote.Isophote` or \
`~photutils.isophote.IsophoteList` instance
A list of isophotes.
key_properties : A list of properties to export from the isophote_list
If key_properties = ['all'] or ['main'], it will pick all or few
of the main properties.
Returns
-------
result : `~astropy.table.QTable`
An astropy QTable with the selected or all isophote parameters.
"""
properties = OrderedDict()
isotable = QTable()
# main_properties: `List`
# A list of main parameters matching the original names of
# the isophote_list parameters
def __rename_properties(properties,
orig_names = ['int_err', 'eps', 'ellip_err',
'grad_r_error', 'nflag'],
new_names = ['intens_err', 'ellipticity',
'ellipticity_err', 'grad_rerror',
'nflag']
):
'''
Simple renaming for some of the isophote_list parameters.
Parameters
----------
properties: `OrderedDict`
An OrderedDict with the list of the isophote_list parameters
orig_names: `List`
A list of original names in the isophote_list parameters
to be renamed
new_names: `List`
A list of new names matching in length of the orig_names
Returns
-------
properties: `OrderedDict`
An OrderedDict with the list of the renamed isophote_list
parameters
'''
main_properties = ['sma', 'intens', 'int_err', 'eps', 'ellip_err',
'pa', 'pa_err', 'grad', 'grad_error',
'grad_r_error', 'x0', 'x0_err', 'y0', 'y0_err',
'ndata', 'nflag', 'niter', 'stop_code']
for an_item in main_properties:
if an_item in orig_names:
properties[an_item] = new_names[orig_names.index(an_item)]
else:
properties[an_item] = an_item
return properties
if 'all' in key_properties:
properties = _get_properties(isophote_list)
properties = __rename_properties(properties)
    elif 'main' in key_properties:
properties = __rename_properties(properties)
else:
for an_item in key_properties:
properties[an_item] = an_item
for k, v in properties.items():
isotable[v] = np.array([getattr(iso, k) for iso in isophote_list])
if k in ('pa', 'pa_err'):
isotable[v] = isotable[v] * 180. / np.pi * u.deg
return isotable
|
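A hedged sketch of the column-by-column QTable construction with a dummy stand-in for isophote objects; `FakeIso` is purely illustrative and only mimics the attribute access the real code relies on.

import numpy as np
import astropy.units as u
from astropy.table import QTable

class FakeIso:                       # illustrative stand-in for an Isophote
    def __init__(self, sma, pa):
        self.sma, self.pa = sma, pa

isolist = [FakeIso(1.0, 0.0), FakeIso(2.0, np.pi / 2)]
tbl = QTable()
tbl['sma'] = np.array([iso.sma for iso in isolist])
tbl['pa'] = np.array([iso.pa for iso in isolist]) * 180. / np.pi * u.deg
print(tbl['pa'])   # Quantity column in degrees: [ 0. 90.] deg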
5,752 | def poisson_means_test(k1, n1, k2, n2, diff=0, alternative='two-sided'):
r"""
Calculates the poisson mean test, the "E-test", for the mean difference of
two samples that follow a Poisson distribution from descriptive statistics.
This is a two-sided test. The null hypothesis is that two independent
samples have identical average (expected) values.
Let :math:`X_{11},...,X_{1n_1}` and :math:`X_{21},...,X_{2n_2}` be
independent samples from distributions :math:`Poisson(\lambda_1)` and
:math:`Poisson(\lambda_2)`. It is well known that :math:`X_1`
and :math:`X_2` are independent:
.. math:: X_1 = \sum_{i=1}^{n_1} X_{1i} \sim Poisson(n_1\lambda_1)
.. math:: X_2 = \sum_{i=1}^{n_2} X_{2i} \sim Poisson(n_2\lambda_2)
Let `count1` and `count2` be the observed values of :math:`X_1` and
:math:`X_2`, respectively. The null hypothesis and alternative
hypothesis under comparison are
.. math::
H_0: \lambda_1 = \lambda_2 + \mathtt{diff} \quad vs. \quad
H_a: \lambda_1 \ne \lambda_2 + \mathtt{diff}
for ``alternative=two-sided``, where :math:`\mathtt{diff} \ge 0`.
Parameters
----------
k1 : int
Sample values of interest from sample 1.
n1: int
Sample size from sample 1.
k2 : int
Sample values of interest from sample 2.
n2: int
Sample size from sample 2.
diff : int or float, optional
The difference of mean between two samples under the null hypothesis
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': :math:`\lambda_1 \ne \lambda_2 + \mathtt{diff}`
* 'less': :math:`\lambda_1 \le \lambda_2 + \mathtt{diff}`
* 'greater': :math:`\lambda_1 \ge \lambda_2 + \mathtt{diff}`
Returns
-------
statistic : float
The test statistic calculated from observed samples
pvalue : float
The associated p-value based on the estimated p-value of the
standardized difference.
Notes
-----
A benefit of the E-test is that it maintains its power even
with smaller sample sizes which can reduce sampling costs [1]_. It has
been evaluated and determined to be more powerful than the comparable
C-test, sometimes referred to as the poisson exact test.
References
----------
.. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
comparing two Poisson means. Journal of Statistical Planning and
Inference, 119(1), 23-35.
.. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
testing samples from Poisson series: With an application to testing
clover seed for dodder. Biometrika, 31(3/4), 313-323.
Examples
--------
Suppose that a gardener wishes to test the number of dodder seeds, a weed,
in a sack of clover seeds that they buy from a seed company.
A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference between is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 = \lambda_2 + \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 \ne \lambda_2 + 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)
The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
"""
_chck_args_poisson_mean_test(k1, n1, k2, n2, diff, alternative)
# "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
# "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
# case the null hypothesis cannot be rejected ... [and] it is not necessary
# to compute the p-value". [1] page 26 below eq. (3.6).
if lmbd_hat2 <= 0:
return PoissonMeansTestResult(0, 1)
# the unbiased variance estimate [1] (3.2)
var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
# the _observed_ pivot statistic from the input. It follows the
# unnumbered equation following equation (3.3) This is used later in
# comparison with the computed pivot statistics in an indicator function.
t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
# equation (3.5) of [1] is lengthy, so it is broken into several parts,
# beginning here. Note that the probability mass function of poisson is
    # exp^(-\mu)*\mu^k/k!, and this is called with shape \mu, noted here
    # as nlmbd_hat*. The strategy for evaluating the double summation in
# (3.5) is to create two arrays of the values of the two products inside
# the summation and then broadcast them together into a matrix, and then
# sum across the entire matrix.
# compute constants (as seen in the first and second separated products in
# (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
nlmbd_hat2 = n2 * lmbd_hat2
# determine summation bounds for tail ends of distribution rather than
# summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
# sum
x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
# construct arrays to function as the x_1 and x_2 counters on the summation
# in (3.5). `x1` is in columns and `x2` is in rows to allow for
# broadcasting.
x1 = np.arange(x1_lb, x1_ub + 1)
x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
# these are the two products in equation (3.5) with `prob_x1` being the
# first (left side) and `prob_x2` being the second (right side). (To
# make as clear as possible: the 1st contains a "+ d" term, the 2nd does
# not.)
prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
    # compute constants for use in the "pivot statistic" per the
# unnumbered equation following (3.3).
lmbd_x1 = x1 / n1
lmbd_x2 = x2 / n2
lmbds_diff = lmbd_x1 - lmbd_x2 - diff
var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
# this is the 'pivot statistic' for use in the indicator of the summation
# (left side of "I[.]"). Before dividing, mask zero-elements in the
# denominator with infinity so that they are `false` in the indicator.
mask_out_invalid = (np.abs(lmbd_x1 - lmbd_x2) > diff
if alternative == 'two-sided' else lmbds_diff > 0)
var_x1x2[~mask_out_invalid] = np.inf
t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
if alternative == 'two-sided':
alternative_comparison = lambda x, y: np.abs(x) >= np.abs(y)
elif alternative == 'less':
alternative_comparison = lambda x, y: np.less_equal(x, y)
else:
        alternative_comparison = lambda x, y: np.greater_equal(x, y)
# `[indicator]` implements the "I[.] ... the indicator function" per
# the paragraph following equation (3.5).
indicator = alternative_comparison(t_x1x2, t_k1k2)
# multiply all combinations of the products together, exclude terms
# based on the `indicator` and then sum. (3.5)
pvalue = np.sum((prob_x1 * prob_x2)[indicator])
return PoissonMeansTestResult(t_k1k2, pvalue)
| def poisson_means_test(k1, n1, k2, n2, diff=0, alternative='two-sided'):
r"""
Calculates the poisson mean test, the "E-test", for the mean difference of
two samples that follow a Poisson distribution from descriptive statistics.
This is a two-sided test. The null hypothesis is that two independent
samples have identical average (expected) values.
Let :math:`X_{11},...,X_{1n_1}` and :math:`X_{21},...,X_{2n_2}` be
independent samples from distributions :math:`Poisson(\lambda_1)` and
:math:`Poisson(\lambda_2)`. It is well known that :math:`X_1`
and :math:`X_2` are independent:
.. math:: X_1 = \sum_{i=1}^{n_1} X_{1i} \sim Poisson(n_1\lambda_1)
.. math:: X_2 = \sum_{i=1}^{n_2} X_{2i} \sim Poisson(n_2\lambda_2)
Let `count1` and `count2` be the observed values of :math:`X_1` and
:math:`X_2`, respectively. The null hypothesis and alternative
hypothesis under comparison are
.. math::
H_0: \lambda_1 = \lambda_2 + \mathtt{diff} \quad vs. \quad
H_a: \lambda_1 \ne \lambda_2 + \mathtt{diff}
for ``alternative=two-sided``, where :math:`\mathtt{diff} \ge 0`.
Parameters
----------
k1 : int
Sample values of interest from sample 1.
n1: int
Sample size from sample 1.
k2 : int
Sample values of interest from sample 2.
n2: int
Sample size from sample 2.
diff : int or float, optional
The difference of mean between two samples under the null hypothesis.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': :math:`\lambda_1 \ne \lambda_2 + \mathtt{diff}`
* 'less': :math:`\lambda_1 \le \lambda_2 + \mathtt{diff}`
* 'greater': :math:`\lambda_1 \ge \lambda_2 + \mathtt{diff}`
Returns
-------
statistic : float
The test statistic calculated from observed samples
pvalue : float
The associated p-value based on the estimated p-value of the
standardized difference.
Notes
-----
A benefit of the E-test is that it maintains its power even
with smaller sample sizes which can reduce sampling costs [1]_. It has
been evaluated and determined to be more powerful than the comparable
C-test, sometimes referred to as the poisson exact test.
References
----------
.. [1] Krishnamoorthy, K., & Thomson, J. (2004). A more powerful test for
comparing two Poisson means. Journal of Statistical Planning and
Inference, 119(1), 23-35.
.. [2] Przyborowski, J., & Wilenski, H. (1940). Homogeneity of results in
testing samples from Poisson series: With an application to testing
clover seed for dodder. Biometrika, 31(3/4), 313-323.
Examples
--------
Suppose that a gardener wishes to test the number of dodder seeds, a weed,
in a sack of clover seeds that they buy from a seed company.
A 100 gram sample is drawn from the sack before being shipped to the
gardener. The sample is analyzed, and it is found to contain no dodder
seeds; that is, `k1` is 0. However, upon arrival, the gardener draws
another 100 gram sample from the sack. This time, three dodder seeds are
found in the sample; that is, `k2` is 3. The gardener would like to
know if the difference between is significant and not due to chance. The
null hypothesis is that the difference between the two samples is merely
due to chance, or that :math:`\lambda_1 = \lambda_2 + \mathtt{diff}`
where :math:`\mathtt{diff} = 0`. The alternative hypothesis is that the
difference is not due to chance, or :math:`\lambda_1 \ne \lambda_2 + 0`.
The gardener selects a significance level of 5% to reject the null
hypothesis in favor of the alternative [2]_.
>>> res = stats.poisson_means_test(0, 100, 3, 100)
>>> res.statistic, res.pvalue
(-1.7320508075688772, 0.08837900929018157)
The p-value is .088, indicating a near 9% chance of observing a value of
the test statistic under the null hypothesis. This exceeds 5%, so the
gardener does not reject the null hypothesis as the difference cannot be
regarded as significant at this level.
"""
_chck_args_poisson_mean_test(k1, n1, k2, n2, diff, alternative)
# "for a given k_1 and k_2, an estimate of \lambda_2 is given by" [1] (3.4)
lmbd_hat2 = ((k1 + k2) / (n1 + n2) - diff * n1 / (n1 + n2))
# "\hat{\lambda_{2k}} may be less than or equal to zero ... and in this
# case the null hypothesis cannot be rejected ... [and] it is not necessary
# to compute the p-value". [1] page 26 below eq. (3.6).
if lmbd_hat2 <= 0:
return PoissonMeansTestResult(0, 1)
# the unbiased variance estimate [1] (3.2)
var = k1 / (n1 ** 2) + k2 / (n2 ** 2)
# the _observed_ pivot statistic from the input. It follows the
# unnumbered equation following equation (3.3) This is used later in
# comparison with the computed pivot statistics in an indicator function.
t_k1k2 = (k1 / n1 - k2 / n2 - diff) / np.sqrt(var)
# equation (3.5) of [1] is lengthy, so it is broken into several parts,
# beginning here. Note that the probability mass function of poisson is
    # exp^(-\mu)*\mu^k/k!, and this is called with shape \mu, noted here
    # as nlmbd_hat*. The strategy for evaluating the double summation in
# (3.5) is to create two arrays of the values of the two products inside
# the summation and then broadcast them together into a matrix, and then
# sum across the entire matrix.
# compute constants (as seen in the first and second separated products in
# (3.5).). (This is the shape (\mu) parameter of the poisson distribution.)
nlmbd_hat1 = n1 * (lmbd_hat2 + diff)
nlmbd_hat2 = n2 * lmbd_hat2
# determine summation bounds for tail ends of distribution rather than
# summing to infinity. `x1*` is for the outer sum and `x2*` is the inner
# sum
x1_lb, x1_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat1)
x2_lb, x2_ub = distributions.poisson.ppf([1e-10, 1 - 1e-16], nlmbd_hat2)
# construct arrays to function as the x_1 and x_2 counters on the summation
# in (3.5). `x1` is in columns and `x2` is in rows to allow for
# broadcasting.
x1 = np.arange(x1_lb, x1_ub + 1)
x2 = np.arange(x2_lb, x2_ub + 1)[:, None]
# these are the two products in equation (3.5) with `prob_x1` being the
# first (left side) and `prob_x2` being the second (right side). (To
# make as clear as possible: the 1st contains a "+ d" term, the 2nd does
# not.)
prob_x1 = distributions.poisson.pmf(x1, nlmbd_hat1)
prob_x2 = distributions.poisson.pmf(x2, nlmbd_hat2)
    # compute constants for use in the "pivot statistic" per the
# unnumbered equation following (3.3).
lmbd_x1 = x1 / n1
lmbd_x2 = x2 / n2
lmbds_diff = lmbd_x1 - lmbd_x2 - diff
var_x1x2 = lmbd_x1 / n1 + lmbd_x2 / n2
# this is the 'pivot statistic' for use in the indicator of the summation
# (left side of "I[.]"). Before dividing, mask zero-elements in the
# denominator with infinity so that they are `false` in the indicator.
mask_out_invalid = (np.abs(lmbd_x1 - lmbd_x2) > diff
if alternative == 'two-sided' else lmbds_diff > 0)
var_x1x2[~mask_out_invalid] = np.inf
t_x1x2 = lmbds_diff / np.sqrt(var_x1x2)
if alternative == 'two-sided':
alternative_comparison = lambda x, y: np.abs(x) >= np.abs(y)
elif alternative == 'less':
alternative_comparison = lambda x, y: np.less_equal(x, y)
else:
alternative_comparison = lambda x, y: np.less_equal(x, y)
# `[indicator]` implements the "I[.] ... the indicator function" per
# the paragraph following equation (3.5).
indicator = alternative_comparison(t_x1x2, t_k1k2)
# multiply all combinations of the products together, exclude terms
# based on the `indicator` and then sum. (3.5)
pvalue = np.sum((prob_x1 * prob_x2)[indicator])
return PoissonMeansTestResult(t_k1k2, pvalue)
|
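The comments in the function above describe how the double summation in equation (3.5) is evaluated: the two Poisson pmf factors are built as a row and a column array, broadcast together into a matrix, masked by an indicator, and summed. A minimal sketch of that broadcast-and-sum pattern, with invented rates and an invented indicator (none of these numbers come from the function above):

import numpy as np
from scipy.stats import poisson

# Hypothetical shape parameters standing in for n1*(lmbd_hat2 + diff) and n2*lmbd_hat2.
mu1, mu2 = 4.0, 2.5

# Truncate the infinite sums at the tails of each distribution.
x1 = np.arange(poisson.ppf(1e-10, mu1), poisson.ppf(1 - 1e-16, mu1) + 1)
x2 = np.arange(poisson.ppf(1e-10, mu2), poisson.ppf(1 - 1e-16, mu2) + 1)[:, None]

prob_x1 = poisson.pmf(x1, mu1)  # row
prob_x2 = poisson.pmf(x2, mu2)  # column

# Broadcasting the two factors gives every term of the double sum at once.
joint = prob_x1 * prob_x2

# Stand-in indicator: keep only the cells where x1 is at least x2.
indicator = x1 >= x2
print(joint[indicator].sum())  # a probability between 0 and 1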
52,687 | def set_time_passes(enable):
"""Enable or disable pass timers.
Parameters
----------
enable : bool
Set to True to enable pass timers.
Set to False to disable pass timers.
"""
ffi.lib.LLVMPY_SetTimePasses(c_int(enable))
| def set_time_passes(enable):
"""Enable or disable pass timers.
Parameters
----------
enable : bool
Set to True to enable the pass timers.
Set to False to disable the pass timers.
"""
ffi.lib.LLVMPY_SetTimePasses(c_int(enable))
|
45,297 | def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column: Optional[str] = None,
lower_bound: Optional[int] = None,
upper_bound: Optional[int] = None,
max_sessions: Optional[int] = None,
) -> DataFrame:
"""
General documentation in `modin.pandas.read_sql`.
Experimental feature is simultaneous reading from a sql file.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy
connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used to pass
parameters is database driver dependent. Check your database driver
documentation for which of the five syntax styles, described in PEP 249’s
paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params=
{‘name’ : ‘value’}.
parse_dates : list or dict, optional
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
partition_column : str, optional
        Column used to share the data between the workers (MUST be an INTEGER column).
lower_bound : int, optional
The minimum value to be requested from the partition_column.
upper_bound : int, optional
The maximum value to be requested from the partition_column.
max_sessions : int, optional
The maximum number of simultaneous connections allowed to use.
Returns
-------
Modin DataFrame.
"""
Engine.subscribe(_update_engine)
assert IsExperimental.get(), "This only works in experimental mode"
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
return DataFrame(query_compiler=EngineDispatcher.read_sql(**kwargs))
| def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize=None,
partition_column: Optional[str] = None,
lower_bound: Optional[int] = None,
upper_bound: Optional[int] = None,
max_sessions: Optional[int] = None,
) -> DataFrame:
"""
General documentation in `modin.pandas.read_sql`.
This experimental feature provides distributed reading from a sql file.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy
connectable; str connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional
Column(s) to set as index(MultiIndex).
coerce_float : bool, default: True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional
List of parameters to pass to execute method. The syntax used to pass
parameters is database driver dependent. Check your database driver
documentation for which of the five syntax styles, described in PEP 249’s
paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params=
{‘name’ : ‘value’}.
parse_dates : list or dict, optional
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, optional
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, optional
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
partition_column : str, optional
        Column used to share the data between the workers (MUST be an INTEGER column).
lower_bound : int, optional
The minimum value to be requested from the partition_column.
upper_bound : int, optional
The maximum value to be requested from the partition_column.
max_sessions : int, optional
The maximum number of simultaneous connections allowed to use.
Returns
-------
Modin DataFrame.
"""
Engine.subscribe(_update_engine)
assert IsExperimental.get(), "This only works in experimental mode"
_, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
return DataFrame(query_compiler=EngineDispatcher.read_sql(**kwargs))
|
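The extra parameters documented above (partition_column, lower_bound, upper_bound, max_sessions) are what turn read_sql into a distributed read. A hedged usage sketch; the connection string, table and column names are invented, and it assumes the experimental namespace is imported as modin.experimental.pandas, which is where this function normally lives:

import modin.experimental.pandas as pd

df = pd.read_sql(
    "SELECT * FROM events",            # hypothetical query
    "postgresql://user:pass@host/db",  # hypothetical connection string
    partition_column="event_id",       # must be an integer column
    lower_bound=0,
    upper_bound=1_000_000,
    max_sessions=8,
)
print(df.shape)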
18,934 | def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
# This includes all build dependencies and any other dependencies that
# should be added to PATH (e.g. supporting executables run by build
# dependencies)
build_and_supporting_deps = set()
for build_dep in build_deps:
build_and_supporting_deps.update(build_dep.traverse(deptype='run'))
# External packages may be installed in a prefix which contains many other
# package installs. To avoid having those installations override
# Spack-installed packages, they are placed at the end of search paths.
# System prefixes are removed entirely later on since they are already
# searched.
build_deps = _place_externals_last(build_deps)
link_deps = _place_externals_last(link_deps)
build_link_deps = _place_externals_last(build_link_deps)
rpath_deps = _place_externals_last(rpath_deps)
build_and_supporting_deps = _place_externals_last(
build_and_supporting_deps)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_and_supporting_prefixes = filter_system_paths(
x.prefix for x in build_and_supporting_deps)
build_link_prefixes = filter_system_paths(
x.prefix for x in build_link_deps)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', get_cmake_prefix_path(pkg))
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
env.extend(spack.schema.environment.parse(compiler.environment))
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
# These directories are added to the beginning of the search path, and in
# the order given by 'build_and_supporting_prefixes' (the iteration order
# is reversed because each entry is prepended)
for prefix in reversed(build_and_supporting_prefixes):
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, os.path.dirname(pkg.compiler.link_paths['cc']))
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in reversed(build_link_prefixes):
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
| def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
# This includes all build dependencies and any other dependencies that
# should be added to PATH (e.g. supporting executables run by build
# dependencies)
build_and_supporting_deps = set()
for build_dep in build_deps:
build_and_supporting_deps.update(build_dep.traverse(deptype='run'))
# External packages may be installed in a prefix which contains many other
# package installs. To avoid having those installations override
# Spack-installed packages, they are placed at the end of search paths.
# System prefixes are removed entirely later on since they are already
# searched.
build_deps = _place_externals_last(build_deps)
link_deps = _place_externals_last(link_deps)
build_link_deps = _place_externals_last(build_link_deps)
rpath_deps = _place_externals_last(rpath_deps)
build_and_supporting_deps = _place_externals_last(
build_and_supporting_deps)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_and_supporting_prefixes = filter_system_paths(
x.prefix for x in build_and_supporting_deps)
build_link_prefixes = filter_system_paths(
x.prefix for x in build_link_deps)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', filter_system_paths(x.prefix for x in build_link_deps))
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
env.extend(spack.schema.environment.parse(compiler.environment))
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
# These directories are added to the beginning of the search path, and in
# the order given by 'build_and_supporting_prefixes' (the iteration order
# is reversed because each entry is prepended)
for prefix in reversed(build_and_supporting_prefixes):
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, os.path.dirname(pkg.compiler.link_paths['cc']))
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in reversed(build_link_prefixes):
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
|
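A detail both versions above rely on: because each bin directory is prepended to PATH, the loop walks build_and_supporting_prefixes in reverse so that the first prefix in the list ends up first on PATH. A tiny standalone illustration of that idiom (the prefix names are invented):

# Prepending reverses order, so iterate in reverse to preserve priority.
prefixes = ["/opt/toolA", "/opt/toolB", "/opt/toolC"]  # desired priority order

path_parts = ["/usr/bin"]
for prefix in reversed(prefixes):
    path_parts.insert(0, prefix + "/bin")

print(":".join(path_parts))
# /opt/toolA/bin:/opt/toolB/bin:/opt/toolC/bin:/usr/bin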
45,995 | def _four_point_to_homography(corners: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
"""Convert 4-point representation introduced in :cite:`detone2016deep` to homography.
Args:
corners: corners tensor with shape :math:`(B, 4, 2)` where B = batch size
deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size
Return:
the converted homography.
"""
if not isinstance(corners, torch.Tensor):
raise TypeError(f"corners type is not a torch.Tensor. Got {type(corners)}")
if not isinstance(deltas, torch.Tensor):
raise TypeError(f"deltas type is not a torch.Tensor. Got {type(deltas)}")
if not len(corners.shape) == 3 or not corners.shape[1] == 4 or not corners.shape[2] == 2:
raise ValueError(f"Invalid input shape of corners, we expect Bx4x2. Got: {corners.shape}")
if not len(deltas.shape) == 3 or not deltas.shape[1] == 4 or not deltas.shape[2] == 2:
raise ValueError(f"Invalid input shape of deltas, we expect Bx4x2. Got: {deltas.shape}")
if not corners.size(0) == deltas.size(0):
raise ValueError(f'Expected corners batch_size ({corners.size(0)}) to match deltas batch '
f'size ({deltas.size(0)}).')
corners_hat = corners + deltas
homography_inv = get_perspective_transform(corners, corners_hat)
homography = torch.inverse(homography_inv)
return homography
| def _four_point_to_homography(corners: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
"""Convert 4-point representation introduced in :cite:`detone2016deep` to homography.
Args:
corners: corners tensor with shape :math:`(B, 4, 2)` where B = batch size
deltas: deltas tensor with shape :math:`(B, 4, 2)` where B = batch size
Return:
the converted homography.
"""
if not isinstance(corners, torch.Tensor):
raise TypeError(f"corners type is not a torch.Tensor. Got {type(corners)}")
if not isinstance(deltas, torch.Tensor):
raise TypeError(f"deltas type is not a torch.Tensor. Got {type(deltas)}")
if not len(corners.shape) == 3 or not corners.shape[1] == 4 or not corners.shape[2] == 2:
raise ValueError(f"Invalid input shape of corners, we expect Bx4x2. Got: {corners.shape}")
if not len(deltas.shape) == 3 or not deltas.shape[1] == 4 or not deltas.shape[2] == 2:
raise ValueError(f"Invalid input shape of deltas, we expect Bx4x2. Got: {deltas.shape}")
if not corners.size(0) == deltas.size(0):
raise ValueError(f'Expected corners batch_size ({corners.size(0)}) to match deltas batch '
f'size ({deltas.size(0)}).')
corners_hat = corners + deltas
dst_homo_src = get_perspective_transform(corners, corners_hat)
homography = torch.inverse(homography_inv)
return homography
|
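The 4-point parameterisation above recovers a homography from the correspondence corners -> corners + deltas and then inverts it. A small sketch of the same idea, following the flow of the first version; it assumes kornia's get_perspective_transform is importable from kornia.geometry, and the corner coordinates and offsets are arbitrary:

import torch
import kornia

# One batch of four corners (B, 4, 2) and arbitrary per-corner offsets.
corners = torch.tensor([[[0.0, 0.0], [320.0, 0.0], [320.0, 240.0], [0.0, 240.0]]])
deltas = torch.tensor([[[5.0, -3.0], [-2.0, 4.0], [3.0, 2.0], [-4.0, -1.0]]])

corners_hat = corners + deltas
homography_inv = kornia.geometry.get_perspective_transform(corners, corners_hat)
homography = torch.inverse(homography_inv)
print(homography.shape)  # torch.Size([1, 3, 3])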
54,584 | def test_list_format_works():
"""" Giving a list [minx, miny, maxx, maxy] makes a polygon"""
list_out = es.extent_to_json([0, 0, 1, 1])
assert list_out["type"] == "Polygon"
| def test_list_format_works():
"""" Giving a list [minx, miny, maxx, maxy] makes a polygon"""
assert list_out["type"] == "Polygon"
|
30,727 | def get_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
scim_flat_data = map_scim(scim)
user_id = scim_flat_data.get('id')
username = scim_flat_data.get('userName')
email = scim_flat_data.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id,, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = username if username else email
user_term = user_term.lower()
res = client.get_user_profile(user_term)
if res.status_code == 200:
res_json = res.json()
active = res_json['enabled']
generic_iam_context = OutputContext(success=True, iden=res_json.get('email'), email=res_json.get('email'),
username=username, details=res_json, active=active)
elif res.status_code == 400:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage="User Not Found", details=res.headers.get('x-redlock-status'))
else:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res.status_code,
errorMessage=res.headers.get('x-redlock-status'),
details=res.headers.get('x-redlock-status'))
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Get PrismaCloud User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
| def get_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
scim_flat_data = map_scim(scim)
user_id = scim_flat_data.get('id')
username = scim_flat_data.get('userName')
email = scim_flat_data.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = username if username else email
user_term = user_term.lower()
res = client.get_user_profile(user_term)
if res.status_code == 200:
res_json = res.json()
active = res_json['enabled']
generic_iam_context = OutputContext(success=True, iden=res_json.get('email'), email=res_json.get('email'),
username=username, details=res_json, active=active)
elif res.status_code == 400:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage="User Not Found", details=res.headers.get('x-redlock-status'))
else:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res.status_code,
errorMessage=res.headers.get('x-redlock-status'),
details=res.headers.get('x-redlock-status'))
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Get PrismaCloud User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
5,433 | def assertNoOrderedDict(data):
if isinstance(data, OrderedDict):
raise AssertionError("Found an ordered dictionary")
if isinstance(data, dict):
for value in data.values():
assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
assertNoOrderedDict(chunk)
| def assert_no_ordered_dict(data):
if isinstance(data, OrderedDict):
raise AssertionError("Found an ordered dictionary")
if isinstance(data, dict):
for value in data.values():
assertNoOrderedDict(value)
elif isinstance(data, (list, tuple)):
for chunk in data:
assertNoOrderedDict(chunk)
|
2,593 | def test_label_propagation_closed_form(global_dtype):
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
X = X.astype(global_dtype)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_allclose(expected, clf.label_distributions_, 4)
| def test_label_propagation_closed_form(global_dtype):
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
X = X.astype(global_dtype, copy=False)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = label_propagation.LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
T_bar = clf._build_graph()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
assert_allclose(expected, clf.label_distributions_, 4)
|
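Both versions of this test check the iterative solver against the closed form from Zhu et al. (2002) that the code spells out with np.linalg.inv. Written with L and U for the labelled and unlabelled index sets, the quantity being compared is (this restates what the test computes, nothing more):

\[
  Y_U = (I - T_{UU})^{-1}\, T_{UL}\, Y_L,
  \qquad
  \hat{Y}_{ic} = \frac{Y_{ic}}{\sum_{c'} Y_{ic'}}
\]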
46,519 | def state_transition(state: BeaconState,
block: BeaconBlock,
verify_state_root: bool=False) -> BeaconState:
while state.slot < block.slot:
spec.cache_state(state)
if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
process_epoch_transition(state)
spec.advance_slot(state)
if block.slot == state.slot:
process_block(state, block)
| def state_transition(state: BeaconState,
block: BeaconBlock,
verify_state_root: bool=False) -> BeaconState:
while state.slot < block.slot:
spec.cache_state(state)
if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
process_epoch_transition(state)
spec.advance_slot(state)
if block.slot == state.slot:
process_block(state, block, verify_state_root)
|
9,046 | def rate_user(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a user.
:param rate: seconds between permitted calls of this function by the same
user
:param message: optional; message send as notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_user(20, 'You hit your rate limit for this function.')
# user limit will be set to 20, other to 10
# will send a NOTICE only when a user hits their own limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.rate = rate
function.user_rate_message = message
return function
return add_attribute
| def rate_user(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a user.
:param rate: seconds between permitted calls of this function by the same
user
:param message: optional; message sent as NOTICE when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_user(20, 'You hit your rate limit for this function.')
# user limit will be set to 20, other to 10
# will send a NOTICE only when a user hits their own limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.rate = rate
function.user_rate_message = message
return function
return add_attribute
|
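As the docstring above explains, the decorator works by stamping attributes onto the decorated function, which the bot later reads to enforce the limit. A stripped-down sketch of that mechanism; the attribute names here are illustrative, not Sopel's real ones:

import typing


def user_limit(rate: int, message: typing.Optional[str] = None) -> typing.Callable:
    """Attach per-user rate-limit metadata to a function."""
    def add_attribute(function):
        function.user_rate = rate
        function.user_rate_message = message
        return function
    return add_attribute


@user_limit(20, 'You hit your rate limit for this function.')
def greet(nick):
    return f'hello {nick}'


# The framework (here, just us) reads the metadata back before calling.
print(greet.user_rate, greet.user_rate_message)
print(greet('alice'))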
32,329 | def gw_send_malware(client: GwClient, args: Optional[Dict[Any, Any]]) -> CommandResults: # noqa: E501
"""Send file to the GScan malcore analysis.
Args:
client: Client to interact with the GCenter.
args: Command arguments.
Returns:
CommandResults object with the 'GCenter.Gscan.Malware' prefix.
"""
result = client.send_malware(
name=args.get("name"), # type: ignore
content=args.get("content") # type: ignore
)
return CommandResults(
outputs_prefix="GCenter.Gscan.Malware",
outputs=result
)
| def gw_send_malware(client: GwClient, args: Optional[Dict[Any, Any]]) -> CommandResults: # noqa: E501
"""Send file to the GScan malware analysis.
Args:
client: Client to interact with the GCenter.
args: Command arguments.
Returns:
CommandResults object with the 'GCenter.Gscan.Malware' prefix.
"""
result = client.send_malware(
name=args.get("name"), # type: ignore
content=args.get("content") # type: ignore
)
return CommandResults(
outputs_prefix="GCenter.Gscan.Malware",
outputs=result
)
|
24,518 | def compare_default_dicts(a: defaultdict, b: defaultdict) -> bool:
"""Compare two defaultdicts, return True if equal, else False.
Does a benign or soft compare. If the defaultdicts COULD become
equal, they are considered equal.
* Does NOT change the memory imprint of any of the dictionaries.
* Any overlapping keys, must have same value.
* Keys unique to one, must have the default value of the other.
* Order of input does NOT matter.
Example:
a = defaultdict(lambda: "", a=42, b=42, c="")
b = defaultdict(lambda: 42, c="", d="")
compare_defaultdicts(a, b) -> True
Parameters
----------
a : defaultdict
Default dictionary from collections
b : defaultdict:
Default dictionary from collections
Returns
-------
bool : True if equal, else False
"""
a_keys = set(a)
b_keys = set(b)
a_unique_keys = (a_keys | b_keys) - b_keys
b_unique_keys = (a_keys | b_keys) - a_keys
# The intersecting keys must have the same value
if not all(a[key] == b[key] for key in (a_keys & b_keys)):
return False
# Keys unique to one, must have default value of other.
if not all(b.default_factory() == a[key] for key in a_unique_keys):
return False
if not all(a.default_factory() == b[key] for key in b_unique_keys):
return False
return True
| def compare_default_dicts(a: defaultdict, b: defaultdict) -> bool:
"""Compare two defaultdicts, return True if equal, else False.
Does a benign or soft compare. If the defaultdicts COULD become
equal, they are considered equal.
* Does NOT change the memory imprint of any of the dictionaries.
* Any overlapping keys, must have same value.
* Keys unique to one, must have the default value of the other.
* Order of input does NOT matter.
Example:
a = defaultdict(lambda: "", a=42, b=42, c="")
b = defaultdict(lambda: 42, c="", d="")
compare_defaultdicts(a, b) -> True
Parameters
----------
a : defaultdict
Default dictionary from collections
b : defaultdict
Default dictionary from collections
Returns
-------
bool : True if equal, else False
"""
a_keys = set(a)
b_keys = set(b)
a_unique_keys = (a_keys | b_keys) - b_keys
b_unique_keys = (a_keys | b_keys) - a_keys
# The intersecting keys must have the same value
if not all(a[key] == b[key] for key in (a_keys & b_keys)):
return False
# Keys unique to one, must have default value of other.
if not all(b.default_factory() == a[key] for key in a_unique_keys):
return False
if not all(a.default_factory() == b[key] for key in b_unique_keys):
return False
return True
|
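The docstring example above can be verified by hand: shared keys must agree, and keys unique to one side must hold the other side's default. A short self-contained demonstration of that check, reimplemented inline rather than calling the function above:

from collections import defaultdict

a = defaultdict(lambda: "", a=42, b=42, c="")
b = defaultdict(lambda: 42, c="", d="")

shared = a.keys() & b.keys()
only_a = a.keys() - b.keys()
only_b = b.keys() - a.keys()

equal = (
    all(a[k] == b[k] for k in shared)                     # overlapping keys agree
    and all(a[k] == b.default_factory() for k in only_a)  # a-only keys equal b's default
    and all(b[k] == a.default_factory() for k in only_b)  # b-only keys equal a's default
)
print(equal)  # True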
31,445 | def close_true_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_BENIGN_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = bool(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = bool(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
close_true_positive = client.close_true_positive(request_data)
number_of_close_true_positive = close_true_positive['closed_true_positive']
return CommandResults(
readable_output=f'{number_of_close_true_positive} are classified as closed true positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=close_true_positive)
| def close_true_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_BENIGN_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = bool(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = argToBoolean(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
close_true_positive = client.close_true_positive(request_data)
number_of_close_true_positive = close_true_positive['closed_true_positive']
return CommandResults(
readable_output=f'{number_of_close_true_positive} are classified as closed true positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=close_true_positive)
|
23,885 | def test_molecule_verifiers():
x = sorted(['goss', 'inspec', 'testinfra', 'ansible'])
assert x == verifiers()
| def test_verifiers():
x = sorted(['goss', 'inspec', 'testinfra', 'ansible'])
assert x == verifiers()
|
8,127 | def test_norm_clip(createAIAMap):
# Tests that the default normalizer has clipping disabled
assert createAIAMap.plot_settings['norm'].clip is False
| def test_norm_clip(createAIAMap):
# Tests that the default normalizer has clipping disabled
assert not createAIAMap.plot_settings['norm'].clip
|
25,601 | def _strip_capstring(capstring: str) -> str:
if capstring.startswith("mxc://"):
capstring = capstring[6:]
if "/" in capstring:
capstring = capstring[capstring.rindex("/") + 1 :]
if "?" in capstring:
capstring = capstring[capstring.rindex("?") + 1 :]
return capstring
| def _strip_capstring(capstring: str) -> str:
if capstring.startswith("mxc://"):
capstring = capstring[6:]
_, _, capstring = capstring.rpartition("/")
if "?" in capstring:
capstring = capstring[capstring.rindex("?") + 1 :]
return capstring
|
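The second version above replaces the guarded rindex slice with str.rpartition, which needs no membership check: when the separator is missing it returns ('', '', original), and when it is present the last element is everything after the final separator. A quick check of both cases:

# rpartition keeps the text after the last separator, or the whole string if absent.
for s in ("abc/def/ghi", "no-separator"):
    _, _, tail = s.rpartition("/")
    print(repr(tail))
# 'ghi'
# 'no-separator'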
5,580 | def _interp_logp_height(sounding, missing=-9999):
"""Interpolate height linearly with respect to log p.
This function mimics the functionality of the MR_INTZ
subroutine in GEMPAK.
"""
size = len(sounding['HGHT'])
idx = -1
maxlev = -1
while size + idx != 0:
if sounding['HGHT'][idx] != missing:
maxlev = size + idx
break
else:
idx -= 1
pbot = missing
for i in range(maxlev):
press = sounding['PRES'][i]
hght = sounding['HGHT'][i]
if press == missing or pbot == missing:
continue
elif hght != missing:
pbot = press
zbot = hght
ptop = 2000
else:
ilev = i + 1
while press <= ptop:
if sounding['HGHT'][ilev] != missing:
ptop = sounding['PRES'][ilev]
ztop = sounding['HGHT'][ilev]
else:
ilev += 1
sounding['HGHT'][i] = (zbot + (ztop - zbot)
* (np.log(press / pbot) / np.log(ptop / pbot)))
if maxlev < size - 1:
if maxlev > -1:
pb = units.Quantity(sounding['PRES'][maxlev], 'hPa')
zb = units.Quantity(sounding['HGHT'][maxlev], 'm')
tb = units.Quantity(sounding['TEMP'][maxlev], 'degC')
tdb = units.Quantity(sounding['DWPT'][maxlev], 'degC')
else:
pb = units.Quantity(missing, 'hPa')
zb = units.Quantity(missing, 'm')
tb = units.Quantity(missing, 'degC')
tdb = units.Quantity(missing, 'degC')
for i in range(maxlev + 1, size):
if sounding['HGHT'][i] == missing:
tt = units.Quantity(sounding['TEMP'][i], 'degC')
tdt = units.Quantity(sounding['DWPT'][i], 'degC')
pt = units.Quantity(sounding['PRES'][i], 'hPa')
pl = units.Quantity([pb.m, pt.m], 'hPa')
tl = units.Quantity([tb.m, tt.m], 'degC')
tdl = units.Quantity([tdb.m, tdt.m], 'degC')
if missing in tdl.m:
rl = None
else:
ql = specific_humidity_from_dewpoint(pl, tdl)
rl = mixing_ratio_from_specific_humidity(ql)
if missing not in [*tl.m, zb.m]:
sounding['HGHT'][i] = (zb + thickness_hydrostatic(pl, tl, rl)).m
else:
sounding['HGHT'][i] = missing
| def _interp_logp_height(sounding, missing=-9999):
"""Interpolate height linearly with respect to log p.
This function mimics the functionality of the MR_INTZ
subroutine in GEMPAK.
"""
size = len(sounding['HGHT'])
idx = -1
maxlev = -1
while size + idx != 0:
if sounding['HGHT'][idx] != missing:
maxlev = size + idx
break
else:
idx -= 1
pbot = missing
for i in range(maxlev):
press = sounding['PRES'][i]
hght = sounding['HGHT'][i]
if press == missing:
continue
elif hght != missing:
pbot = press
zbot = hght
ptop = 2000
else:
ilev = i + 1
while press <= ptop:
if sounding['HGHT'][ilev] != missing:
ptop = sounding['PRES'][ilev]
ztop = sounding['HGHT'][ilev]
else:
ilev += 1
sounding['HGHT'][i] = (zbot + (ztop - zbot)
* (np.log(press / pbot) / np.log(ptop / pbot)))
if maxlev < size - 1:
if maxlev > -1:
pb = units.Quantity(sounding['PRES'][maxlev], 'hPa')
zb = units.Quantity(sounding['HGHT'][maxlev], 'm')
tb = units.Quantity(sounding['TEMP'][maxlev], 'degC')
tdb = units.Quantity(sounding['DWPT'][maxlev], 'degC')
else:
pb = units.Quantity(missing, 'hPa')
zb = units.Quantity(missing, 'm')
tb = units.Quantity(missing, 'degC')
tdb = units.Quantity(missing, 'degC')
for i in range(maxlev + 1, size):
if sounding['HGHT'][i] == missing:
tt = units.Quantity(sounding['TEMP'][i], 'degC')
tdt = units.Quantity(sounding['DWPT'][i], 'degC')
pt = units.Quantity(sounding['PRES'][i], 'hPa')
pl = units.Quantity([pb.m, pt.m], 'hPa')
tl = units.Quantity([tb.m, tt.m], 'degC')
tdl = units.Quantity([tdb.m, tdt.m], 'degC')
if missing in tdl.m:
rl = None
else:
ql = specific_humidity_from_dewpoint(pl, tdl)
rl = mixing_ratio_from_specific_humidity(ql)
if missing not in [*tl.m, zb.m]:
sounding['HGHT'][i] = (zb + thickness_hydrostatic(pl, tl, rl)).m
else:
sounding['HGHT'][i] = missing
|
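The first loop in _interp_logp_height fills a missing height by interpolating linearly in log pressure between the bracketing levels, z = zbot + (ztop - zbot) * ln(p/pbot) / ln(ptop/pbot). A small numerical check of that formula with invented levels:

import numpy as np

# Bracketing levels (hypothetical): 850 hPa at 1457 m and 700 hPa at 3012 m.
pbot, zbot = 850.0, 1457.0
ptop, ztop = 700.0, 3012.0

# Height of an intermediate level, linear in log(p).
press = 775.0
z = zbot + (ztop - zbot) * (np.log(press / pbot) / np.log(ptop / pbot))
print(round(z, 1))  # about 2197 m, between zbot and ztop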
29,806 | def validate_tron(service_path, verbose=False):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
service_config = load_tron_service_config_no_cache(service, cluster)
for config in service_config:
schedule = config.get_schedule()
num_runs = 5
if schedule.startswith("cron"):
print(info_message(f"Next 5 cron runs for {config.get_name()}"))
next_cron_runs = get_next_x_cron_runs(
num_runs, schedule.replace("cron", ""), datetime.today()
)
for run in next_cron_runs:
print(f"{run}")
return returncode
| def validate_tron(service_path, verbose=False):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
elif verbose:
service_config = load_tron_service_config_no_cache(service, cluster)
for config in service_config:
schedule = config.get_schedule()
num_runs = 5
if schedule.startswith("cron"):
print(info_message(f"Upcoming runs for {config.get_name()}:"))
next_cron_runs = get_next_x_cron_runs(
num_runs, schedule.replace("cron", ""), datetime.today()
)
for run in next_cron_runs:
print(f"{run}")
return returncode
|
2,998 | def test_concat_sparse():
# GH 23557
a = pd.Series(pd.SparseArray([0, 1, 2]))
expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype("int", 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
| def test_concat_sparse():
# GH 23557
a = pd.Series(pd.SparseArray([0, 1, 2]))
expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.intp, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
|
7,609 | def test_subclass():
wcs1 = Sub()
wcs1.foo = 45
s = pickle.dumps(wcs1)
wcs2 = pickle.loads(s)
assert isinstance(wcs2, Sub)
assert wcs1.foo == 45
assert wcs2.foo == 45
assert wcs2.wcs is not None
| def test_subclass():
wcs1 = Sub()
wcs1.foo = 45
wcs1.foo = 42
wcs2 = pickle.loads(s)
assert isinstance(wcs2, Sub)
assert wcs1.foo == 45
assert wcs2.foo == 45
assert wcs2.wcs is not None
|
58,856 | def main(argv: Any) -> int:
options = parse_args(argv)
webhook_secret = str(os.environ.get('FLEXCI_WEBHOOK_SECRET'))
github_token = str(os.environ.get('GITHUB_TOKEN'))
with open(options.webhook, 'rb') as f:
payload = f.read()
with open(options.projects) as f: # type: ignore
project_tags = json.load(f)
payload_obj = json.loads(payload)
if payload_obj['action'] != 'created':
_log('Invalid action')
return 1
requested_tags = extract_requested_tags(payload_obj['comment']['body'])
if requested_tags is None:
_log('No test requested in comment.')
return 0
_log(f'Test tags requested: {requested_tags}')
association = payload_obj['comment']['author_association']
if association not in ('OWNER', 'MEMBER'):
_log(f'Tests cannot be triggered by {association}')
return 1
projects_dispatch: Set[str] = set()
projects_skipped: Set[str] = set()
for project, tags in project_tags.items():
_log(f'Project: {project} (tags: {tags})')
if len(set(tags) & requested_tags) != 0:
projects_dispatch.add(project)
else:
projects_skipped.add(project)
if len(projects_dispatch) == 0:
_log('No projects matched with the requested tag')
return 1
_log(f'Dispatching projects: {projects_dispatch}')
success = _forward_to_flexci(
payload, webhook_secret, projects_dispatch, options.flexci_uri)
if not success:
_log('Failed to dispatch')
return 1
if len(projects_skipped) != 0:
_complement_commit_status(
payload_obj['repository']['full_name'],
payload_obj['issue']['number'],
github_token,
projects_skipped,
options.flexci_context)
return 0
| def main(argv: Any) -> int:
options = parse_args(argv)
webhook_secret = os.environ['FLEXCI_WEBHOOK_SECRET']
github_token = os.environ['GITHUB_TOKEN']
with open(options.webhook, 'rb') as f:
payload = f.read()
with open(options.projects) as f: # type: ignore
project_tags = json.load(f)
payload_obj = json.loads(payload)
if payload_obj['action'] != 'created':
_log('Invalid action')
return 1
requested_tags = extract_requested_tags(payload_obj['comment']['body'])
if requested_tags is None:
_log('No test requested in comment.')
return 0
_log(f'Test tags requested: {requested_tags}')
association = payload_obj['comment']['author_association']
if association not in ('OWNER', 'MEMBER'):
_log(f'Tests cannot be triggered by {association}')
return 1
projects_dispatch: Set[str] = set()
projects_skipped: Set[str] = set()
for project, tags in project_tags.items():
_log(f'Project: {project} (tags: {tags})')
if len(set(tags) & requested_tags) != 0:
projects_dispatch.add(project)
else:
projects_skipped.add(project)
if len(projects_dispatch) == 0:
_log('No projects matched with the requested tag')
return 1
_log(f'Dispatching projects: {projects_dispatch}')
success = _forward_to_flexci(
payload, webhook_secret, projects_dispatch, options.flexci_uri)
if not success:
_log('Failed to dispatch')
return 1
if len(projects_skipped) != 0:
_complement_commit_status(
payload_obj['repository']['full_name'],
payload_obj['issue']['number'],
github_token,
projects_skipped,
options.flexci_context)
return 0
|
8,969 | def clean_module(module, config):
"""Clean a module and return its callables. (commands, rules, jobs, etc.)
:param module: the module to clean
:type module: :term:`module`
:param config: Sopel's settings
:type config: :class:`sopel.config.Config`
:return: a tuple with triggerable, job, shutdown, and url functions
:rtype: tuple
This function will parse the ``module`` looking for callables:
* shutdown actions
* triggerables (commands, rules, etc.)
* jobs
* URL callbacks
This function will set all the default attributes expected for a Sopel
callable, i.e. properties related to threading, docs, examples, rate
limiting, commands, rules, and other features.
"""
callables = []
shutdowns = []
jobs = []
urls = []
for obj in itervalues(vars(module)):
if callable(obj):
is_sopel_callable = getattr(obj, '_sopel_callable', False) is True
if getattr(obj, '__name__', None) == 'shutdown':
shutdowns.append(obj)
elif not is_sopel_callable:
continue
elif is_triggerable(obj):
clean_callable(obj, config)
callables.append(obj)
elif hasattr(obj, 'interval'):
clean_callable(obj, config)
jobs.append(obj)
elif is_url_callback(obj):
clean_callable(obj, config)
urls.append(obj)
return callables, jobs, shutdowns, urls
| def clean_module(module, config):
"""Clean a module and return its command, rule, job, etc. callables.
:param module: the module to clean
:type module: :term:`module`
:param config: Sopel's settings
:type config: :class:`sopel.config.Config`
:return: a tuple with triggerable, job, shutdown, and url functions
:rtype: tuple
This function will parse the ``module`` looking for callables:
* shutdown actions
* triggerables (commands, rules, etc.)
* jobs
* URL callbacks
This function will set all the default attributes expected for a Sopel
callable, i.e. properties related to threading, docs, examples, rate
limiting, commands, rules, and other features.
"""
callables = []
shutdowns = []
jobs = []
urls = []
for obj in itervalues(vars(module)):
if callable(obj):
is_sopel_callable = getattr(obj, '_sopel_callable', False) is True
if getattr(obj, '__name__', None) == 'shutdown':
shutdowns.append(obj)
elif not is_sopel_callable:
continue
elif is_triggerable(obj):
clean_callable(obj, config)
callables.append(obj)
elif hasattr(obj, 'interval'):
clean_callable(obj, config)
jobs.append(obj)
elif is_url_callback(obj):
clean_callable(obj, config)
urls.append(obj)
return callables, jobs, shutdowns, urls
|
13,538 | def compare_db(db1, db2, ignore=None):
# type: (canmatrix.CanMatrix, canmatrix.CanMatrix, ConfigDict) -> CompareResult
result = CompareResult()
if ignore is None:
ignore = dict()
for f1 in db1.frames:
f2 = db2.frame_by_id(f1.arbitration_id)
if f2 is None:
result.add_child(CompareResult("deleted", "FRAME", f1))
else:
result.add_child(compare_frame(f1, f2, ignore))
for f2 in db2.frames:
f1 = db1.frame_by_id(f2.arbitration_id)
if f1 is None:
result.add_child(CompareResult("added", "FRAME", f2))
if ignore.get("ATTRIBUTE", "") == "*":
pass
else:
result.add_child(compare_attributes(db1, db2, ignore))
for ecu1 in db1.ecus:
ecu2 = db2.ecu_by_name(ecu1.name)
if ecu2 is None:
result.add_child(CompareResult("deleted", "ecu", ecu1))
else:
result.add_child(compare_ecu(ecu1, ecu2, ignore))
for ecu2 in db2.ecus:
ecu1 = db1.ecu_by_name(ecu2.name)
if ecu1 is None:
result.add_child(CompareResult("added", "ecu", ecu2))
if ignore.get("DEFINE", "") == "*":
pass
else:
result.add_child(
compare_define_list(
db1.global_defines,
db2.global_defines))
temp = compare_define_list(db1.ecu_defines, db2.ecu_defines)
temp.type = "ECU Defines"
result.add_child(temp)
temp = compare_define_list(db1.frame_defines, db2.frame_defines)
temp.type = "Frame Defines"
result.add_child(temp)
temp = compare_define_list(db1.signal_defines, db2.signal_defines)
temp.type = "Signal Defines"
result.add_child(temp)
if ignore.get("VALUETABLES", False):
pass
else:
for vt1 in db1.value_tables:
if vt1 not in db2.value_tables:
result.add_child(
CompareResult(
"deleted",
"valuetable " + vt1,
db1.value_tables))
else:
result.add_child(
compare_value_table(
db1.value_tables[vt1],
db2.value_tables[vt1]))
for vt2 in db2.value_tables:
if vt2 not in db1.value_tables:
result.add_child(
CompareResult(
"added",
"valuetable " + vt2,
db2.value_tables))
propagate_changes(result)
return result
| def compare_db(db1, db2, ignore=None):
# type: (canmatrix.CanMatrix, canmatrix.CanMatrix, ConfigDict) -> CompareResult
result = CompareResult()
if ignore is None:
ignore = dict()
for f1 in db1.frames:
f2 = db2.frame_by_id(f1.arbitration_id)
if f2 is None:
result.add_child(CompareResult("deleted", "FRAME", f1))
else:
result.add_child(compare_frame(f1, f2, ignore))
for f2 in db2.frames:
f1 = db1.frame_by_id(f2.arbitration_id)
if f1 is None:
result.add_child(CompareResult("added", "FRAME", f2))
if ignore.get("ATTRIBUTE") == "*":
pass
else:
result.add_child(compare_attributes(db1, db2, ignore))
for ecu1 in db1.ecus:
ecu2 = db2.ecu_by_name(ecu1.name)
if ecu2 is None:
result.add_child(CompareResult("deleted", "ecu", ecu1))
else:
result.add_child(compare_ecu(ecu1, ecu2, ignore))
for ecu2 in db2.ecus:
ecu1 = db1.ecu_by_name(ecu2.name)
if ecu1 is None:
result.add_child(CompareResult("added", "ecu", ecu2))
if ignore.get("DEFINE", "") == "*":
pass
else:
result.add_child(
compare_define_list(
db1.global_defines,
db2.global_defines))
temp = compare_define_list(db1.ecu_defines, db2.ecu_defines)
temp.type = "ECU Defines"
result.add_child(temp)
temp = compare_define_list(db1.frame_defines, db2.frame_defines)
temp.type = "Frame Defines"
result.add_child(temp)
temp = compare_define_list(db1.signal_defines, db2.signal_defines)
temp.type = "Signal Defines"
result.add_child(temp)
if ignore.get("VALUETABLES", False):
pass
else:
for vt1 in db1.value_tables:
if vt1 not in db2.value_tables:
result.add_child(
CompareResult(
"deleted",
"valuetable " + vt1,
db1.value_tables))
else:
result.add_child(
compare_value_table(
db1.value_tables[vt1],
db2.value_tables[vt1]))
for vt2 in db2.value_tables:
if vt2 not in db1.value_tables:
result.add_child(
CompareResult(
"added",
"valuetable " + vt2,
db2.value_tables))
propagate_changes(result)
return result
|
50,075 | def coefficient(base, *, tlist=None, args={}, args_ctypes={},
order=3, compile_opt=None, function_style=None):
"""Coefficient for time dependent systems.
The coefficients are either a function, a string or a numpy array.
For function based coefficients, the function signature must be either:
* ``f(t, ...)`` where the other arguments are supplied as ordinary
"pythonic" arguments (e.g. ``f(t, w, a=5))
* ``f(t, args)`` where the arguments are supplied in a "dict" named
``args``
By default the signature style is controlled by the
``qutip.settings.core["function_coefficient_style"]`` setting, but it
may be overriden here by specifying either ``function_style="pythonic"``
or ``function_style="dict"``.
*Examples*
# pythonic style function signature
def f1_t(t, w):
return np.exp(-1j * t * w)
coeff1 = coefficient(f1_t, args={"w": 1.})
# dict style function signature
def f2_t(t, args):
return np.exp(-1j * t * args["w"])
coeff2 = coefficient(f2_t, args={"w": 1.})
For string based coeffients, the string must be a compilable python code
resulting in a complex. The following symbols are defined:
sin cos tan asin acos atan pi
sinh cosh tanh asinh acosh atanh
exp log log10 erf zerf sqrt
real imag conj abs norm arg proj
numpy as np,
scipy.special as spe (python interface)
and cython_special (cython interface)
[https://docs.scipy.org/doc/scipy/reference/special.cython_special.html].
*Examples*
coeff = coefficient('exp(-1j*w1*t)', args={"w1":1.})
'args' is needed for string coefficient at compilation.
It is a dict of (name:object). The keys must be a valid variables string.
Compilation options can be passed as "compile_opt=CompilationOptions(...)".
For numpy array format, the array must be an 1d of dtype float or complex.
A list of times (float64) at which the coeffients must be given (tlist).
The coeffients array must have the same len as the tlist.
The time of the tlist do not need to be equidistant, but must be sorted.
By default, a cubic spline interpolation will be used to compute the
coefficient at time t. The keyword ``order`` set the order of the
interpolation. When ``order = 0`` it interpolate as a step function to the
previous or last value.
*Examples*
tlist = np.logspace(-5,0,100)
H = QobjEvo(np.exp(-1j*tlist), tlist=tlist)
``scipy.interpolate``'s ``CubicSpline``, ``PPoly`` and ``Bspline`` are
also accepted as array ``Coefficient``. Other interpolation method from
scipy are usually accepted as functions based coefficient.
"""
if isinstance(base, Coefficient):
return base
elif isinstance(base, np.ndarray):
return InterCoefficient(base, tlist, order)
elif isinstance(base, scipy.interpolate.PPoly):
return InterCoefficient.from_PPoly(base)
elif isinstance(base, scipy.interpolate.BSpline):
return InterCoefficient.from_Bspline(base)
elif isinstance(base, str):
return coeff_from_str(base, args, args_ctypes, compile_opt)
elif callable(base):
op = FunctionCoefficient(base, args.copy(), style=function_style)
if not isinstance(op(0), numbers.Number):
raise TypeError("The coefficient function must return a number")
return op
else:
raise ValueError("coefficient format not understood")
| def coefficient(base, *, tlist=None, args={}, args_ctypes={},
order=3, compile_opt=None, function_style=None):
"""Coefficient for time dependent systems.
The coefficients are either a function, a string or a numpy array.
For function based coefficients, the function signature must be either:
* ``f(t, ...)`` where the other arguments are supplied as ordinary
"pythonic" arguments (e.g. ``f(t, w, a=5))
* ``f(t, args)`` where the arguments are supplied in a "dict" named
``args``
By default the signature style is controlled by the
``qutip.settings.core["function_coefficient_style"]`` setting, but it
may be overriden here by specifying either ``function_style="pythonic"``
or ``function_style="dict"``.
*Examples*
# pythonic style function signature
def f1_t(t, w):
return np.exp(-1j * t * w)
coeff1 = coefficient(f1_t, args={"w": 1.})
# dict style function signature
def f2_t(t, args):
return np.exp(-1j * t * args["w"])
coeff2 = coefficient(f2_t, args={"w": 1.})
For string based coeffients, the string must be a compilable python code
resulting in a complex. The following symbols are defined:
sin cos tan asin acos atan pi
sinh cosh tanh asinh acosh atanh
exp log log10 erf zerf sqrt
real imag conj abs norm arg proj
numpy as np,
scipy.special as spe (python interface)
and cython_special (cython interface)
[https://docs.scipy.org/doc/scipy/reference/special.cython_special.html].
*Examples*
coeff = coefficient('exp(-1j*w1*t)', args={"w1":1.})
'args' is needed for string coefficient at compilation.
It is a dict of (name:object). The keys must be a valid variables string.
Compilation options can be passed as "compile_opt=CompilationOptions(...)".
For numpy array format, the array must be an 1d of dtype float or complex.
A list of times (float64) at which the coeffients must be given (tlist).
The coeffients array must have the same len as the tlist.
The time of the tlist do not need to be equidistant, but must be sorted.
By default, a cubic spline interpolation will be used to compute the
coefficient at time t. The keyword ``order`` set the order of the
interpolation. When ``order = 0`` it interpolate as a step function to the
previous or last value.
*Examples*
tlist = np.logspace(-5,0,100)
H = QobjEvo(np.exp(-1j*tlist), tlist=tlist)
``scipy.interpolate``'s ``CubicSpline``, ``PPoly`` and ``Bspline`` are
also converted to interpolated coefficients (the same kind of coefficient
created from ``ndarray``). Other interpolation methods from
scipy are converted to function-based coefficient (the same kind of
coefficient created from callables).
"""
if isinstance(base, Coefficient):
return base
elif isinstance(base, np.ndarray):
return InterCoefficient(base, tlist, order)
elif isinstance(base, scipy.interpolate.PPoly):
return InterCoefficient.from_PPoly(base)
elif isinstance(base, scipy.interpolate.BSpline):
return InterCoefficient.from_Bspline(base)
elif isinstance(base, str):
return coeff_from_str(base, args, args_ctypes, compile_opt)
elif callable(base):
op = FunctionCoefficient(base, args.copy(), style=function_style)
if not isinstance(op(0), numbers.Number):
raise TypeError("The coefficient function must return a number")
return op
else:
raise ValueError("coefficient format not understood")
|
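A minimal usage sketch of the three coefficient formats described in the docstring above, assuming a qutip build where ``coefficient`` is exported at package level; the argument values are illustrative only.

import numpy as np
from qutip import coefficient

# Function based ("pythonic" signature): extra arguments are passed by name.
def f_t(t, w):
    return np.exp(-1j * w * t)

c_func = coefficient(f_t, args={"w": 1.0})

# String based: a compilable expression whose symbols are supplied via args.
c_str = coefficient("exp(-1j*w*t)", args={"w": 1.0})

# Array based: sampled values on a sorted (not necessarily uniform) tlist,
# interpolated with a cubic spline by default (order=3).
tlist = np.logspace(-5, 0, 100)
c_arr = coefficient(np.exp(-1j * tlist), tlist=tlist, order=3)

# All three evaluate to a complex number at an arbitrary time t.
print(c_func(0.5), c_str(0.5), c_arr(0.5))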
4,505 | def _check_pymatreader_installed(strict=True):
"""Aux function."""
return _soft_import('pymatreader', 'pymatreader', strict=strict)
| def _check_pymatreader_installed(strict=True):
"""Aux function."""
return _soft_import('pymatreader', 'loading .MAT files', strict=strict)
|
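The only change in this pair is the second argument of ``_soft_import``: the module name is replaced by a human-readable purpose string. The helper below is an illustrative sketch of the assumed contract, not the host package's actual implementation.

import importlib

def _soft_import(name, purpose, strict=True):
    # Return the module if available; otherwise raise (strict) or return None.
    # The 'purpose' string only appears in the error message, which is why a
    # readable description ('loading .MAT files') is preferable to repeating
    # the module name.
    try:
        return importlib.import_module(name)
    except ImportError:
        if strict:
            raise ImportError(
                f"The '{name}' package is required for {purpose}. "
                f"Install it, e.g. with `pip install {name}`."
            )
        return None

# e.g. _soft_import('pymatreader', 'loading .MAT files', strict=True)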
32,748 | def patch():
if getattr(mako, '__datadog_patch', False):
# already patched
return
setattr(mako, '__datadog_patch', True)
Pin(service="mako", app="mako", app_type=http.TEMPLATE).onto(Template)
_w(mako, 'template.Template.render', _wrap_render)
_w(mako, 'template.Template.render_unicode', _wrap_render)
_w(mako, 'template.Template.render_context', _wrap_render)
| def patch():
if getattr(mako, '__datadog_patch', False):
# already patched
return
setattr(mako, '__datadog_patch', True)
Pin(service='mako', app='mako', app_type=http.TEMPLATE).onto(Template)
_w(mako, 'template.Template.render', _wrap_render)
_w(mako, 'template.Template.render_unicode', _wrap_render)
_w(mako, 'template.Template.render_context', _wrap_render)
|
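A short usage sketch, assuming ``patch`` is called once at start-up: mako rendering is unchanged for the caller, but every render call now goes through ``_wrap_render`` and is reported under the service name set by the ``Pin``.

from mako.template import Template

patch()  # idempotent: the __datadog_patch guard makes repeat calls a no-op

tmpl = Template("Hello ${name}!")
print(tmpl.render(name="World"))  # traced via _wrap_render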
35,826 | def call_self_private(stmt_expr, context, sig):
# ** Private Call **
# Steps:
# (x) push current local variables
# (x) push arguments
# (x) push jumpdest (callback ptr)
# (x) jump to label
# (x) pop return values
# (x) pop local variables
method_name, expr_args, sig = call_lookup_specs(stmt_expr, context)
pre_init = []
pop_local_vars = []
push_local_vars = []
pop_return_values = []
push_args = []
# Push local variables.
if context.vars:
var_slots = [(v.pos, v.size) for name, v in context.vars.items()]
var_slots.sort(key=lambda x: x[0])
mem_from, mem_to = var_slots[0][0], var_slots[-1][0] + var_slots[-1][1] * 32
i_placeholder = context.new_placeholder(BaseType('uint256'))
local_save_ident = "%d_%d" % (stmt_expr.lineno, stmt_expr.col_offset)
push_loop_label = 'save_locals_start' + local_save_ident
pop_loop_label = 'restore_locals_start' + local_save_ident
push_local_vars = [
['mstore', i_placeholder, mem_from],
['label', push_loop_label],
['mload', ['mload', i_placeholder]],
['mstore', i_placeholder, ['add', ['mload', i_placeholder], 32]],
['if', ['lt', ['mload', i_placeholder], mem_to],
['goto', push_loop_label]]
]
pop_local_vars = [
['mstore', i_placeholder, mem_to - 32],
['label', pop_loop_label],
['mstore', ['mload', i_placeholder], 'pass'],
['mstore', i_placeholder, ['sub', ['mload', i_placeholder], 32]],
['if', ['ge', ['mload', i_placeholder], mem_from],
['goto', pop_loop_label]]
]
# Push Arguments
if expr_args:
inargs, inargsize, arg_pos = pack_arguments(
sig,
expr_args,
context,
return_placeholder=False,
pos=getpos(stmt_expr),
)
push_args += [inargs] # copy arguments first, to not mess up the push/pop sequencing.
static_arg_size = 32 * sum(
[get_static_size_of_type(arg.typ)
for arg in expr_args])
static_pos = arg_pos + static_arg_size
needs_dyn_section = any(
[has_dynamic_data(arg.typ)
for arg in expr_args])
if needs_dyn_section:
ident = 'push_args_%d_%d_%d' % (sig.method_id, stmt_expr.lineno, stmt_expr.col_offset)
start_label = ident + '_start'
end_label = ident + '_end'
i_placeholder = context.new_placeholder(BaseType('uint256'))
# Calculate copy start position.
# Given | static | dynamic | section in memory,
# copy backwards so the values are in order on the stack.
# We calculate i, the end of the whole encoded part
# (i.e. the starting index for copy)
# by taking ceil32(len<arg>) + offset<arg> + arg_pos
# for the last dynamic argument and arg_pos is the start
# the whole argument section.
for idx, arg in enumerate(expr_args):
if isinstance(arg.typ, ByteArrayLike):
last_idx = idx
push_args += [
['with', 'offset', ['mload', arg_pos + last_idx * 32],
['with', 'len_pos', ['add', arg_pos, 'offset'],
['with', 'len_value', ['mload', 'len_pos'],
['mstore', i_placeholder,
['add', 'len_pos', ['ceil32', 'len_value']]]]]]
]
# loop from end of dynamic section to start of dynamic section,
# pushing each element onto the stack.
push_args += [
['label', start_label],
['if', ['lt', ['mload', i_placeholder], static_pos],
['goto', end_label]],
['mload', ['mload', i_placeholder]],
['mstore', i_placeholder, ['sub', ['mload', i_placeholder], 32]], # decrease i
['goto', start_label],
['label', end_label]
]
# push static section
push_args += [
['mload', pos] for pos in reversed(range(arg_pos, static_pos, 32))
]
# Jump to function label.
jump_to_func = [
['add', ['pc'], 6], # set callback pointer.
['goto', 'priv_{}'.format(sig.method_id)],
['jumpdest'],
]
# Pop return values.
returner = [0]
if sig.output_type:
output_placeholder, returner, output_size = call_make_placeholder(stmt_expr, context, sig)
if output_size > 0:
dynamic_offsets = []
if isinstance(sig.output_type, (BaseType, ListType)):
pop_return_values = [
['mstore', ['add', output_placeholder, pos], 'pass']
for pos in range(0, output_size, 32)
]
elif isinstance(sig.output_type, ByteArrayLike):
dynamic_offsets = [(0, sig.output_type)]
pop_return_values = [
['pop', 'pass'],
]
elif isinstance(sig.output_type, TupleLike):
static_offset = 0
pop_return_values = []
for out_type in sig.output_type.members:
if isinstance(out_type, ByteArrayLike):
pop_return_values.append(
['mstore', ['add', output_placeholder, static_offset], 'pass']
)
dynamic_offsets.append(
(['mload', ['add', output_placeholder, static_offset]], out_type)
)
else:
pop_return_values.append(
['mstore', ['add', output_placeholder, static_offset], 'pass']
)
static_offset += 32
# append dynamic unpacker.
dyn_idx = 0
for in_memory_offset, _out_type in dynamic_offsets:
ident = "%d_%d_arg_%d" % (stmt_expr.lineno, stmt_expr.col_offset, dyn_idx)
dyn_idx += 1
start_label = 'dyn_unpack_start_' + ident
end_label = 'dyn_unpack_end_' + ident
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
begin_pos = ['add', output_placeholder, in_memory_offset]
# loop until length.
o = LLLnode.from_list(
['seq_unchecked',
['mstore', begin_pos, 'pass'], # get len
['mstore', i_placeholder, 0],
['label', start_label],
[ # break
'if',
['ge', ['mload', i_placeholder], ['ceil32', ['mload', begin_pos]]],
['goto', end_label]
],
[ # pop into correct memory slot.
'mstore',
['add', ['add', begin_pos, 32], ['mload', i_placeholder]],
'pass',
],
# increment i
['mstore', i_placeholder, ['add', 32, ['mload', i_placeholder]]],
['goto', start_label],
['label', end_label]],
typ=None, annotation='dynamic unpacker', pos=getpos(stmt_expr))
pop_return_values.append(o)
call_body = list(itertools.chain(
['seq_unchecked'],
pre_init,
push_local_vars,
push_args,
jump_to_func,
pop_return_values,
pop_local_vars,
[returner],
))
# If we have no return, we need to pop off
pop_returner_call_body = ['pop', call_body] if sig.output_type is None else call_body
o = LLLnode.from_list(
pop_returner_call_body,
typ=sig.output_type,
location='memory',
pos=getpos(stmt_expr),
annotation='Internal Call: %s' % method_name,
add_gas_estimate=sig.gas
)
o.gas += sig.gas
return o
| def call_self_private(stmt_expr, context, sig):
# ** Private Call **
# Steps:
# (x) push current local variables
# (x) push arguments
# (x) push jumpdest (callback ptr)
# (x) jump to label
# (x) pop return values
# (x) pop local variables
method_name, expr_args, sig = call_lookup_specs(stmt_expr, context)
pre_init = []
pop_local_vars = []
push_local_vars = []
pop_return_values = []
push_args = []
# Push local variables.
if context.vars:
var_slots = [(v.pos, v.size) for name, v in context.vars.items()]
var_slots.sort(key=lambda x: x[0])
mem_from, mem_to = var_slots[0][0], var_slots[-1][0] + var_slots[-1][1] * 32
i_placeholder = context.new_placeholder(BaseType('uint256'))
local_save_ident = "_%d_%d" % (stmt_expr.lineno, stmt_expr.col_offset)
push_loop_label = 'save_locals_start' + local_save_ident
pop_loop_label = 'restore_locals_start' + local_save_ident
push_local_vars = [
['mstore', i_placeholder, mem_from],
['label', push_loop_label],
['mload', ['mload', i_placeholder]],
['mstore', i_placeholder, ['add', ['mload', i_placeholder], 32]],
['if', ['lt', ['mload', i_placeholder], mem_to],
['goto', push_loop_label]]
]
pop_local_vars = [
['mstore', i_placeholder, mem_to - 32],
['label', pop_loop_label],
['mstore', ['mload', i_placeholder], 'pass'],
['mstore', i_placeholder, ['sub', ['mload', i_placeholder], 32]],
['if', ['ge', ['mload', i_placeholder], mem_from],
['goto', pop_loop_label]]
]
# Push Arguments
if expr_args:
inargs, inargsize, arg_pos = pack_arguments(
sig,
expr_args,
context,
return_placeholder=False,
pos=getpos(stmt_expr),
)
push_args += [inargs] # copy arguments first, to not mess up the push/pop sequencing.
static_arg_size = 32 * sum(
[get_static_size_of_type(arg.typ)
for arg in expr_args])
static_pos = arg_pos + static_arg_size
needs_dyn_section = any(
[has_dynamic_data(arg.typ)
for arg in expr_args])
if needs_dyn_section:
ident = 'push_args_%d_%d_%d' % (sig.method_id, stmt_expr.lineno, stmt_expr.col_offset)
start_label = ident + '_start'
end_label = ident + '_end'
i_placeholder = context.new_placeholder(BaseType('uint256'))
# Calculate copy start position.
# Given | static | dynamic | section in memory,
# copy backwards so the values are in order on the stack.
# We calculate i, the end of the whole encoded part
# (i.e. the starting index for copy)
# by taking ceil32(len<arg>) + offset<arg> + arg_pos
# for the last dynamic argument and arg_pos is the start
# the whole argument section.
for idx, arg in enumerate(expr_args):
if isinstance(arg.typ, ByteArrayLike):
last_idx = idx
push_args += [
['with', 'offset', ['mload', arg_pos + last_idx * 32],
['with', 'len_pos', ['add', arg_pos, 'offset'],
['with', 'len_value', ['mload', 'len_pos'],
['mstore', i_placeholder,
['add', 'len_pos', ['ceil32', 'len_value']]]]]]
]
# loop from end of dynamic section to start of dynamic section,
# pushing each element onto the stack.
push_args += [
['label', start_label],
['if', ['lt', ['mload', i_placeholder], static_pos],
['goto', end_label]],
['mload', ['mload', i_placeholder]],
['mstore', i_placeholder, ['sub', ['mload', i_placeholder], 32]], # decrease i
['goto', start_label],
['label', end_label]
]
# push static section
push_args += [
['mload', pos] for pos in reversed(range(arg_pos, static_pos, 32))
]
# Jump to function label.
jump_to_func = [
['add', ['pc'], 6], # set callback pointer.
['goto', 'priv_{}'.format(sig.method_id)],
['jumpdest'],
]
# Pop return values.
returner = [0]
if sig.output_type:
output_placeholder, returner, output_size = call_make_placeholder(stmt_expr, context, sig)
if output_size > 0:
dynamic_offsets = []
if isinstance(sig.output_type, (BaseType, ListType)):
pop_return_values = [
['mstore', ['add', output_placeholder, pos], 'pass']
for pos in range(0, output_size, 32)
]
elif isinstance(sig.output_type, ByteArrayLike):
dynamic_offsets = [(0, sig.output_type)]
pop_return_values = [
['pop', 'pass'],
]
elif isinstance(sig.output_type, TupleLike):
static_offset = 0
pop_return_values = []
for out_type in sig.output_type.members:
if isinstance(out_type, ByteArrayLike):
pop_return_values.append(
['mstore', ['add', output_placeholder, static_offset], 'pass']
)
dynamic_offsets.append(
(['mload', ['add', output_placeholder, static_offset]], out_type)
)
else:
pop_return_values.append(
['mstore', ['add', output_placeholder, static_offset], 'pass']
)
static_offset += 32
# append dynamic unpacker.
dyn_idx = 0
for in_memory_offset, _out_type in dynamic_offsets:
ident = "%d_%d_arg_%d" % (stmt_expr.lineno, stmt_expr.col_offset, dyn_idx)
dyn_idx += 1
start_label = 'dyn_unpack_start_' + ident
end_label = 'dyn_unpack_end_' + ident
i_placeholder = context.new_placeholder(typ=BaseType('uint256'))
begin_pos = ['add', output_placeholder, in_memory_offset]
# loop until length.
o = LLLnode.from_list(
['seq_unchecked',
['mstore', begin_pos, 'pass'], # get len
['mstore', i_placeholder, 0],
['label', start_label],
[ # break
'if',
['ge', ['mload', i_placeholder], ['ceil32', ['mload', begin_pos]]],
['goto', end_label]
],
[ # pop into correct memory slot.
'mstore',
['add', ['add', begin_pos, 32], ['mload', i_placeholder]],
'pass',
],
# increment i
['mstore', i_placeholder, ['add', 32, ['mload', i_placeholder]]],
['goto', start_label],
['label', end_label]],
typ=None, annotation='dynamic unpacker', pos=getpos(stmt_expr))
pop_return_values.append(o)
call_body = list(itertools.chain(
['seq_unchecked'],
pre_init,
push_local_vars,
push_args,
jump_to_func,
pop_return_values,
pop_local_vars,
[returner],
))
# If we have no return, we need to pop off
pop_returner_call_body = ['pop', call_body] if sig.output_type is None else call_body
o = LLLnode.from_list(
pop_returner_call_body,
typ=sig.output_type,
location='memory',
pos=getpos(stmt_expr),
annotation='Internal Call: %s' % method_name,
add_gas_estimate=sig.gas
)
o.gas += sig.gas
return o
|
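The two versions differ only in the ``local_save_ident`` format string, which gains a leading underscore. The snippet below simply shows the jump-label names each version produces for a hypothetical statement at line 12, column 4.

lineno, col_offset = 12, 4  # hypothetical source position

old_ident = "%d_%d" % (lineno, col_offset)   # "12_4"
new_ident = "_%d_%d" % (lineno, col_offset)  # "_12_4"

print('save_locals_start' + old_ident)   # save_locals_start12_4
print('save_locals_start' + new_ident)   # save_locals_start_12_4
print('restore_locals_start' + new_ident)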
4,622 | def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
two_sided=False, min_distance=8.):
"""Creates pandas dataframe with img cluster statistics.
This function should work on any statistical maps where more extreme values
indicate greater statistical significance.
For example, z-statistic or -log10(p) maps are valid inputs, but a p-value
map is not.
.. important::
For binary clusters (clusters comprised of only one value),
the table reports the center of mass of the cluster,
rather than any peaks/subpeaks.
This center of mass may, in some cases, appear outside of the cluster.
Parameters
----------
stat_img : Niimg-like object
Statistical image to threshold and summarize.
stat_threshold : :obj:`float`
Cluster forming threshold. This value must be in the same scale as
``stat_img``.
cluster_threshold : :obj:`int` or None, optional
Cluster size threshold, in voxels. If None, then no cluster size
threshold will be applied. Default=None.
two_sided : :obj:`bool`, optional
Whether to employ two-sided thresholding or to evaluate positive values
only. Default=False.
min_distance : :obj:`float`, optional
Minimum distance between subpeaks, in millimeters. Default=8.
Returns
-------
df : `pandas.DataFrame`
Table with peaks and subpeaks from thresholded ``stat_img``.
The columns in this table include:
================== ====================================================
Cluster ID The cluster number. Subpeaks have letters after the
number.
X/Y/Z The coordinate for the peak, in millimeters.
Peak Stat The statistical value associated with the peak.
The statistic type is dependent on the type of the
statistical image.
Cluster Size (mm3) The size of the cluster, in millimeters cubed.
Rows corresponding to subpeaks will not have a value
in this column.
================== ====================================================
"""
cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
# Replace None with 0
cluster_threshold = 0 if cluster_threshold is None else cluster_threshold
# check that stat_img is niimg-like object and 3D
stat_img = check_niimg_3d(stat_img)
# Apply threshold(s) to image
stat_img = threshold_img(
img=stat_img,
threshold=stat_threshold,
cluster_threshold=cluster_threshold,
two_sided=two_sided,
mask_img=None,
copy=True,
)
# If cluster threshold is used, there is chance that stat_map will be
# modified, therefore copy is needed
stat_map = _safe_get_data(stat_img, ensure_finite=True,
copy_data=(cluster_threshold is not None))
# Define array for 6-connectivity, aka NN1 or "faces"
conn_mat = ndimage.generate_binary_structure(rank=3, connectivity=2)
voxel_size = np.prod(stat_img.header.get_zooms())
signs = [1, -1] if two_sided else [1]
no_clusters_found = True
rows = []
for sign in signs:
# Flip map if necessary
temp_stat_map = stat_map * sign
# Binarize using cluster-defining threshold
binarized = temp_stat_map > stat_threshold
binarized = binarized.astype(int)
# If the stat threshold is too high simply return an empty dataframe
if np.sum(binarized) == 0:
warnings.warn(
'Attention: No clusters with stat {0} than {1}'.format(
'higher' if sign == 1 else 'lower',
stat_threshold * sign,
)
)
continue
# Now re-label and create table
label_map = ndimage.measurements.label(binarized, conn_mat)[0]
clust_ids = sorted(list(np.unique(label_map)[1:]))
peak_vals = np.array(
[np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
# Sort by descending max value
clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]
for c_id, c_val in enumerate(clust_ids):
cluster_mask = label_map == c_val
masked_data = temp_stat_map * cluster_mask
cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)
# Get peaks, subpeaks and associated statistics
subpeak_ijk, subpeak_vals = _local_max(
masked_data,
stat_img.affine,
min_distance=min_distance,
)
subpeak_vals *= sign # flip signs if necessary
subpeak_xyz = np.asarray(
coord_transform(
subpeak_ijk[:, 0],
subpeak_ijk[:, 1],
subpeak_ijk[:, 2],
stat_img.affine,
)
).tolist()
subpeak_xyz = np.array(subpeak_xyz).T
# Only report peak and, at most, top 3 subpeaks.
n_subpeaks = np.min((len(subpeak_vals), 4))
for subpeak in range(n_subpeaks):
if subpeak == 0:
row = [
c_id + 1,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
cluster_size_mm,
]
else:
# Subpeak naming convention is cluster num+letter:
# 1a, 1b, etc
sp_id = '{0}{1}'.format(
c_id + 1,
ascii_lowercase[subpeak - 1],
)
row = [
sp_id,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
'',
]
rows += [row]
# If we reach this point, there are clusters in this sign
no_clusters_found = False
if no_clusters_found:
df = pd.DataFrame(columns=cols)
else:
df = pd.DataFrame(columns=cols, data=rows)
return df
| def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
two_sided=False, min_distance=8.):
"""Creates pandas dataframe with img cluster statistics.
This function should work on any statistical maps where more extreme values
indicate greater statistical significance.
For example, z-statistic or -log10(p) maps are valid inputs, but a p-value
map is not.
.. important::
For binary clusters (clusters comprised of only one value),
the table reports the center of mass of the cluster,
rather than any peaks/subpeaks.
This center of mass may, in some cases, appear outside of the cluster.
Parameters
----------
stat_img : Niimg-like object
Statistical image to threshold and summarize.
stat_threshold : :obj:`float`
Cluster forming threshold. This value must be in the same scale as
``stat_img``.
cluster_threshold : :obj:`int` or None, optional
Cluster size threshold, in :term:`voxels<voxel>`. If None, then no cluster size
threshold will be applied. Default=None.
two_sided : :obj:`bool`, optional
Whether to employ two-sided thresholding or to evaluate positive values
only. Default=False.
min_distance : :obj:`float`, optional
Minimum distance between subpeaks, in millimeters. Default=8.
Returns
-------
df : `pandas.DataFrame`
Table with peaks and subpeaks from thresholded ``stat_img``.
The columns in this table include:
================== ====================================================
Cluster ID The cluster number. Subpeaks have letters after the
number.
X/Y/Z The coordinate for the peak, in millimeters.
Peak Stat The statistical value associated with the peak.
The statistic type is dependent on the type of the
statistical image.
Cluster Size (mm3) The size of the cluster, in millimeters cubed.
Rows corresponding to subpeaks will not have a value
in this column.
================== ====================================================
"""
cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
# Replace None with 0
cluster_threshold = 0 if cluster_threshold is None else cluster_threshold
# check that stat_img is niimg-like object and 3D
stat_img = check_niimg_3d(stat_img)
# Apply threshold(s) to image
stat_img = threshold_img(
img=stat_img,
threshold=stat_threshold,
cluster_threshold=cluster_threshold,
two_sided=two_sided,
mask_img=None,
copy=True,
)
# If cluster threshold is used, there is chance that stat_map will be
# modified, therefore copy is needed
stat_map = _safe_get_data(stat_img, ensure_finite=True,
copy_data=(cluster_threshold is not None))
# Define array for 6-connectivity, aka NN1 or "faces"
conn_mat = ndimage.generate_binary_structure(rank=3, connectivity=2)
voxel_size = np.prod(stat_img.header.get_zooms())
signs = [1, -1] if two_sided else [1]
no_clusters_found = True
rows = []
for sign in signs:
# Flip map if necessary
temp_stat_map = stat_map * sign
# Binarize using cluster-defining threshold
binarized = temp_stat_map > stat_threshold
binarized = binarized.astype(int)
# If the stat threshold is too high simply return an empty dataframe
if np.sum(binarized) == 0:
warnings.warn(
'Attention: No clusters with stat {0} than {1}'.format(
'higher' if sign == 1 else 'lower',
stat_threshold * sign,
)
)
continue
# Now re-label and create table
label_map = ndimage.measurements.label(binarized, conn_mat)[0]
clust_ids = sorted(list(np.unique(label_map)[1:]))
peak_vals = np.array(
[np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
# Sort by descending max value
clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]
for c_id, c_val in enumerate(clust_ids):
cluster_mask = label_map == c_val
masked_data = temp_stat_map * cluster_mask
cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)
# Get peaks, subpeaks and associated statistics
subpeak_ijk, subpeak_vals = _local_max(
masked_data,
stat_img.affine,
min_distance=min_distance,
)
subpeak_vals *= sign # flip signs if necessary
subpeak_xyz = np.asarray(
coord_transform(
subpeak_ijk[:, 0],
subpeak_ijk[:, 1],
subpeak_ijk[:, 2],
stat_img.affine,
)
).tolist()
subpeak_xyz = np.array(subpeak_xyz).T
# Only report peak and, at most, top 3 subpeaks.
n_subpeaks = np.min((len(subpeak_vals), 4))
for subpeak in range(n_subpeaks):
if subpeak == 0:
row = [
c_id + 1,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
cluster_size_mm,
]
else:
# Subpeak naming convention is cluster num+letter:
# 1a, 1b, etc
sp_id = '{0}{1}'.format(
c_id + 1,
ascii_lowercase[subpeak - 1],
)
row = [
sp_id,
subpeak_xyz[subpeak, 0],
subpeak_xyz[subpeak, 1],
subpeak_xyz[subpeak, 2],
subpeak_vals[subpeak],
'',
]
rows += [row]
# If we reach this point, there are clusters in this sign
no_clusters_found = False
if no_clusters_found:
df = pd.DataFrame(columns=cols)
else:
df = pd.DataFrame(columns=cols, data=rows)
return df
|
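A usage sketch for the function above; the file name and threshold values are placeholders, and the import path assumes the function is exposed through ``nilearn.reporting``.

from nilearn.reporting import get_clusters_table

table = get_clusters_table(
    "z_map.nii.gz",        # any 3D Niimg-like statistical image
    stat_threshold=3.1,    # cluster-forming threshold, same scale as the map
    cluster_threshold=10,  # discard clusters smaller than 10 voxels
    two_sided=True,        # also report clusters of negative values
    min_distance=8.0,      # subpeaks closer than 8 mm are merged
)
print(table.head())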
11,482 | def generate_ci(template_path: Path, folder_path: Path, package_name: str) -> None:
ci = Path(folder_path, "ci.yml")
ci_template_path = template_path / 'ci.yml'
service_name = folder_path.name
name = package_name.split('-')[-1]
if not ci.exists():
with open(ci_template_path, "r") as file_in:
content = file_in.readlines()
content = [line.replace("ServiceName", service_name).replace('PackageName', name) for line in content]
else:
with open(str(ci), "r") as file_in:
content = file_in.readlines()
for line in content:
if f'{package_name}\n' in line:
return
content.append(f' - name: {package_name}\n')
content.append(f' safeName: {package_name.replace("-", "")}\n')
with open(str(ci), "w") as file_out:
file_out.writelines(content)
| def generate_ci(template_path: Path, folder_path: Path, package_name: str) -> None:
ci = Path(folder_path, "ci.yml")
ci_template_path = template_path / 'ci.yml'
service_name = folder_path.name
name = package_name.split('-')[-1]
if not ci.exists():
with open(ci_template_path, "r") as file_in:
content = file_in.readlines()
content = [line.replace("ServiceName", service_name).replace('PackageName', name) for line in content]
else:
with open(ci, "r") as file_in:
content = file_in.readlines()
for line in content:
if f'{package_name}\n' in line:
return
content.append(f' - name: {package_name}\n')
content.append(f' safeName: {package_name.replace("-", "")}\n')
with open(str(ci), "w") as file_out:
file_out.writelines(content)
|
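An example call, with placeholder paths and package name; the function either instantiates ``ci.yml`` from the template or appends the package entry to an existing file.

from pathlib import Path

generate_ci(
    template_path=Path("scripts/templates"),  # directory containing the ci.yml template
    folder_path=Path("sdk/contoso"),          # service folder; its name fills ServiceName
    package_name="azure-mgmt-contoso",        # placeholder package name
)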
37,808 | def deprecated_selectors(name: str, selector: str, *, error: bool = False) -> None:
if "p27" in selector or "p35" in selector:
msg = f"cibuildwheel 2.x no longer supports Python < 3.6. Please use the 1.x series or update {name}"
print(msg, file=sys.stderr)
if error:
sys.exit(4)
| def deprecated_selectors(name: str, selector: str, *, error: bool = False) -> None:
if "p2" in selector or "p35" in selector:
msg = f"cibuildwheel 2.x no longer supports Python < 3.6. Please use the 1.x series or update {name}"
print(msg, file=sys.stderr)
if error:
sys.exit(4)
|
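The change widens the substring test from "p27" to "p2", so glob-style selectors that never spell out a full Python 2 tag are also caught. The selector strings below are illustrative.

selectors = ["cp27-manylinux*", "pp2*", "cp36-*"]

old_hits = [s for s in selectors if "p27" in s or "p35" in s]
new_hits = [s for s in selectors if "p2" in s or "p35" in s]

print(old_hits)  # ['cp27-manylinux*']          -- misses 'pp2*'
print(new_hits)  # ['cp27-manylinux*', 'pp2*']  -- broadened check catches it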