id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
8,510 |
def status_show(context, data_dict):
'''Return a dictionary with information about the site's configuration.
:rtype: dictionary
'''
plugins = config.get('ckan.plugins')
extensions = "" if plugins == None else plugins.split()
return {
'site_title': config.get('ckan.site_title'),
'site_description': config.get('ckan.site_description'),
'site_url': config.get('ckan.site_url'),
'ckan_version': ckan.__version__,
'error_emails_to': config.get('email_to'),
'locale_default': config.get('ckan.locale_default'),
'extensions': extensions,
}
|
def status_show(context, data_dict):
'''Return a dictionary with information about the site's configuration.
:rtype: dictionary
'''
plugins = config.get('ckan.plugins')
extensions = plugins.split() if plugins else []
return {
'site_title': config.get('ckan.site_title'),
'site_description': config.get('ckan.site_description'),
'site_url': config.get('ckan.site_url'),
'ckan_version': ckan.__version__,
'error_emails_to': config.get('email_to'),
'locale_default': config.get('ckan.locale_default'),
'extensions': extensions,
}
|
50,162 |
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType),
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
signature = safe_inspect_signature(runtime)
if signature:
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
else:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
if isinstance(runtime, type):
runtime_type = runtime # This might be a class
else:
runtime_type = type(runtime) # Or an instance
stub = get_stub(runtime_type.__module__)
if stub is None:
return None
type_name = runtime_type.__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
value: Union[bool, int, str]
if isinstance(runtime, bytes):
value = bytes_to_human_readable_repr(runtime)
elif isinstance(runtime, enum.Enum):
value = runtime.name
elif isinstance(runtime, (bool, int, str)):
value = runtime
else:
if isinstance(runtime, type):
return mypy.types.TypeType(fallback)
return fallback
return mypy.types.LiteralType(value=value, fallback=fallback)
|
def get_mypy_type_of_runtime_value(runtime: Any) -> Optional[mypy.types.Type]:
"""Returns a mypy type object representing the type of ``runtime``.
Returns None if we can't find something that works.
"""
if runtime is None:
return mypy.types.NoneType()
if isinstance(runtime, property):
# Give up on properties to avoid issues with things that are typed as attributes.
return None
def anytype() -> mypy.types.AnyType:
return mypy.types.AnyType(mypy.types.TypeOfAny.unannotated)
if isinstance(
runtime,
(types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType),
):
builtins = get_stub("builtins")
assert builtins is not None
type_info = builtins.names["function"].node
assert isinstance(type_info, nodes.TypeInfo)
fallback = mypy.types.Instance(type_info, [anytype()])
signature = safe_inspect_signature(runtime)
if signature:
arg_types = []
arg_kinds = []
arg_names = []
for arg in signature.parameters.values():
arg_types.append(anytype())
arg_names.append(
None if arg.kind == inspect.Parameter.POSITIONAL_ONLY else arg.name
)
has_default = arg.default == inspect.Parameter.empty
if arg.kind == inspect.Parameter.POSITIONAL_ONLY:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
arg_kinds.append(nodes.ARG_POS if has_default else nodes.ARG_OPT)
elif arg.kind == inspect.Parameter.KEYWORD_ONLY:
arg_kinds.append(nodes.ARG_NAMED if has_default else nodes.ARG_NAMED_OPT)
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
arg_kinds.append(nodes.ARG_STAR)
elif arg.kind == inspect.Parameter.VAR_KEYWORD:
arg_kinds.append(nodes.ARG_STAR2)
else:
raise AssertionError
else:
arg_types = [anytype(), anytype()]
arg_kinds = [nodes.ARG_STAR, nodes.ARG_STAR2]
arg_names = [None, None]
return mypy.types.CallableType(
arg_types,
arg_kinds,
arg_names,
ret_type=anytype(),
fallback=fallback,
is_ellipsis_args=True,
)
# Try and look up a stub for the runtime object
if isinstance(runtime, type):
runtime_type = runtime # This might be a class
else:
runtime_type = type(runtime) # Or an instance
stub = get_stub(runtime_type.__module__)
if stub is None:
return None
type_name = runtime_type.__name__
if type_name not in stub.names:
return None
type_info = stub.names[type_name].node
if isinstance(type_info, nodes.Var):
return type_info.type
if not isinstance(type_info, nodes.TypeInfo):
return None
if isinstance(runtime, tuple):
# Special case tuples so we construct a valid mypy.types.TupleType
optional_items = [get_mypy_type_of_runtime_value(v) for v in runtime]
items = [(i if i is not None else anytype()) for i in optional_items]
fallback = mypy.types.Instance(type_info, [anytype()])
return mypy.types.TupleType(items, fallback)
fallback = mypy.types.Instance(type_info, [anytype() for _ in type_info.type_vars])
value: Union[bool, int, str]
if isinstance(runtime, bytes):
value = bytes_to_human_readable_repr(runtime)
elif isinstance(runtime, enum.Enum):
value = runtime.name
elif isinstance(runtime, (bool, int, str)):
value = runtime
elif isinstance(runtime, type):
return mypy.types.TypeType(fallback)
else:
return fallback
return mypy.types.LiteralType(value=value, fallback=fallback)
|
14,112 |
def test_from_wkb__hex():
geometry_hex = ["0101000000CDCCCCCCCCCC1440CDCCCCCCCC0C4A40"]
res = from_wkb(geometry_hex)
assert isinstance(res, GeometryArray)
# array
res = from_wkb(np.array(geometry_hex, dtype=object))
assert isinstance(res, GeometryArray)
|
def test_from_wkb_hex():
geometry_hex = ["0101000000CDCCCCCCCCCC1440CDCCCCCCCC0C4A40"]
res = from_wkb(geometry_hex)
assert isinstance(res, GeometryArray)
# array
res = from_wkb(np.array(geometry_hex, dtype=object))
assert isinstance(res, GeometryArray)
|
8,639 |
def list_aliases(options):
"""
Show aliases that exist.
"""
data = _get_alias_details(options['node-directory'])
if options['json']:
output = _escape_format(json.dumps(data, indent=4).decode("utf-8"))
else:
def dircap(details):
return (
details['readonly']
if options['readonly-uri']
else details['readwrite']
).decode("utf-8")
max_width = max([len(quote_output(name)) for name in data.keys()] + [0])
fmt = "%" + str(max_width) + "s: %s"
output = "\n".join(list(
fmt % (name, dircap(details))
for name, details
in data.items()
))
if output:
# Show whatever we computed. Skip this if there is no output to avoid
# a spurious blank line.
show_output(options.stdout, output)
return 0
|
def list_aliases(options):
"""
Show aliases that exist.
"""
data = _get_alias_details(options['node-directory'])
if options['json']:
output = _escape_format(json.dumps(data, indent=4, ensure_ascii=False))
else:
def dircap(details):
return (
details['readonly']
if options['readonly-uri']
else details['readwrite']
).decode("utf-8")
max_width = max([len(quote_output(name)) for name in data.keys()] + [0])
fmt = "%" + str(max_width) + "s: %s"
output = "\n".join(list(
fmt % (name, dircap(details))
for name, details
in data.items()
))
if output:
# Show whatever we computed. Skip this if there is no output to avoid
# a spurious blank line.
show_output(options.stdout, output)
return 0
|
385 |
def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=None,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws: int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel: SMC Kernel used. Defaults to pm.smc.IMH (Independent Metropolis Hastings)
start: dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model: Model (optional if in ``with`` context)).
random_seed: int
random seed
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool
Whether to compute sampler statistics like ``R hat`` and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default=True
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc.to_inference_data`
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line.
**kernel_kwargs: keyword arguments passed to the SMC kernel.
The default IMH kernel takes the following keywords:
threshold: float
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
correlation_threshold: float
The lower the value the higher the number of MCMC steps computed automatically.
Defaults to 0.01. It should be between 0 and 1.
Keyword arguments for other kernels should be checked in the respective docstrings
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` =1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equals some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
7. Run N independent MCMC chains, starting each one from a different sample
in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the
mean of the previous posterior stage and not the current point in parameter space.
8. The N chains are run until the autocorrelation with the samples from the previous
stops decreasing given a certain threshold.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
FutureWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
FutureWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
FutureWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
FutureWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
if random_seed == -1:
raise FutureWarning(
f"random_seed should be a non-negative integer or None, got: {random_seed}"
"This will raise a ValueError in the Future"
)
random_seed = None
if isinstance(random_seed, int) or random_seed is None:
rng = np.random.default_rng(seed=random_seed)
random_seed = list(rng.integers(2**30, size=chains))
elif isinstance(random_seed, Iterable):
if len(random_seed) != chains:
raise ValueError(f"Length of seeds ({len(seeds)}) must match number of chains {chains}")
else:
raise TypeError("Invalid value for `random_seed`. Must be tuple, list, int or None")
model = modelcontext(model)
_log = logging.getLogger("pymc")
_log.info("Initializing SMC sampler...")
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
results = run_chains_parallel(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores
)
else:
results = run_chains_sequential(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
_t_sampling = time.time() - t1
sample_stats, idata = _save_sample_stats(
sample_settings,
sample_stats,
chains,
trace,
return_inferencedata,
_t_sampling,
idata_kwargs,
model,
)
if compute_convergence_checks:
_compute_convergence_checks(idata, draws, model, trace)
return idata if return_inferencedata else trace
|
def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=None,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws: int
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel: SMC Kernel used. Defaults to pm.smc.IMH (Independent Metropolis Hastings)
start: dict, or array of dict
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model: Model (optional if in ``with`` context)).
random_seed: int
random seed
chains : int
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool
Whether to compute sampler statistics like ``R hat`` and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default=True
Whether to return the trace as an :class:`arviz:arviz.InferenceData` (True) object or a `MultiTrace` (False)
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc.to_inference_data`
progressbar : bool, optional default=True
Whether or not to display a progress bar in the command line.
**kernel_kwargs: keyword arguments passed to the SMC kernel.
The default IMH kernel takes the following keywords:
threshold: float
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
correlation_threshold: float
The lower the value the higher the number of MCMC steps computed automatically.
Defaults to 0.01. It should be between 0 and 1.
Keyword arguments for other kernels should be checked in the respective docstrings
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` =1 we have the posterior distribution.
So in more general terms we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
2. Generate N samples :math:`S_{\beta}` from the prior (because when :math `\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equals some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MVNormal.
7. Run N independent MCMC chains, starting each one from a different sample
in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the
mean of the previous posterior stage and not the current point in parameter space.
8. The N chains are run until the autocorrelation with the samples from the previous stage
stops decreasing given a certain threshold.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E. and Simons, M. and Beck, J. L., (2013),
Bayesian inversion for finite fault earthquake source models I- Theory and algorithm.
Geophysical Journal International, 2013, 194(3), pp.1701-1726,
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J. and Chen, Y. (2007).
Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging. J. Eng. Mech., 10.1061/(ASCE)0733-9399(2007)133:7(816),
816-832. `link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
FutureWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
FutureWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
FutureWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
FutureWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
if random_seed == -1:
raise FutureWarning(
f"random_seed should be a non-negative integer or None, got: {random_seed}"
"This will raise a ValueError in the Future"
)
random_seed = None
if isinstance(random_seed, int) or random_seed is None:
rng = np.random.default_rng(seed=random_seed)
random_seed = list(rng.integers(2**30, size=chains))
elif isinstance(random_seed, Iterable):
if len(random_seed) != chains:
raise ValueError(f"Length of seeds ({len(seeds)}) must match number of chains {chains}")
else:
raise TypeError("Invalid value for `random_seed`. Must be tuple, list, int or None")
model = modelcontext(model)
_log = logging.getLogger("pymc")
_log.info("Initializing SMC sampler...")
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
results = run_chains_parallel(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores
)
else:
results = run_chains_sequential(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
_t_sampling = time.time() - t1
sample_stats, idata = _save_sample_stats(
sample_settings,
sample_stats,
chains,
trace,
return_inferencedata,
_t_sampling,
idata_kwargs,
model,
)
if compute_convergence_checks:
_compute_convergence_checks(idata, draws, model, trace)
return idata if return_inferencedata else trace
|
2,711 |
def _assert_all_finite(
X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=""
):
"""Like assert_all_finite, but only for ndarray."""
if _get_config()["assume_finite"]:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method. The sum is also calculated
# safely to reduce dtype induced overflows.
is_float = X.dtype.kind in "fc"
with np.errstate(over="ignore"):
first_pass_isfinite = is_float and np.isfinite(np.sum(X))
if first_pass_isfinite:
pass
elif is_float:
has_inf = np.isinf(X).any()
has_nan = np.isnan(X).any()
if has_inf or not allow_nan and has_nan:
if not allow_nan and has_nan:
type_err = "NaN"
else:
msg_dtype = msg_dtype if msg_dtype is not None else X.dtype
type_err = f"infinity or a value too large for {msg_dtype!r}"
padded_input_name = input_name + " " if input_name else ""
msg_err = f"Input {padded_input_name}contains {type_err}."
if not allow_nan and estimator_name and input_name == "X" and has_nan:
# Improve the error message on how to handle missing values in
# scikit-learn.
msg_err += (
f"\n{estimator_name} does not accept missing values"
" encoded as NaN natively. For supervised learning, you might want"
" to consider sklearn.ensemble.HistGradientBoostingClassifier and"
" Regressor which accept missing values encoded as NaNs natively."
" Alternatively, it is possible to preprocess the data, for"
" instance by using an imputer transformer in a pipeline or drop"
" samples with missing values. See"
" https://scikit-learn.org/stable/modules/impute.html"
" You can find a list of all estimators that handle NaN values"
" at the following page:"
" https://scikit-learn.org/stable/modules/impute.html"
"#estimators-that-handle-nan-values"
)
raise ValueError(msg_err)
# for object dtype data, we only check for NaNs (GH-13254)
elif X.dtype == np.dtype("object") and not allow_nan:
if _object_dtype_isnan(X).any():
raise ValueError("Input contains NaN")
|
def _assert_all_finite(
X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=""
):
"""Like assert_all_finite, but only for ndarray."""
if _get_config()["assume_finite"]:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
is_float = X.dtype.kind in "fc"
with np.errstate(over="ignore"):
first_pass_isfinite = is_float and np.isfinite(np.sum(X))
if first_pass_isfinite:
pass
elif is_float:
has_inf = np.isinf(X).any()
has_nan = np.isnan(X).any()
if has_inf or not allow_nan and has_nan:
if not allow_nan and has_nan:
type_err = "NaN"
else:
msg_dtype = msg_dtype if msg_dtype is not None else X.dtype
type_err = f"infinity or a value too large for {msg_dtype!r}"
padded_input_name = input_name + " " if input_name else ""
msg_err = f"Input {padded_input_name}contains {type_err}."
if not allow_nan and estimator_name and input_name == "X" and has_nan:
# Improve the error message on how to handle missing values in
# scikit-learn.
msg_err += (
f"\n{estimator_name} does not accept missing values"
" encoded as NaN natively. For supervised learning, you might want"
" to consider sklearn.ensemble.HistGradientBoostingClassifier and"
" Regressor which accept missing values encoded as NaNs natively."
" Alternatively, it is possible to preprocess the data, for"
" instance by using an imputer transformer in a pipeline or drop"
" samples with missing values. See"
" https://scikit-learn.org/stable/modules/impute.html"
" You can find a list of all estimators that handle NaN values"
" at the following page:"
" https://scikit-learn.org/stable/modules/impute.html"
"#estimators-that-handle-nan-values"
)
raise ValueError(msg_err)
# for object dtype data, we only check for NaNs (GH-13254)
elif X.dtype == np.dtype("object") and not allow_nan:
if _object_dtype_isnan(X).any():
raise ValueError("Input contains NaN")
|
25,145 |
def _to_literal(node):
# Can raise SyntaxError or ValueError from ast.literal_eval
# Is this the stupidest idea or the simplest idea?
return ast.literal_eval(node.as_string())
|
def _to_literal(node: nodes.Compare):
# Can raise SyntaxError or ValueError from ast.literal_eval
# Is this the stupidest idea or the simplest idea?
return ast.literal_eval(node.as_string())
|
8,079 |
def get_body_heliographic_stonyhurst(body, time='now', observer=None):
"""
Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a
solar-system body at a specified time.
Parameters
----------
body : `str`
The solar-system body for which to calculate positions
time : various
Time to use as `~astropy.time.Time` or in a parse_time-compatible format
observer : `~astropy.coordinates.SkyCoord`
If not None, the returned coordinate is the apparent location (i.e., factors in light
travel time)
Returns
-------
out : `~sunpy.coordinates.frames.HeliographicStonyhurst`
Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame
"""
obstime = parse_time(time)
if observer is None:
body_icrs = get_body_barycentric(body, obstime)
else:
observer_icrs = SkyCoord(observer).icrs.cartesian
# This implementation is modeled after Astropy's `_get_apparent_body_position`
light_travel_time = 0.*u.s
emitted_time = obstime
delta_light_travel_time = 1.*u.s # placeholder value
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_icrs = get_body_barycentric(body, emitted_time)
distance = (body_icrs - observer_icrs).norm()
delta_light_travel_time = light_travel_time - distance / speed_of_light
light_travel_time = distance / speed_of_light
emitted_time = obstime - light_travel_time
print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value))
body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime))
return body_hgs
|
def get_body_heliographic_stonyhurst(body, time='now', observer=None):
"""
Return a `~sunpy.coordinates.frames.HeliographicStonyhurst` frame for the location of a
solar-system body at a specified time.
Parameters
----------
body : `str`
The solar-system body for which to calculate positions
time : various
Time to use as `~astropy.time.Time` or in a parse_time-compatible format
observer : `~astropy.coordinates.SkyCoord`
If not None, the returned coordinate is the apparent location (i.e., factors in light
travel time)
Returns
-------
out : `~sunpy.coordinates.frames.HeliographicStonyhurst`
Location of the solar-system body in the `~sunpy.coordinates.HeliographicStonyhurst` frame
"""
obstime = parse_time(time)
if observer is None:
body_icrs = get_body_barycentric(body, obstime)
else:
observer_icrs = SkyCoord(observer).icrs.cartesian
# This implementation is modeled after Astropy's `_get_apparent_body_position`
light_travel_time = 0. * u.s
emitted_time = obstime
delta_light_travel_time = 1.*u.s # placeholder value
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_icrs = get_body_barycentric(body, emitted_time)
distance = (body_icrs - observer_icrs).norm()
delta_light_travel_time = light_travel_time - distance / speed_of_light
light_travel_time = distance / speed_of_light
emitted_time = obstime - light_travel_time
print('Apparent location factors in {} seconds of light travel time'.format(light_travel_time.to('s').value))
body_hgs = ICRS(body_icrs).transform_to(HGS(obstime=obstime))
return body_hgs
|
3,188 |
def merge_attached_breadcrumbs(mpack_breadcrumbs, data):
# Merge msgpack breadcrumb file.
if mpack_breadcrumbs.size > MAX_MSGPACK_BREADCRUMB_SIZE_BYTES:
return
try:
unpacker = msgpack.Unpacker(mpack_breadcrumbs)
breadcrumbs = list(unpacker)
except (UnpackException, ExtraData) as e:
minidumps_logger.exception(e)
return
if not breadcrumbs:
return
current_crumbs = data.get('breadcrumbs')
if not current_crumbs:
data['breadcrumbs'] = breadcrumbs
return
current_crumb = next((c for c in reversed(current_crumbs)
if isinstance(c, dict) and c.get('timestamp') is not None), None)
new_crumb = next((c for c in reversed(breadcrumbs) if isinstance(
c, dict) and c.get('timestamp') is not None), None)
# cap the breadcrumbs to the highest count of either file
cap = max(len(current_crumbs), len(breadcrumbs))
if current_crumb is not None and new_crumb is not None:
if dp.parse(current_crumb['timestamp']) > dp.parse(new_crumb['timestamp']):
data['breadcrumbs'] = breadcrumbs + current_crumbs
else:
data['breadcrumbs'] = current_crumbs + breadcrumbs
else:
data['breadcrumbs'] = current_crumbs + breadcrumbs
data['breadcrumbs'] = data['breadcrumbs'][len(data['breadcrumbs']) - cap:]
|
def merge_attached_breadcrumbs(mpack_breadcrumbs, data):
# Merge msgpack breadcrumb file.
if mpack_breadcrumbs.size > MAX_MSGPACK_BREADCRUMB_SIZE_BYTES:
return
try:
unpacker = msgpack.Unpacker(mpack_breadcrumbs)
breadcrumbs = list(unpacker)
except (UnpackException, ExtraData) as e:
minidumps_logger.exception(e)
return
if not breadcrumbs:
return
current_crumbs = data.get('breadcrumbs')
if not current_crumbs:
data['breadcrumbs'] = breadcrumbs
return
current_crumb = next((c for c in reversed(current_crumbs)
if isinstance(c, dict) and c.get('timestamp') is not None), None)
new_crumb = next((c for c in reversed(breadcrumbs) if isinstance(
c, dict) and c.get('timestamp') is not None), None)
# cap the breadcrumbs to the highest count of either file
cap = max(len(current_crumbs), len(breadcrumbs))
if current_crumb is not None and new_crumb is not None:
if dp.parse(current_crumb['timestamp']) > dp.parse(new_crumb['timestamp']):
data['breadcrumbs'] = breadcrumbs + current_crumbs
else:
data['breadcrumbs'] = current_crumbs + breadcrumbs
else:
data['breadcrumbs'] = current_crumbs + breadcrumbs
data['breadcrumbs'] = data['breadcrumbs'][:-cap]
|
57,987 |
def remove_duplicates_from_list_arg(args, field):
"""
Removes duplicates from a dict after calling argToList.
For example: args: {'ids': "1,2,1"} , field='ids'
The return output will be ["1","2"]
:type args: dict
:param args: Args to be converted (required)
:type field: str
:param field: Field in args to be converted into list without duplicates (required)
:return: A python list of args without duplicates
:rtype: ``list``
"""
convert_to_list = argToList(args.get(field))
return list(set(convert_to_list))
|
def remove_duplicates_from_list_arg(args, field):
"""
Removes duplicates from a dict after calling argToList.
For example: args: {'ids': "1,2,1"}, field='ids'
The return output will be ["1", "2"]
:type args: dict
:param args: Args to be converted (required)
:type field: str
:param field: Field in args to be converted into list without duplicates (required)
:return: A python list of args without duplicates
:rtype: ``list``
"""
convert_to_list = argToList(args.get(field))
return list(set(convert_to_list))
|
25,191 |
def test_deprecation_of_doc_attribute() -> None:
code = textwrap.dedent(
"""\
def func():
"Docstring"
return 1
"""
)
node: nodes.FunctionDef = extract_node(code) # type: ignore[assignment]
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
code = textwrap.dedent(
"""\
class MyClass():
'''Docstring'''
"""
)
node: nodes.ClassDef = extract_node(code) # type: ignore[assignment]
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
code = textwrap.dedent(
"""\
'''Docstring'''
"""
)
node = parse(code)
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
# If 'doc' isn't passed to Module, ClassDef, FunctionDef,
# no DeprecationWarning should be raised
doc_node = nodes.Const("Docstring")
with warnings.catch_warnings():
# Modify warnings filter to raise error for DeprecationWarning
warnings.simplefilter("error", DeprecationWarning)
node_module = nodes.Module(name="MyModule")
node_module.postinit(body=[], doc_node=doc_node)
assert node_module.doc_node == doc_node
node_class = nodes.ClassDef(name="MyClass")
node_class.postinit(bases=[], body=[], decorators=[], doc_node=doc_node)
assert node_class.doc_node == doc_node
node_func = nodes.FunctionDef(name="MyFunction")
node_func.postinit(args=nodes.Arguments(), body=[], doc_node=doc_node)
assert node_func.doc_node == doc_node
# Test 'doc' attribute if only 'doc_node' is passed
with pytest.warns(DeprecationWarning) as records:
assert node_module.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
assert node_class.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
assert node_func.doc == "Docstring"
assert len(records) == 1
# If 'doc' is passed Module, ClassDef, FunctionDef,
# a DeprecationWarning should be raised
doc_node = nodes.Const("Docstring")
with pytest.warns(DeprecationWarning) as records:
node_module = nodes.Module(name="MyModule", doc="Docstring")
node_class = nodes.ClassDef(name="MyClass", doc="Docstring")
node_func = nodes.FunctionDef(name="MyFunction", doc="Docstring")
assert len(records) == 3
|
def test_deprecation_of_doc_attribute() -> None:
code = textwrap.dedent(
"""\
def func():
"Docstring"
return 1
"""
)
node: nodes.FunctionDef = extract_node(code) # type: ignore[assignment]
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
code = textwrap.dedent(
"""\
class MyClass():
'''Docstring'''
"""
)
node: nodes.ClassDef = extract_node(code) # type: ignore[assignment]
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
code = textwrap.dedent(
"""\
'''Docstring'''
"""
)
node = parse(code)
with pytest.warns(DeprecationWarning) as records:
assert node.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
node.doc = None
assert len(records) == 1
# If 'doc' isn't passed to Module, ClassDef, FunctionDef,
# no DeprecationWarning should be raised
doc_node = nodes.Const("Docstring")
with warnings.catch_warnings():
# Modify warnings filter to raise error for DeprecationWarning
warnings.simplefilter("error", DeprecationWarning)
node_module = nodes.Module(name="MyModule")
node_module.postinit(body=[], doc_node=doc_node)
assert node_module.doc_node == doc_node
node_class = nodes.ClassDef(name="MyClass")
node_class.postinit(bases=[], body=[], decorators=[], doc_node=doc_node)
assert node_class.doc_node == doc_node
node_func = nodes.FunctionDef(name="MyFunction")
node_func.postinit(args=nodes.Arguments(), body=[], doc_node=doc_node)
assert node_func.doc_node == doc_node
# Test 'doc' attribute if only 'doc_node' is passed
with pytest.warns(DeprecationWarning) as records:
assert node_module.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
assert node_class.doc == "Docstring"
assert len(records) == 1
with pytest.warns(DeprecationWarning) as records:
assert node_func.doc == "Docstring"
assert len(records) == 1
# If 'doc' is passed to Module, ClassDef, FunctionDef,
# a DeprecationWarning should be raised
doc_node = nodes.Const("Docstring")
with pytest.warns(DeprecationWarning) as records:
node_module = nodes.Module(name="MyModule", doc="Docstring")
node_class = nodes.ClassDef(name="MyClass", doc="Docstring")
node_func = nodes.FunctionDef(name="MyFunction", doc="Docstring")
assert len(records) == 3
|
30,289 |
def get_service(serviceName, version, additional_scopes=None, delegated_user=None):
credentials = get_credentials(additional_scopes=additional_scopes, delegated_user=delegated_user)
if PROXY:
http_client = credentials.authorize(get_http_client_with_proxy())
return discovery.build(serviceName, version, http=http_client)
return discovery.build(serviceName, version, credentials=credentials)
|
def get_service(service_name, version, additional_scopes=None, delegated_user=None):
credentials = get_credentials(additional_scopes=additional_scopes, delegated_user=delegated_user)
if PROXY:
http_client = credentials.authorize(get_http_client_with_proxy())
return discovery.build(serviceName, version, http=http_client)
return discovery.build(serviceName, version, credentials=credentials)
|
32,788 |
def patch():
if getattr(pynamodb.connection.base, '_datadog_patch', False):
return
setattr(pynamodb.connection.base, '_datadog_patch', True)
wrapt.wrap_function_wrapper('pynamodb.connection.base', 'Connection._make_api_call', patched_api_call)
Pin(service=None, app='aws').onto(pynamodb.connection.base.Connection)
|
def patch():
if getattr(pynamodb.connection.base, '_datadog_patch', False):
return
setattr(pynamodb.connection.base, '_datadog_patch', True)
wrapt.wrap_function_wrapper('pynamodb.connection.base', 'Connection._make_api_call', patched_api_call)
Pin(service=None).onto(pynamodb.connection.base.Connection)
|
2,648 |
def test_safe_indexing_pandas_no_settingwithcopy_warning():
# Using safe_indexing with an array-like indexer gives a copy of the
# DataFrame -> ensure it doesn't raise a warning if modified
pd = pytest.importorskip("pandas")
X = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
subset = _safe_indexing(X, [0, 1], axis=0)
with warnings.catch_warnings():
warnings.simplefilter("error", SettingWithCopyWarning)
subset.iloc[0, 0] = 10
# The original dataframe is unaffected by the assignment on the subset:
assert X.iloc[0, 0] == 1
|
def test_safe_indexing_pandas_no_settingwithcopy_warning():
# Using safe_indexing with an array-like indexer gives a copy of the
# DataFrame -> ensure it doesn't raise a warning if modified
pd = pytest.importorskip("pandas")
X = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
subset = _safe_indexing(X, [0, 1], axis=0)
with warnings.catch_warnings():
warnings.simplefilter("error", pd.core.common.SettingWithCopyWarning)
subset.iloc[0, 0] = 10
# The original dataframe is unaffected by the assignment on the subset:
assert X.iloc[0, 0] == 1
|
15,155 |
def _identify_config_schema(module: ModuleType) -> Optional[str]:
"""Extract the schema and identify list or dict based."""
if not hasattr(module.CONFIG_SCHEMA, "schema"): # type: ignore
return None
schema = module.CONFIG_SCHEMA.schema # type: ignore
if isinstance(schema, vol.All):
for subschema in schema.validators:
if isinstance(subschema, dict):
schema = subschema
break
else:
return None
try:
key = next(k for k in schema if k == module.DOMAIN) # type: ignore
except (TypeError, AttributeError, StopIteration):
return None
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error identifying config schema")
return None
if hasattr(key, "default") and not isinstance(
key.default, vol.schema_builder.Undefined
):
default_value = module.CONFIG_SCHEMA({module.DOMAIN: key.default()})[ # type: ignore
module.DOMAIN # type: ignore
]
if isinstance(default_value, dict):
return "dict"
if isinstance(default_value, list):
return "list"
return None
domain_schema = schema[key]
t_schema = str(domain_schema)
if t_schema.startswith("{") or "schema_with_slug_keys" in t_schema:
return "dict"
if t_schema.startswith(("[", "All(<function ensure_list")):
return "list"
return None
|
def _identify_config_schema(module: ModuleType) -> Optional[str]:
"""Extract the schema and identify list or dict based."""
if not isinstance(module.CONFIG_SCHEMA, vol.Schema):
return None
schema = module.CONFIG_SCHEMA.schema # type: ignore
if isinstance(schema, vol.All):
for subschema in schema.validators:
if isinstance(subschema, dict):
schema = subschema
break
else:
return None
try:
key = next(k for k in schema if k == module.DOMAIN) # type: ignore
except (TypeError, AttributeError, StopIteration):
return None
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error identifying config schema")
return None
if hasattr(key, "default") and not isinstance(
key.default, vol.schema_builder.Undefined
):
default_value = module.CONFIG_SCHEMA({module.DOMAIN: key.default()})[ # type: ignore
module.DOMAIN # type: ignore
]
if isinstance(default_value, dict):
return "dict"
if isinstance(default_value, list):
return "list"
return None
domain_schema = schema[key]
t_schema = str(domain_schema)
if t_schema.startswith("{") or "schema_with_slug_keys" in t_schema:
return "dict"
if t_schema.startswith(("[", "All(<function ensure_list")):
return "list"
return None
|
57,498 |
def field_type_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
nested_models: Set[str] = set()
f_schema: Dict[str, Any]
if field.shape in {SHAPE_LIST, SHAPE_TUPLE_ELLIPSIS, SHAPE_SEQUENCE, SHAPE_SET, SHAPE_FROZENSET, SHAPE_ITERABLE}:
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
f_schema = {'type': 'array', 'items': items_schema}
if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:
f_schema['uniqueItems'] = True
elif field.shape == SHAPE_MAPPING:
f_schema = {'type': 'object'}
key_field = cast(ModelField, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if regex:
# Dict keys have a regex pattern
# items_schema might be a schema or empty dict, add it either way
f_schema['patternProperties'] = {regex.pattern: items_schema}
elif items_schema:
# The dict values are not simply Any, so they need a schema
f_schema['additionalProperties'] = items_schema
elif field.shape == SHAPE_TUPLE:
sub_schema = []
sub_fields = cast(List[ModelField], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions, sf_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sf_definitions)
nested_models.update(sf_nested_models)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
f_schema = {'type': 'array', 'items': sub_schema}
else:
assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape
f_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
# check field type to avoid repeated calls to the same __modify_schema__ method
if field.type_ != field.outer_type_:
if field.shape == SHAPE_GENERIC:
modify_schema = getattr(field.type_, '__modify_schema__', None)
else:
modify_schema = getattr(field.outer_type_, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
return f_schema, definitions, nested_models
|
def field_type_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
nested_models: Set[str] = set()
f_schema: Dict[str, Any]
if field.shape in {SHAPE_LIST, SHAPE_TUPLE_ELLIPSIS, SHAPE_SEQUENCE, SHAPE_SET, SHAPE_FROZENSET, SHAPE_ITERABLE}:
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
f_schema = {'type': 'array', 'items': items_schema}
if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:
f_schema['uniqueItems'] = True
elif field.shape == SHAPE_MAPPING:
f_schema = {'type': 'object'}
key_field = cast(ModelField, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if regex:
# Dict keys have a regex pattern
# items_schema might be a schema or empty dict, add it either way
f_schema['patternProperties'] = {regex.pattern: items_schema}
elif items_schema:
# The dict values are not simply Any, so they need a schema
f_schema['additionalProperties'] = items_schema
elif field.shape == SHAPE_TUPLE:
sub_schema = []
sub_fields = cast(List[ModelField], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions, sf_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sf_definitions)
nested_models.update(sf_nested_models)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
f_schema = {'type': 'array', 'items': sub_schema}
else:
assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape
f_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
# check field type to avoid repeated calls to the same __modify_schema__ method
if field.type_ != field.outer_type_:
if field.shape == SHAPE_GENERIC:
field_type = field.type_
else:
field_type = field.outer_type_
modify_schema = getattr(field_type, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
return f_schema, definitions, nested_models
|
55,730 |
def view_labels(
data=None,
*,
num_colors=50,
properties=None,
seed=0.5,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
multiscale=None,
title='napari',
ndisplay=2,
order=None,
axis_labels=None,
show=True,
color=None,
):
"""Create a viewer and add a labels (or segmentation) layer.
An image-like layer where every pixel contains an integer ID
corresponding to the region it belongs to.
Using the viewer's label editing tools (painting, erasing) will
modify the input-array in-place.
To avoid this, pass a copy as follows:
viewer = napari.view_labels(data.copy(), name="sample")
# do some painting/editing
Get the painted labels as follows:
result = viewer.layers["sample"].data
Parameters
----------
data : array or list of array
Labels data as an array or multiscale.
num_colors : int
Number of unique colors to use in colormap.
properties : dict {str: array (N,)}, DataFrame
Properties for each label. Each property should be an array of length
N, where N is the number of labels, and the first property corresponds to
background.
seed : float
Seed for colormap random generator.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
multiscale : bool
Whether the data is a multiscale image or not. Multiscale data is
represented by a list of array like image data. If not specified by
the user and if the data is a list of arrays that decrease in shape
then it will be taken to be multiscale. The first image in the list
should be the largest.
title : string
The title of the viewer window.
ndisplay : {2, 3}
Number of displayed dimensions.
order : tuple of int
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3.
axis_labels : list of str
Dimension names.
show : bool, optional
Whether to show the viewer after instantiation. by default True.
color : dict of int to str
Custom label to color mapping
Returns
-------
viewer : :class:`napari.Viewer`
The newly-created viewer.
"""
viewer = Viewer(
title=title,
ndisplay=ndisplay,
order=order,
axis_labels=axis_labels,
show=show,
)
viewer.add_labels(
data=data,
multiscale=multiscale,
num_colors=num_colors,
properties=properties,
seed=seed,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
color=color,
)
return viewer
|
def view_labels(
data=None,
*,
num_colors=50,
properties=None,
seed=0.5,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=0.7,
blending='translucent',
visible=True,
multiscale=None,
title='napari',
ndisplay=2,
order=None,
axis_labels=None,
show=True,
color=None,
):
"""Create a viewer and add a labels (or segmentation) layer.
An image-like layer where every pixel contains an integer ID
corresponding to the region it belongs to.
Using the viewer's label editing tools (painting, erasing) will
modify the input-array in-place.
To avoid this, pass a copy as follows:
viewer = napari.view_labels(data.copy(), name="sample")
# do some painting/editing
Get the painted labels as follows:
result = viewer.layers["sample"].data
Parameters
----------
data : array or list of array
Labels data as an array or multiscale.
num_colors : int
Number of unique colors to use in colormap.
properties : dict {str: array (N,)}, DataFrame
Properties for each label. Each property should be an array of length
N, where N is the number of labels, and the first property corresponds to
background.
seed : float
Seed for colormap random generator.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
multiscale : bool
Whether the data is a multiscale image or not. Multiscale data is
represented by a list of array like image data. If not specified by
the user and if the data is a list of arrays that decrease in shape
then it will be taken to be multiscale. The first image in the list
should be the largest.
title : string
The title of the viewer window.
ndisplay : {2, 3}
Number of displayed dimensions.
order : tuple of int
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3.
axis_labels : list of str
Dimension names.
show : bool, optional
Whether to show the viewer after instantiation. by default True.
color : dict of int to str or array
Custom label to color mapping. Values must be valid color names or RGBA arrays.
Returns
-------
viewer : :class:`napari.Viewer`
The newly-created viewer.
"""
viewer = Viewer(
title=title,
ndisplay=ndisplay,
order=order,
axis_labels=axis_labels,
show=show,
)
viewer.add_labels(
data=data,
multiscale=multiscale,
num_colors=num_colors,
properties=properties,
seed=seed,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
color=color,
)
return viewer
|
41,473 |
def eval_func(op, argop, dataop, d):
def func(p):
tensorlib, _ = get_backend()
xx = tensorlib.tolist(p) if isinstance(p, tf.Tensor) else p
yy = tensorlib.tolist(d) if isinstance(d, tf.Tensor) else d
value = tensorlib.session.run(op, feed_dict={argop: xx, dataop: yy})
return value
return func
|
def eval_func(op, argop, dataop, data):
def func(p):
tensorlib, _ = get_backend()
xx = tensorlib.tolist(p) if isinstance(p, tf.Tensor) else p
yy = tensorlib.tolist(d) if isinstance(d, tf.Tensor) else d
value = tensorlib.session.run(op, feed_dict={argop: xx, dataop: yy})
return value
return func
|
7,281 |
def choose(p, shape):
"""
Function to return Bernoulli trials at a given probability
of a given size.
This function is meant as a lower-memory alternative to calls such as
`np.random.choice([True, False], size=image.shape, p=[p, 1-p])`.
While `np.random.choice` can handle many classes, for the 2-class case
(Bernoulli trials), this function is much more efficient.
Parameters
----------
p : float
The probability that any given trial returns `True`.
shape : int or tuple of ints
The shape of the ndarray to return.
Returns
-------
out : ndarray[bool]
The results of Bernoulli trials in the given `size` where success
occurs with probability `p`.
"""
if p == 0:
return np.zeros(shape, dtype=bool)
if p == 1:
return ~np.zeros(shape, dtype=bool)
return np.random.random(shape) <= p
|
def _bernoulli(p, shape):
"""
Function to return Bernoulli trials at a given probability
of a given size.
This function is meant as a lower-memory alternative to calls such as
`np.random.choice([True, False], size=image.shape, p=[p, 1-p])`.
While `np.random.choice` can handle many classes, for the 2-class case
(Bernoulli trials), this function is much more efficient.
Parameters
----------
p : float
The probability that any given trial returns `True`.
shape : int or tuple of ints
The shape of the ndarray to return.
Returns
-------
out : ndarray[bool]
The results of Bernoulli trials in the given `size` where success
occurs with probability `p`.
"""
if p == 0:
return np.zeros(shape, dtype=bool)
if p == 1:
return ~np.zeros(shape, dtype=bool)
return np.random.random(shape) <= p
|
44,034 |
def Interferometer(theta, phi, varphi, wires, mesh="rectangular", beamsplitter="pennylane"):
r"""General linear interferometer, an array of beamsplitters and phase shifters.
For :math:`M` wires, the general interferometer is specified by
providing :math:`M(M-1)/2` transmittivity angles :math:`\theta` and the same number of
phase angles :math:`\phi`, as well as :math:`M-1` additional rotation
parameters :math:`\varphi`.
By specifying the keyword argument ``mesh``, the scheme used to implement the interferometer
may be adjusted:
* ``mesh='rectangular'`` (default): uses the scheme described in
`Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__, resulting in a *rectangular* array of
:math:`M(M-1)/2` beamsplitters arranged in :math:`M` slices and ordered from left
to right and top to bottom in each slice. The first beamsplitter acts on
wires :math:`0` and :math:`1`:
.. figure:: ../../_static/clements.png
:align: center
:width: 30%
:target: javascript:void(0);
* ``mesh='triangular'``: uses the scheme described in `Reck et al. <https://dx.doi.org/10.1103/PhysRevLett.73.58>`__,
resulting in a *triangular* array of :math:`M(M-1)/2` beamsplitters arranged in
:math:`2M-3` slices and ordered from left to right and top to bottom. The
first and fourth beamsplitters act on wires :math:`M-1` and :math:`M`, the second
on :math:`M-2` and :math:`M-1`, and the third on :math:`M-3` and :math:`M-2`, and
so on.
.. figure:: ../../_static/reck.png
:align: center
:width: 30%
:target: javascript:void(0);
In both schemes, the network of :class:`~pennylane.ops.Beamsplitter` operations is followed by
:math:`M` local :class:`~pennylane.ops.Rotation` Operations.
The rectangular decomposition is generally advantageous, as it has a lower
circuit depth (:math:`M` vs :math:`2M-3`) and optical depth than the triangular
decomposition, resulting in reduced optical loss.
This is an example of a 4-mode interferometer with beamsplitters :math:`B` and rotations :math:`R`,
using ``mesh='rectangular'``:
.. figure:: ../../_static/layer_interferometer.png
:align: center
:width: 60%
:target: javascript:void(0);
.. note::
The decomposition as formulated in `Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__ uses a different
convention for a beamsplitter :math:`T(\theta, \phi)` than PennyLane, namely:
.. math:: T(\theta, \phi) = BS(\theta, 0) R(\phi)
For the universality of the decomposition, the used convention is irrelevant, but
for a given set of angles the resulting interferometers will be different.
If an interferometer consistent with the convention from `Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__
is needed, the optional keyword argument ``beamsplitter='clements'`` can be specified. This
will result in each :class:`~pennylane.ops.Beamsplitter` being preceded by a :class:`~pennylane.ops.Rotation` and
thus increase the number of elementary operations in the circuit.
Args:
theta (tensor_like): size :math:`(M(M-1)/2,)` tensor of transmittivity angles :math:`\theta`
phi (tensor_like): size :math:`(M(M-1)/2,)` tensor of phase angles :math:`\phi`
varphi (tensor_like): size :math:`(M,)` tensor of rotation angles :math:`\varphi`
wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
a Wires object.
mesh (string): the type of mesh to use
beamsplitter (str): if ``clements``, the beamsplitter convention from
Clements et al. 2016 (https://dx.doi.org/10.1364/OPTICA.3.001460) is used; if ``pennylane``, the
beamsplitter is implemented via PennyLane's ``Beamsplitter`` operation.
Raises:
ValueError: if inputs do not have the correct format
Example:
The template requires :math:`3` sets of parameters. The ``mesh`` and ``beamsplitter`` arguments are optional and
have ``'rectangular'`` and ``'pennylane'`` as default values.
.. code-block:: python
dev = qml.device('default.gaussian', wires=4)
@qml.qnode(dev)
def circuit():
qml.Interferometer(*params, wires=range(4))
return qml.expval(qml.Identity(0))
shapes = [[6, ], [6, ], [4, ]]
params = []
for shape in shapes:
params.append(np.random.random(shape))
Using these random parameters, the resulting circuit is:
>>> print(qml.draw(circuit)())
0: ──╭BS(0.0522, 0.0472)────────────────────╭BS(0.438, 0.222)───R(0.606)────────────────────┤ ⟨I⟩
1: ──╰BS(0.0522, 0.0472)──╭BS(0.994, 0.59)──╰BS(0.438, 0.222)──╭BS(0.823, 0.623)──R(0.221)──┤
2: ──╭BS(0.636, 0.298)────╰BS(0.994, 0.59)──╭BS(0.0818, 0.72)──╰BS(0.823, 0.623)──R(0.807)──┤
3: ──╰BS(0.636, 0.298)──────────────────────╰BS(0.0818, 0.72)───R(0.854)────────────────────┤
Using different values for optional arguments:
.. code-block:: python
@qml.qnode(dev)
def circuit():
qml.Interferometer(*params, wires=range(4), mesh='triangular', beamsplitter='clements')
return qml.expval(qml.Identity(0))
shapes = [[6, ], [6, ], [4, ]]
params = []
for shape in shapes:
params.append(np.random.random(shape))
The resulting circuit in this case is:
>>> print(qml.draw(circuit)())
0: ──R(0.713)──────────────────────────────────╭BS(0.213, 0)───R(0.681)──────────────────────────────────────────────────────────┤ ⟨I⟩
1: ──R(0.00912)─────────────────╭BS(0.239, 0)──╰BS(0.213, 0)───R(0.388)──────╭BS(0.622, 0)──R(0.567)─────────────────────────────┤
2: ──R(0.43)─────╭BS(0.534, 0)──╰BS(0.239, 0)───R(0.189)──────╭BS(0.809, 0)──╰BS(0.622, 0)──R(0.309)──╭BS(0.00845, 0)──R(0.757)──┤
3: ──────────────╰BS(0.534, 0)────────────────────────────────╰BS(0.809, 0)───────────────────────────╰BS(0.00845, 0)──R(0.527)──┤
"""
wires = Wires(wires)
M = len(wires)
shape_varphi = _preprocess(theta, phi, varphi, wires)
with qml.tape.OperationRecorder() as rec:
if M == 1:
# the interferometer is a single rotation
Rotation(varphi[0], wires=wires[0])
else:
n = 0 # keep track of free parameters
if mesh == "rectangular":
# Apply the Clements beamsplitter array
# The array depth is N
for l in range(M):
for k, (w1, w2) in enumerate(zip(wires[:-1], wires[1:])):
# skip even or odd pairs depending on layer
if (l + k) % 2 != 1:
if beamsplitter == "clements":
Rotation(phi[n], wires=Wires(w1))
Beamsplitter(theta[n], 0, wires=Wires([w1, w2]))
elif beamsplitter == "pennylane":
Beamsplitter(theta[n], phi[n], wires=Wires([w1, w2]))
else:
raise ValueError(f"did not recognize beamsplitter {beamsplitter}")
n += 1
elif mesh == "triangular":
# apply the Reck beamsplitter array
# The array depth is 2*N-3
for l in range(2 * M - 3):
for k in range(abs(l + 1 - (M - 1)), M - 1, 2):
if beamsplitter == "clements":
Rotation(phi[n], wires=wires[k])
Beamsplitter(theta[n], 0, wires=wires.subset([k, k + 1]))
elif beamsplitter == "pennylane":
Beamsplitter(theta[n], phi[n], wires=wires.subset([k, k + 1]))
else:
raise ValueError(f"did not recognize beamsplitter {beamsplitter} ")
n += 1
else:
raise ValueError(f"did not recognize mesh {mesh}")
# apply the final local phase shifts to all modes
for i in range(shape_varphi[0]):
act_on = wires[i]
Rotation(varphi[i], wires=act_on)
return rec.queue
|
def Interferometer(theta, phi, varphi, wires, mesh="rectangular", beamsplitter="pennylane"):
r"""General linear interferometer, an array of beamsplitters and phase shifters.
For :math:`M` wires, the general interferometer is specified by
providing :math:`M(M-1)/2` transmittivity angles :math:`\theta` and the same number of
phase angles :math:`\phi`, as well as :math:`M-1` additional rotation
parameters :math:`\varphi`.
By specifying the keyword argument ``mesh``, the scheme used to implement the interferometer
may be adjusted:
* ``mesh='rectangular'`` (default): uses the scheme described in
`Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__, resulting in a *rectangular* array of
:math:`M(M-1)/2` beamsplitters arranged in :math:`M` slices and ordered from left
to right and top to bottom in each slice. The first beamsplitter acts on
wires :math:`0` and :math:`1`:
.. figure:: ../../_static/clements.png
:align: center
:width: 30%
:target: javascript:void(0);
* ``mesh='triangular'``: uses the scheme described in `Reck et al. <https://dx.doi.org/10.1103/PhysRevLett.73.58>`__,
resulting in a *triangular* array of :math:`M(M-1)/2` beamsplitters arranged in
:math:`2M-3` slices and ordered from left to right and top to bottom. The
first and fourth beamsplitters act on wires :math:`M-1` and :math:`M`, the second
on :math:`M-2` and :math:`M-1`, and the third on :math:`M-3` and :math:`M-2`, and
so on.
.. figure:: ../../_static/reck.png
:align: center
:width: 30%
:target: javascript:void(0);
In both schemes, the network of :class:`~pennylane.ops.Beamsplitter` operations is followed by
:math:`M` local :class:`~pennylane.ops.Rotation` Operations.
The rectangular decomposition is generally advantageous, as it has a lower
circuit depth (:math:`M` vs :math:`2M-3`) and optical depth than the triangular
decomposition, resulting in reduced optical loss.
This is an example of a 4-mode interferometer with beamsplitters :math:`B` and rotations :math:`R`,
using ``mesh='rectangular'``:
.. figure:: ../../_static/layer_interferometer.png
:align: center
:width: 60%
:target: javascript:void(0);
.. note::
The decomposition as formulated in `Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__ uses a different
convention for a beamsplitter :math:`T(\theta, \phi)` than PennyLane, namely:
.. math:: T(\theta, \phi) = BS(\theta, 0) R(\phi)
For the universality of the decomposition, the used convention is irrelevant, but
for a given set of angles the resulting interferometers will be different.
If an interferometer consistent with the convention from `Clements et al. <https://dx.doi.org/10.1364/OPTICA.3.001460>`__
is needed, the optional keyword argument ``beamsplitter='clements'`` can be specified. This
will result in each :class:`~pennylane.ops.Beamsplitter` being preceded by a :class:`~pennylane.ops.Rotation` and
thus increase the number of elementary operations in the circuit.
Args:
theta (tensor_like): size :math:`(M(M-1)/2,)` tensor of transmittivity angles :math:`\theta`
phi (tensor_like): size :math:`(M(M-1)/2,)` tensor of phase angles :math:`\phi`
varphi (tensor_like): size :math:`(M,)` tensor of rotation angles :math:`\varphi`
wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers or strings, or
a Wires object.
mesh (string): the type of mesh to use
beamsplitter (str): if ``clements``, the beamsplitter convention from
Clements et al. 2016 (https://dx.doi.org/10.1364/OPTICA.3.001460) is used; if ``pennylane``, the
beamsplitter is implemented via PennyLane's ``Beamsplitter`` operation.
Raises:
ValueError: if inputs do not have the correct format
Example:
The template requires :math:`3` sets of parameters. The ``mesh`` and ``beamsplitter`` keyword arguments are optional and
have ``'rectangular'`` and ``'pennylane'`` as default values.
.. code-block:: python
dev = qml.device('default.gaussian', wires=4)
@qml.qnode(dev)
def circuit():
qml.Interferometer(*params, wires=range(4))
return qml.expval(qml.Identity(0))
shapes = [[6, ], [6, ], [4, ]]
params = []
for shape in shapes:
params.append(np.random.random(shape))
Using these random parameters, the resulting circuit is:
>>> print(qml.draw(circuit)())
0: ──╭BS(0.0522, 0.0472)────────────────────╭BS(0.438, 0.222)───R(0.606)────────────────────┤ ⟨I⟩
1: ──╰BS(0.0522, 0.0472)──╭BS(0.994, 0.59)──╰BS(0.438, 0.222)──╭BS(0.823, 0.623)──R(0.221)──┤
2: ──╭BS(0.636, 0.298)────╰BS(0.994, 0.59)──╭BS(0.0818, 0.72)──╰BS(0.823, 0.623)──R(0.807)──┤
3: ──╰BS(0.636, 0.298)──────────────────────╰BS(0.0818, 0.72)───R(0.854)────────────────────┤
Using different values for optional arguments:
.. code-block:: python
@qml.qnode(dev)
def circuit():
qml.Interferometer(*params, wires=range(4), mesh='triangular', beamsplitter='clements')
return qml.expval(qml.Identity(0))
shapes = [[6, ], [6, ], [4, ]]
params = []
for shape in shapes:
params.append(np.random.random(shape))
The resulting circuit in this case is:
>>> print(qml.draw(circuit)())
0: ──R(0.713)──────────────────────────────────╭BS(0.213, 0)───R(0.681)──────────────────────────────────────────────────────────┤ ⟨I⟩
1: ──R(0.00912)─────────────────╭BS(0.239, 0)──╰BS(0.213, 0)───R(0.388)──────╭BS(0.622, 0)──R(0.567)─────────────────────────────┤
2: ──R(0.43)─────╭BS(0.534, 0)──╰BS(0.239, 0)───R(0.189)──────╭BS(0.809, 0)──╰BS(0.622, 0)──R(0.309)──╭BS(0.00845, 0)──R(0.757)──┤
3: ──────────────╰BS(0.534, 0)────────────────────────────────╰BS(0.809, 0)───────────────────────────╰BS(0.00845, 0)──R(0.527)──┤
"""
wires = Wires(wires)
M = len(wires)
shape_varphi = _preprocess(theta, phi, varphi, wires)
with qml.tape.OperationRecorder() as rec:
if M == 1:
# the interferometer is a single rotation
Rotation(varphi[0], wires=wires[0])
else:
n = 0 # keep track of free parameters
if mesh == "rectangular":
# Apply the Clements beamsplitter array
# The array depth is N
for l in range(M):
for k, (w1, w2) in enumerate(zip(wires[:-1], wires[1:])):
# skip even or odd pairs depending on layer
if (l + k) % 2 != 1:
if beamsplitter == "clements":
Rotation(phi[n], wires=Wires(w1))
Beamsplitter(theta[n], 0, wires=Wires([w1, w2]))
elif beamsplitter == "pennylane":
Beamsplitter(theta[n], phi[n], wires=Wires([w1, w2]))
else:
raise ValueError(f"did not recognize beamsplitter {beamsplitter}")
n += 1
elif mesh == "triangular":
# apply the Reck beamsplitter array
# The array depth is 2*N-3
for l in range(2 * M - 3):
for k in range(abs(l + 1 - (M - 1)), M - 1, 2):
if beamsplitter == "clements":
Rotation(phi[n], wires=wires[k])
Beamsplitter(theta[n], 0, wires=wires.subset([k, k + 1]))
elif beamsplitter == "pennylane":
Beamsplitter(theta[n], phi[n], wires=wires.subset([k, k + 1]))
else:
raise ValueError(f"did not recognize beamsplitter {beamsplitter} ")
n += 1
else:
raise ValueError(f"did not recognize mesh {mesh}")
# apply the final local phase shifts to all modes
for i in range(shape_varphi[0]):
act_on = wires[i]
Rotation(varphi[i], wires=act_on)
return rec.queue
|
42,085 |
def _get_contour_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = _filter_nonfinite(
[trial for trial in study.trials if trial.state == TrialState.COMPLETE], target=target
)
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
padding_ratio = 0.05
param_values_range = {}
for p_name in sorted_params:
values = _get_param_values(trials, p_name)
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_numerical(trials, p_name):
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
else:
# Plotly>=4.12.0 draws contours using the indices of categorical variables instead of
# raw values and the range should be updated based on the cardinality of categorical
# variables. See https://github.com/optuna/optuna/issues/1967.
if version.parse(plotly.__version__) >= version.parse("4.12.0"):
span = len(set(values)) - 1
padding = span * padding_ratio
min_value = -padding
max_value = span + padding
param_values_range[p_name] = (min_value, max_value)
reverse_scale = _is_reverse_scale(study, target)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, reverse_scale, param_values_range, target, target_name
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if not _is_numerical(trials, x_param):
figure.update_xaxes(type="category")
if not _is_numerical(trials, y_param):
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials,
x_param,
y_param,
reverse_scale,
param_values_range,
target,
target_name,
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if not _is_numerical(trials, x_param):
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if not _is_numerical(trials, y_param):
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
def _get_contour_plot(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = _filter_nonfinite(
study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target
)
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(all_params)
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(set(params))
padding_ratio = 0.05
param_values_range = {}
for p_name in sorted_params:
values = _get_param_values(trials, p_name)
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_numerical(trials, p_name):
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
else:
# Plotly>=4.12.0 draws contours using the indices of categorical variables instead of
# raw values and the range should be updated based on the cardinality of categorical
# variables. See https://github.com/optuna/optuna/issues/1967.
if version.parse(plotly.__version__) >= version.parse("4.12.0"):
span = len(set(values)) - 1
padding = span * padding_ratio
min_value = -padding
max_value = span + padding
param_values_range[p_name] = (min_value, max_value)
reverse_scale = _is_reverse_scale(study, target)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, reverse_scale, param_values_range, target, target_name
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if not _is_numerical(trials, x_param):
figure.update_xaxes(type="category")
if not _is_numerical(trials, y_param):
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials,
x_param,
y_param,
reverse_scale,
param_values_range,
target,
target_name,
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if not _is_numerical(trials, x_param):
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if not _is_numerical(trials, y_param):
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
33,451 |
def error_code_to_str(code: Optional[int]) -> str:
"""
Converts a given error code (errno) to a useful and human readable string.
:param code: a possibly invalid/unknown error code
:returns: a string explaining and containing the given error code, or a string
explaining that the errorcode is unknown if that is the case
"""
name = errno.errorcode.get(code, "UNKNOWN") if code is not None else "UNKNOWN"
description = os.strerror(code) if code is not None else "NO DESCRIPTION AVAILABLE"
return f"{name} (errno {code}): {description}"
|
def error_code_to_str(code: Optional[int]) -> str:
"""
Converts a given error code (errno) to a useful and human readable string.
:param code: a possibly invalid/unknown error code
:returns: a string explaining and containing the given error code, or a string
explaining that the errorcode is unknown if that is the case
"""
name = errno.errorcode.get(code, "UNKNOWN")
description = os.strerror(code) if code is not None else "NO DESCRIPTION AVAILABLE"
return f"{name} (errno {code}): {description}"
|
5,564 |
def drop_traitlets_dir(self):
"""Exclude members inherited from HasTraits and '_'/'__' members for dir()."""
traits_dir = dir(HasTraits)
class_dir = dir(type(self))
return [name for name in class_dir if name not in traits_dir if not name.startswith('_')]
|
def drop_traitlets_dir(self):
"""Exclude members inherited from HasTraits and '_'/'__' members for dir()."""
traits_dir = dir(HasTraits)
class_dir = dir(type(self))
return filter(lambda name: not (name in traits_dir or name.startswith('_')), class_dir)
|
23,663 |
def extract_metadata_from_dataset(ds):
metadata = {}
for v in list(ds.variables):
metadata[v] = {
'name': ds[v].name,
'long_name': ds[v].long_name}
if v.lower() != 'time':
metadata[v].update({'units': ds[v].units})
metadata['dims'] = dict(ds.dims)
metadata.update(ds.attrs) # add arbitrary metadata
return metadata
|
def extract_metadata_from_dataset(ds):
metadata = {}
for v in list(ds.variables):
metadata[v] = {
'name': ds[v].name,
'long_name': ds[v].long_name}
if v.lower() != 'time':
metadata[v]['units'] = ds[v].units
metadata['dims'] = dict(ds.dims)
metadata.update(ds.attrs) # add arbitrary metadata
return metadata
|
2,512 |
def test_svc_nonfinite_params():
# Check SVC throws ValueError when dealing with non-finite parameter values
X = np.array(
[
(1.30830774e307, 6.02217328e307),
(1.54166067e308, 1.75812744e308),
(5.57938866e307, 4.13840113e307),
(1.36302835e308, 1.07968131e308),
(1.58772669e308, 1.19380571e307),
(2.20362426e307, 1.58814671e308),
(1.06216028e308, 1.14258583e308),
(7.18031911e307, 1.69661213e308),
(7.91182553e307, 5.12892426e307),
(5.58470885e307, 9.13566765e306),
]
)
y = np.array([0, 0, 1, 0, 0, 0, 1, 0, 1, 0])
clf = svm.SVC()
msg = "Iterative parameter estimation led to non-finite values"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
|
def test_svc_nonfinite_params():
# SVC must throw ValueError when dealing with non-finite parameter values
X = np.array(
[
(1.30830774e307, 6.02217328e307),
(1.54166067e308, 1.75812744e308),
(5.57938866e307, 4.13840113e307),
(1.36302835e308, 1.07968131e308),
(1.58772669e308, 1.19380571e307),
(2.20362426e307, 1.58814671e308),
(1.06216028e308, 1.14258583e308),
(7.18031911e307, 1.69661213e308),
(7.91182553e307, 5.12892426e307),
(5.58470885e307, 9.13566765e306),
]
)
y = np.array([0, 0, 1, 0, 0, 0, 1, 0, 1, 0])
clf = svm.SVC()
msg = "Iterative parameter estimation led to non-finite values"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
|
14,917 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Emoncms sensor."""
apikey = config.get(CONF_API_KEY)
url = config.get(CONF_URL)
sensorid = config.get(CONF_ID)
value_template = config.get(CONF_VALUE_TEMPLATE)
unit_of_measurement = ""
exclude_feeds = config.get(CONF_EXCLUDE_FEEDID)
include_only_feeds = config.get(CONF_ONLY_INCLUDE_FEEDID)
sensor_names = config.get(CONF_SENSOR_NAMES)
interval = config.get(CONF_SCAN_INTERVAL)
if value_template is not None:
value_template.hass = hass
data = EmonCmsData(hass, url, apikey, interval)
data.update()
if data.data is None:
return False
sensors = []
for elem in data.data:
if exclude_feeds is not None:
if int(elem["id"]) in exclude_feeds:
continue
if include_only_feeds is not None:
if int(elem["id"]) not in include_only_feeds:
continue
name = None
if sensor_names is not None:
name = sensor_names.get(int(elem["id"]), None)
# Use specified Unit in preference to Emoncms Feed Unit else default unit
# For backward compatibility
if config.get(CONF_UNIT_OF_MEASUREMENT) != "":
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
elif 'unit' in elem and elem["unit"] != "":
unit_of_measurement = elem["unit"]
else:
unit_of_measurement = DEFAULT_UNIT
sensors.append(
EmonCmsSensor(
hass,
data,
name,
value_template,
unit_of_measurement,
str(sensorid),
elem,
)
)
add_entities(sensors)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Emoncms sensor."""
apikey = config.get(CONF_API_KEY)
url = config.get(CONF_URL)
sensorid = config.get(CONF_ID)
value_template = config.get(CONF_VALUE_TEMPLATE)
unit_of_measurement = ""
exclude_feeds = config.get(CONF_EXCLUDE_FEEDID)
include_only_feeds = config.get(CONF_ONLY_INCLUDE_FEEDID)
sensor_names = config.get(CONF_SENSOR_NAMES)
interval = config.get(CONF_SCAN_INTERVAL)
if value_template is not None:
value_template.hass = hass
data = EmonCmsData(hass, url, apikey, interval)
data.update()
if data.data is None:
return False
sensors = []
for elem in data.data:
if exclude_feeds is not None:
if int(elem["id"]) in exclude_feeds:
continue
if include_only_feeds is not None:
if int(elem["id"]) not in include_only_feeds:
continue
name = None
if sensor_names is not None:
name = sensor_names.get(int(elem["id"]), None)
# Use specified Unit in preference to Emoncms Feed Unit else default unit
# For backward compatibility
if config.get(CONF_UNIT_OF_MEASUREMENT) != "":
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
if elem.get("unit"):
unit_of_measurement = elem["unit"]
else:
unit_of_measurement = DEFAULT_UNIT
sensors.append(
EmonCmsSensor(
hass,
data,
name,
value_template,
unit_of_measurement,
str(sensorid),
elem,
)
)
add_entities(sensors)
|
44,515 |
def filter_docstring_args(
signature: inspect.Signature,
docstring: str,
is_init_signature: bool = False,
) -> Dict[str, str]:
"""Removes unused params from docstring Args section.
Args:
signature (inspect.Signature): Model Builder SDK Method Signature.
docstring (str): Model Builder SDK Method docstring from method.__doc__
is_init_signature (bool): is this constructor signature
Returns:
Dictionary of Arg names as keys and descriptions as values.
"""
try:
parsed_docstring = docstring_parser.parse(docstring)
except ValueError:
return {}
args_dict = {p.arg_name: p.description for p in parsed_docstring.params}
new_args_dict = {}
for param in signature.parameters.values():
if param.name not in PARAMS_TO_REMOVE:
new_arg_name = param.name
# change resource name signatures to resource types
# to match new param.names ie: model_name -> model
if is_init_signature and is_resource_name_parameter_name(param.name
):
new_arg_name = param.name[:-len('_name')]
# check if there was an arg description for this parameter.
if args_dict.get(param.name, False):
new_args_dict[new_arg_name] = args_dict.get(param.name)
return new_args_dict
|
def filter_docstring_args(
signature: inspect.Signature,
docstring: str,
is_init_signature: bool = False,
) -> Dict[str, str]:
"""Removes unused params from docstring Args section.
Args:
signature (inspect.Signature): Model Builder SDK Method Signature.
docstring (str): Model Builder SDK Method docstring from method.__doc__
is_init_signature (bool): is this constructor signature
Returns:
Dictionary of Arg names as keys and descriptions as values.
"""
try:
parsed_docstring = docstring_parser.parse(docstring)
except ValueError:
return {}
args_dict = {p.arg_name: p.description for p in parsed_docstring.params}
new_args_dict = {}
for param in signature.parameters.values():
if param.name not in PARAMS_TO_REMOVE:
new_arg_name = param.name
# change resource name signatures to resource types
# to match new param.names ie: model_name -> model
if is_init_signature and is_resource_name_parameter_name(param.name
):
new_arg_name = param.name[:-len('_name')]
# check if there was an arg description for this parameter.
if args_dict.get(param.name):
new_args_dict[new_arg_name] = args_dict.get(param.name)
return new_args_dict
|
2,548 |
def test_make_blobs_memory_usage():
try:
import memory_profiler
has_memory_profiler = True
except:
has_memory_profiler = False
if not has_memory_profiler:
pytest.skip("memory_profiler is not available.")
blobs_opts = {
"n_samples": 10 ** 4,
"n_features": 10 ** 4,
"centers": 10,
"random_state": 10,
"return_centers": True,
"shuffle": False,
}
# maximum memory usage in MB
actual_memory_usage, (X, y, c) = memory_profiler.memory_usage(
(partial(make_blobs, **blobs_opts), ()),
max_iterations=1,
max_usage=True,
retval=True,
)
memory_usage_X = (
blobs_opts["n_samples"] * blobs_opts["n_features"] * X.dtype.itemsize
)
memory_usage_y = blobs_opts["n_samples"] * y.dtype.itemsize
memory_usage_c = blobs_opts["centers"] * blobs_opts["n_features"] * c.dtype.itemsize
calc_memory_useage_mb = (memory_usage_X + memory_usage_y + memory_usage_c) / 1048576
# make sure actual memory usage is relatively close to theratical amount
assert actual_memory_usage < calc_memory_useage_mb * 1.3
|
def test_make_blobs_memory_usage():
try:
import memory_profiler
has_memory_profiler = True
except:
has_memory_profiler = False
if not has_memory_profiler:
pytest.skip("memory_profiler is not available.")
blobs_opts = {
"n_samples": 10 ** 4,
"n_features": 10 ** 4,
"centers": 10,
"random_state": 10,
"return_centers": True,
"shuffle": False,
}
# maximum memory usage in MiB
actual_memory_usage, (X, y, c) = memory_profiler.memory_usage(
(partial(make_blobs, **blobs_opts), ()),
max_iterations=1,
max_usage=True,
retval=True,
)
memory_usage_X = (
blobs_opts["n_samples"] * blobs_opts["n_features"] * X.dtype.itemsize
)
memory_usage_y = blobs_opts["n_samples"] * y.dtype.itemsize
memory_usage_c = blobs_opts["centers"] * blobs_opts["n_features"] * c.dtype.itemsize
calc_memory_useage_mb = (memory_usage_X + memory_usage_y + memory_usage_c) / 1048576
# make sure actual memory usage is relatively close to theratical amount
assert actual_memory_usage < calc_memory_useage_mb * 1.3
|
30,110 |
def test_multi_index_load_from_directory_3(runtmp):
# check that force=False fails properly when confrunted with non-JSON
# files that are legit sourmash files...
c = runtmp
dirname = utils.get_test_data('prot')
count = 0
for root, dirs, files in os.walk(dirname):
for name in files:
print(f"at {name}")
fullname = os.path.join(root, name)
copyto = c.output(f"file{count}.sig")
shutil.copyfile(fullname, copyto)
count += 1
with pytest.raises(ValueError):
mi = MultiIndex.load_from_directory(c.location, force=False)
|
def test_multi_index_load_from_directory_3(runtmp):
# check that force=False fails properly when confronted with non-JSON
# files that are legit sourmash files...
c = runtmp
dirname = utils.get_test_data('prot')
count = 0
for root, dirs, files in os.walk(dirname):
for name in files:
print(f"at {name}")
fullname = os.path.join(root, name)
copyto = c.output(f"file{count}.sig")
shutil.copyfile(fullname, copyto)
count += 1
with pytest.raises(ValueError):
mi = MultiIndex.load_from_directory(c.location, force=False)
|
39,366 |
def to_surf_mesh(actor, surf, mapper, prop, add_attr={}):
"""Convert a pyvista surface to a buffer geometry.
General Notes
-------------
* THREE.BufferGeometry expects position and index attributes
representing a triangulated mesh points and face indices or just
a position array representing individual faces of a mesh.
* The normals attribute is needed for physically based rendering,
but not for the other mesh types.
* Colors must be a RGB array with one value per point.
Shading Notes
-------------
To match VTK, the following materials are used to match VTK's shading:
* MeshPhysicalMaterial when physically based rendering is enabled
* MeshPhongMaterial when physically based rendering is disabled,
but lighting is enabled.
* MeshBasicMaterial when lighting is disabled.
"""
# convert to an all-triangular surface
if surf.is_all_triangles():
trimesh = surf
else:
trimesh = surf.triangulate()
position = array_to_float_buffer(trimesh.points)
# convert to minimum index type
face_ind = trimesh.faces.reshape(-1, 4)[:, 1:]
index = cast_to_min_size(face_ind, trimesh.n_points)
attr = {'position': position,
'index': index,
}
if prop.GetInterpolation(): # something other than flat shading
attr['normal'] = buffer_normals(trimesh)
# extract point/cell scalars for coloring
colors = None
scalar_mode = mapper.GetScalarModeAsString()
if scalar_mode == 'UsePointData':
colors = map_scalars(mapper, trimesh.point_data.active_scalars)
elif scalar_mode == 'UseCellData':
# special handling for RGBA
if mapper.GetColorMode() == 2:
scalars = trimesh.cell_data.active_scalars.repeat(3, axis=0)
scalars = scalars.astype(np.float32, copy=False)
colors = scalars[:, :3]/255 # ignore alpha
else:
# must repeat for each triangle
scalars = trimesh.cell_data.active_scalars.repeat(3)
colors = map_scalars(mapper, scalars)
position = array_to_float_buffer(trimesh.points[face_ind])
attr = {'position': position}
# add colors to the buffer geometry attributes
if colors is not None:
attr['color'] = array_to_float_buffer(colors)
# texture coordinates
t_coords = trimesh.active_t_coords
if t_coords is not None:
attr['uv'] = array_to_float_buffer(t_coords)
# TODO: Convert PBR textures
# base_color_texture = prop.GetTexture("albedoTex")
# orm_texture = prop.GetTexture("materialTex")
# anisotropy_texture = prop.GetTexture("anisotropyTex")
# normal_texture = prop.GetTexture("normalTex")
# emissive_texture = prop.GetTexture("emissiveTex")
# coatnormal_texture = prop.GetTexture("coatNormalTex")
if prop.GetNumberOfTextures(): # pragma: no cover
warnings.warn('pythreejs converter does not support PBR textures (yet).')
# create base buffer geometry
surf_geo = tjs.BufferGeometry(attributes=attr)
# add texture to the surface buffer if available
texture = actor.GetTexture()
tjs_texture = None
if texture is not None:
wrapped_tex = pv.wrap(texture.GetInput())
data = wrapped_tex.active_scalars
dim = (wrapped_tex.dimensions[0],
wrapped_tex.dimensions[1],
data.shape[1])
data = data.reshape(dim)
fmt = "RGBFormat" if data.shape[1] == 3 else "RGBAFormat"
# Create data texture and catch invalid warning
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Given trait value dtype")
tjs_texture = tjs.DataTexture(data=data,
format="RGBFormat",
type="UnsignedByteType")
# these attributes are always used regardless of the material
shared_attr = {
'vertexColors': get_coloring(mapper, trimesh),
'wireframe': prop.GetRepresentation() == 1,
'opacity': prop.GetOpacity(),
'wireframeLinewidth': prop.GetLineWidth(),
# 'side': 'DoubleSide' # enabling seems to mess with textures
}
if colors is None:
shared_attr['color'] = color_to_hex(prop.GetColor())
if tjs_texture is not None:
shared_attr['map'] = tjs_texture
else:
shared_attr['side'] = 'DoubleSide'
if prop.GetOpacity() < 1.0:
shared_attr['transparent'] = True
if prop.GetInterpolation() == 3: # using physically based rendering
material = tjs.MeshPhysicalMaterial(flatShading=False,
roughness=prop.GetRoughness(),
metalness=prop.GetMetallic(),
reflectivity=0,
**shared_attr, **add_attr)
elif prop.GetLighting():
# specular disabled to fix lighting issues
material = tjs.MeshPhongMaterial(shininess=0,
flatShading=prop.GetInterpolation() == 0,
specular=color_to_hex((0, 0, 0)),
reflectivity=0,
**shared_attr,
**add_attr)
else: # no lighting
material = tjs.MeshBasicMaterial(**shared_attr, **add_attr)
return tjs.Mesh(geometry=surf_geo, material=material)
|
def to_surf_mesh(actor, surf, mapper, prop, add_attr={}):
"""Convert a pyvista surface to a buffer geometry.
General Notes
-------------
* THREE.BufferGeometry expects position and index attributes
representing a triangulated mesh points and face indices or just
a position array representing individual faces of a mesh.
* The normals attribute is needed for physically based rendering,
but not for the other mesh types.
* Colors must be a RGB array with one value per point.
Shading Notes
-------------
To match VTK, the following materials are used to match VTK's shading:
* MeshPhysicalMaterial when physically based rendering is enabled
* MeshPhongMaterial when physically based rendering is disabled,
but lighting is enabled.
* MeshBasicMaterial when lighting is disabled.
"""
# convert to an all-triangular surface
if surf.is_all_triangles():
trimesh = surf
else:
trimesh = surf.triangulate()
position = array_to_float_buffer(trimesh.points)
# convert to minimum index type
face_ind = trimesh.faces.reshape(-1, 4)[:, 1:]
index = cast_to_min_size(face_ind, trimesh.n_points)
attr = {'position': position,
'index': index,
}
if prop.GetInterpolation(): # something other than flat shading
attr['normal'] = buffer_normals(trimesh)
# extract point/cell scalars for coloring
colors = None
scalar_mode = mapper.GetScalarModeAsString()
if scalar_mode == 'UsePointData':
colors = map_scalars(mapper, trimesh.point_data.active_scalars)
elif scalar_mode == 'UseCellData':
# special handling for RGBA
if mapper.GetColorMode() == 2:
scalars = trimesh.cell_data.active_scalars.repeat(3, axis=0)
scalars = scalars.astype(np.float32, copy=False)
colors = scalars[:, :3]/255 # ignore alpha
else:
# must repeat for each triangle
scalars = trimesh.cell_data.active_scalars.repeat(3)
colors = map_scalars(mapper, scalars)
position = array_to_float_buffer(trimesh.points[face_ind])
attr = {'position': position}
# add colors to the buffer geometry attributes
if colors is not None:
attr['color'] = array_to_float_buffer(colors)
# texture coordinates
t_coords = trimesh.active_t_coords
if t_coords is not None:
attr['uv'] = array_to_float_buffer(t_coords)
# TODO: Convert PBR textures
# base_color_texture = prop.GetTexture("albedoTex")
# orm_texture = prop.GetTexture("materialTex")
# anisotropy_texture = prop.GetTexture("anisotropyTex")
# normal_texture = prop.GetTexture("normalTex")
# emissive_texture = prop.GetTexture("emissiveTex")
# coatnormal_texture = prop.GetTexture("coatNormalTex")
if prop.GetNumberOfTextures(): # pragma: no cover
warnings.warn('pythreejs converter does not support PBR textures (yet).')
# create base buffer geometry
surf_geo = tjs.BufferGeometry(attributes=attr)
# add texture to the surface buffer if available
texture = actor.GetTexture()
tjs_texture = None
if texture is not None:
wrapped_tex = pv.wrap(texture.GetInput())
data = wrapped_tex.active_scalars
dim = (wrapped_tex.dimensions[0],
wrapped_tex.dimensions[1],
data.shape[1])
data = data.reshape(dim)
fmt = "RGBFormat" if data.shape[1] == 3 else "RGBAFormat"
# Create data texture and catch invalid warning
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Given trait value dtype")
tjs_texture = tjs.DataTexture(data=data,
format="RGBFormat",
type="UnsignedByteType")
# these attributes are always used regardless of the material
shared_attr = {
'vertexColors': get_coloring(mapper, trimesh),
'wireframe': prop.GetRepresentation() == 1,
'opacity': prop.GetOpacity(),
'wireframeLinewidth': prop.GetLineWidth(),
# 'side': 'DoubleSide' # enabling seems to mess with textures
}
if colors is None:
shared_attr['color'] = color_to_hex(prop.GetColor())
if tjs_texture is not None:
shared_attr['map'] = tjs_texture
else:
shared_attr['side'] = 'DoubleSide'
if prop.GetOpacity() < 1.0:
shared_attr['transparent'] = True
if prop.GetInterpolation() == 3: # using physically based rendering
material = tjs.MeshPhysicalMaterial(flatShading=False,
roughness=prop.GetRoughness(),
metalness=prop.GetMetallic(),
reflectivity=0,
**shared_attr, **add_attr)
elif prop.GetLighting():
# specular disabled to fix lighting issues
material = tjs.MeshPhongMaterial(shininess=0,
flatShading=prop.GetInterpolation() == 0,
specular=color_to_hex((0, 0, 0)),
reflectivity=0,
**shared_attr,
**add_attr)
else: # no lighting
material = tjs.MeshBasicMaterial(**shared_attr, **add_attr)
return tjs.Mesh(geometry=surf_geo, material=material)
|
27,458 |
def run_conda_forge_specific(meta, recipe_dir, lints, hints):
gh = github.Github(os.environ["GH_TOKEN"])
package_section = get_section(meta, "package", lints)
extra_section = get_section(meta, "extra", lints)
sources_section = get_section(meta, "source", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
recipe_name = package_section.get("name", "").strip()
is_staged_recipes = recipe_dirname != "recipe"
# 1: Check that the recipe does not exist in conda-forge or bioconda
if is_staged_recipes and recipe_name:
cf = gh.get_user(os.getenv("GH_ORG", "conda-forge"))
for name in set(
[
recipe_name,
recipe_name.replace("-", "_"),
recipe_name.replace("_", "-"),
]
):
try:
if cf.get_repo("{}-feedstock".format(name)):
existing_recipe_name = name
feedstock_exists = True
break
else:
feedstock_exists = False
except github.UnknownObjectException as e:
feedstock_exists = False
if feedstock_exists and existing_recipe_name == recipe_name:
lints.append("Feedstock with the same name exists in conda-forge.")
elif feedstock_exists:
hints.append(
"Feedstock with the name {} exists in conda-forge. Is it the same as this package ({})?".format(
existing_recipe_name,
recipe_name,
)
)
bio = gh.get_user("bioconda").get_repo("bioconda-recipes")
try:
bio.get_dir_contents("recipes/{}".format(recipe_name))
except github.UnknownObjectException as e:
pass
else:
hints.append(
"Recipe with the same name exists in bioconda: "
"please discuss with @conda-forge/bioconda-recipes."
)
url = None
for source_section in sources_section:
if (
"url" in source_section
and isinstance(source_section["url"], str)
and source_section["url"].startswith(
"https://pypi.io/packages/source/"
)
):
url = source_section["url"]
if url:
# get pypi name from urls like "https://pypi.io/packages/source/b/build/build-0.4.0.tar.gz"
pypi_name = url.split("/")[6]
mapping_request = requests.get(
"https://raw.githubusercontent.com/regro/cf-graph-countyfair/master/mappings/pypi/name_mapping.yaml"
)
if mapping_request.status_code == 200:
mapping_raw_yaml = mapping_request.content
mapping = get_yaml().load(mapping_raw_yaml)
for pkg in mapping:
if pkg.get("pypi_name", "") == pypi_name:
conda_name = pkg["conda_name"]
hints.append(
f"A conda package with same name ({conda_name}) already exists."
)
# 2: Check that the recipe maintainers exists:
maintainers = extra_section.get("recipe-maintainers", [])
for maintainer in maintainers:
if "/" in maintainer:
# It's a team. Checking for existence is expensive. Skip for now
continue
try:
gh.get_user(maintainer)
except github.UnknownObjectException as e:
lints.append(
'Recipe maintainer "{}" does not exist'.format(maintainer)
)
# 3: if the recipe dir is inside the example dir
if recipe_dir is not None and "recipes/example/" in recipe_dir:
lints.append(
"Please move the recipe out of the example dir and "
"into its own dir."
)
# 4: Do not delete example recipe
if is_staged_recipes and recipe_dir is not None:
example_meta_fname = os.path.abspath(
os.path.join(recipe_dir, "..", "example", "meta.yaml")
)
if not os.path.exists(example_meta_fname):
msg = (
"Please do not delete the example recipe found in "
"`recipes/example/meta.yaml`."
)
if msg not in lints:
lints.append(msg)
|
def run_conda_forge_specific(meta, recipe_dir, lints, hints):
gh = github.Github(os.environ["GH_TOKEN"])
package_section = get_section(meta, "package", lints)
extra_section = get_section(meta, "extra", lints)
sources_section = get_section(meta, "source", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
recipe_name = package_section.get("name", "").strip()
is_staged_recipes = recipe_dirname != "recipe"
# 1: Check that the recipe does not exist in conda-forge or bioconda
if is_staged_recipes and recipe_name:
cf = gh.get_user(os.getenv("GH_ORG", "conda-forge"))
for name in set(
[
recipe_name,
recipe_name.replace("-", "_"),
recipe_name.replace("_", "-"),
]
):
try:
if cf.get_repo("{}-feedstock".format(name)):
existing_recipe_name = name
feedstock_exists = True
break
else:
feedstock_exists = False
except github.UnknownObjectException as e:
feedstock_exists = False
if feedstock_exists and existing_recipe_name == recipe_name:
lints.append("Feedstock with the same name exists in conda-forge.")
elif feedstock_exists:
hints.append(
"Feedstock with the name {} exists in conda-forge. Is it the same as this package ({})?".format(
existing_recipe_name,
recipe_name,
)
)
bio = gh.get_user("bioconda").get_repo("bioconda-recipes")
try:
bio.get_dir_contents("recipes/{}".format(recipe_name))
except github.UnknownObjectException as e:
pass
else:
hints.append(
"Recipe with the same name exists in bioconda: "
"please discuss with @conda-forge/bioconda-recipes."
)
url = None
for source_section in sources_section:
if str(source_section.get("url")).startswith("https://pypi.io/packages/source/"):
url = source_section["url"]
if url:
# get pypi name from urls like "https://pypi.io/packages/source/b/build/build-0.4.0.tar.gz"
pypi_name = url.split("/")[6]
mapping_request = requests.get(
"https://raw.githubusercontent.com/regro/cf-graph-countyfair/master/mappings/pypi/name_mapping.yaml"
)
if mapping_request.status_code == 200:
mapping_raw_yaml = mapping_request.content
mapping = get_yaml().load(mapping_raw_yaml)
for pkg in mapping:
if pkg.get("pypi_name", "") == pypi_name:
conda_name = pkg["conda_name"]
hints.append(
f"A conda package with same name ({conda_name}) already exists."
)
# 2: Check that the recipe maintainers exists:
maintainers = extra_section.get("recipe-maintainers", [])
for maintainer in maintainers:
if "/" in maintainer:
# It's a team. Checking for existence is expensive. Skip for now
continue
try:
gh.get_user(maintainer)
except github.UnknownObjectException as e:
lints.append(
'Recipe maintainer "{}" does not exist'.format(maintainer)
)
# 3: if the recipe dir is inside the example dir
if recipe_dir is not None and "recipes/example/" in recipe_dir:
lints.append(
"Please move the recipe out of the example dir and "
"into its own dir."
)
# 4: Do not delete example recipe
if is_staged_recipes and recipe_dir is not None:
example_meta_fname = os.path.abspath(
os.path.join(recipe_dir, "..", "example", "meta.yaml")
)
if not os.path.exists(example_meta_fname):
msg = (
"Please do not delete the example recipe found in "
"`recipes/example/meta.yaml`."
)
if msg not in lints:
lints.append(msg)
|
33,110 |
def edge_colors(
et: pd.DataFrame,
nt: pd.Series,
color_by: Hashable,
node_color_by: Hashable,
):
"""Default edge line color function."""
if color_by in ("source_node_color", "target_node_color"):
edge_select_by = color_by.split("_")[0]
return encodings.data_color(
et[edge_select_by].apply(nt[node_color_by].get),
nt[node_color_by],
)
elif color_by:
return encodings.data_color(et[color_by], et[color_by])
return pd.Series(["black"] * len(et), name="color_by")
|
def edge_colors(
et: pd.DataFrame,
nt: pd.DataFrame,
color_by: Hashable,
node_color_by: Hashable,
):
"""Default edge line color function."""
if color_by in ("source_node_color", "target_node_color"):
edge_select_by = color_by.split("_")[0]
return encodings.data_color(
et[edge_select_by].apply(nt[node_color_by].get),
nt[node_color_by],
)
elif color_by:
return encodings.data_color(et[color_by], et[color_by])
return pd.Series(["black"] * len(et), name="color_by")
|
5,057 |
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h * 2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
4,072 |
def resource_find(filename, use_cache=True):
'''Search for a resource in the list of paths.
Use resource_add_path to add a custom path to the search.
By default, results are cached for 60 seconds.
This can be disabled using use_cache=False.
.. versionchanged:: 2.0.0rc5
A default cache and the `use_cache` parameter were added.
'''
if not filename:
return
found_filename = None
if use_cache:
found_filename = Cache.get('kv.resourcefind', filename)
if found_filename:
return found_filename
if filename[:8] == 'atlas://':
found_filename = filename
else:
abspath_filename = abspath(filename)
if exists(abspath_filename):
found_filename = abspath(filename)
else:
for path in reversed(resource_paths):
abspath_filename = abspath(join(path, filename))
if exists(abspath_filename):
found_filename = abspath_filename
break
if not found_filename and filename.startswith("data:"):
found_filename = filename
if use_cache:
Cache.append('kv.resourcefind', filename, found_filename)
return found_filename
|
def resource_find(filename, use_cache=True):
'''Search for a resource in the list of paths.
Use resource_add_path to add a custom path to the search.
By default, results are cached for 60 seconds.
This can be disabled using use_cache=False.
.. versionchanged:: 2.0.0
A default cache and the `use_cache` parameter were added.
'''
if not filename:
return
found_filename = None
if use_cache:
found_filename = Cache.get('kv.resourcefind', filename)
if found_filename:
return found_filename
if filename[:8] == 'atlas://':
found_filename = filename
else:
abspath_filename = abspath(filename)
if exists(abspath_filename):
found_filename = abspath(filename)
else:
for path in reversed(resource_paths):
abspath_filename = abspath(join(path, filename))
if exists(abspath_filename):
found_filename = abspath_filename
break
if not found_filename and filename.startswith("data:"):
found_filename = filename
if use_cache:
Cache.append('kv.resourcefind', filename, found_filename)
return found_filename
|
42,907 |
def graph_embed_deprecated(A, max_mean_photon=1.0, make_traceless=False, rtol=1e-05, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
max_mean_photon (float): Threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e., :math:`sinh(r_{max})^2 ==` ``max_mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric.
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol):
raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
s, U = takagi(A, tol=atol)
sc = np.sqrt(1.0 + 1.0 / max_mean_photon)
vals = -np.arctanh(s / (s[0] * sc))
return vals, U
|
def graph_embed_deprecated(A, max_mean_photon=1.0, make_traceless=False, rtol=1e-05, atol=1e-08):
r"""Embed a graph into a Gaussian state.
Given a graph in terms of a symmetric adjacency matrix
(in general with arbitrary complex off-diagonal and real diagonal entries),
returns the squeezing parameters and interferometer necessary for
creating the Gaussian state whose off-diagonal parts are proportional to that matrix.
Uses :func:`takagi`.
Args:
A (array[complex]): square, symmetric (weighted) adjacency matrix of the graph
max_mean_photon (float): Threshold value. It guarantees that the mode with
the largest squeezing has ``max_mean_photon`` as the mean photon number
i.e., :math:`sinh(r_{max})^2 ==` ``max_mean_photon``.
make_traceless (bool): Removes the trace of the input matrix, by performing the transformation
:math:`\tilde{A} = A-\mathrm{tr}(A) \I/n`. This may reduce the amount of squeezing needed to encode
the graph.
rtol (float): relative tolerance used when checking if the input matrix is symmetric.
atol (float): absolute tolerance used when checking if the input matrix is symmetric
Returns:
tuple[array, array]: squeezing parameters of the input
state to the interferometer, and the unitary matrix representing the interferometer
"""
(m, n) = A.shape
if m != n:
raise ValueError("The matrix is not square.")
if not np.allclose(A, np.transpose(A), rtol=rtol, atol=atol):
raise ValueError("The matrix is not symmetric.")
if make_traceless:
A = A - np.trace(A) * np.identity(n) / n
s, U = takagi(A, tol=atol)
sc = np.sqrt(1.0 + 1.0 / max_mean_photon)
vals = -np.arctanh(s / (s[0] * sc))
return vals, U
|
7,191 |
def test_resize_local_mean_nd():
for dim in range(1, 6):
shape = 2 + np.arange(dim) * 2
x = np.ones(shape)
out_shape = np.asarray(shape) * 1.5
resized = resize_local_mean(x, out_shape)
expected_shape = 1.5 * shape
assert_equal(resized.shape, expected_shape)
assert np.all(resized == 1)
|
def test_resize_local_mean_nd():
for dim in range(1, 6):
shape = 2 + np.arange(dim) * 2
x = np.ones(shape)
out_shape = np.asarray(shape) * 1.5
resized = resize_local_mean(x, out_shape)
expected_shape = 1.5 * shape
assert_equal(resized.shape, expected_shape)
assert_array_equal(resized, 1)
|
34,315 |
def validate_files(args) -> NoReturn:
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
domain_is_valid = validator.verify_domain_validity()
if not domain_is_valid:
sys.exit(1)
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
if not args.max_history:
logger.info(
"Will not test for inconsistencies in stories since "
"you did not provide --max-history."
)
if args.max_history:
# Only run story structure validation if everything else is fine
# since this might take a while
everything_is_alright = validator.verify_story_structure(
not args.fail_on_warnings, max_history=args.max_history
)
sys.exit(0) if everything_is_alright else sys.exit(1)
|
def validate_files(args) -> NoReturn:
"""Validate all files needed for training a model.
Fails with a non-zero exit code if there are any errors in the data."""
from rasa.core.validator import Validator
from rasa.importers.rasa import RasaFileImporter
loop = asyncio.get_event_loop()
file_importer = RasaFileImporter(
domain_path=args.domain, training_data_paths=args.data
)
validator = loop.run_until_complete(Validator.from_importer(file_importer))
domain_is_valid = validator.verify_domain_validity()
if not domain_is_valid:
sys.exit(1)
everything_is_alright = validator.verify_all(not args.fail_on_warnings)
if not args.max_history:
logger.info(
"Will not test for inconsistencies in stories since "
"you did not provide --max-history."
)
else:
# Only run story structure validation if everything else is fine
# since this might take a while
everything_is_alright = validator.verify_story_structure(
not args.fail_on_warnings, max_history=args.max_history
)
sys.exit(0) if everything_is_alright else sys.exit(1)
|
43,047 |
def TimeEvolution(t: float, w: np.ndarray):
r"""Generates a custom ``sf`` operation for performing the transformation
:math:`e^{-i\hat{H}t/\hbar}` on a given state.
The custom operation returned by this function can be used as part of a Strawberry Fields
:class:`~.Program` just like any other operation from the :mod:`~.ops` module.
**Example usage:**
>>> modes = 2
>>> p = sf.Program(modes)
>>> with p.context as q:
>>> sf.ops.Fock(1) | q[0]
>>> sf.ops.Interferometer(Ul.T) | q
>>> TimeEvolution(t, w) | q
>>> sf.ops.Interferometer(Ul) | q
Args:
t (float): time in femtoseconds
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` that
compose the Hamiltonian :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i`
Returns:
an ``sf`` operation for enacting the dynamics transformation
Return type:
op
"""
# pylint: disable=expression-not-assigned
n_modes = len(w)
@operation(n_modes)
def op(q):
theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi)
for i in range(n_modes):
sf.ops.Rgate(theta[i]) | q[i]
return op()
|
def TimeEvolution(t: float, w: np.ndarray):
r"""Generates a custom ``sf`` operation for performing the transformation
:math:`e^{-i\hat{H}t/\hbar}` on a given state.
This operation can be used as part of a Strawberry Fields :class:`~.Program` just like any
:class:`~.Program` just like any other operation from the :mod:`~.ops` module.
**Example usage:**
>>> modes = 2
>>> p = sf.Program(modes)
>>> with p.context as q:
>>> sf.ops.Fock(1) | q[0]
>>> sf.ops.Interferometer(Ul.T) | q
>>> TimeEvolution(t, w) | q
>>> sf.ops.Interferometer(Ul) | q
Args:
t (float): time in femtoseconds
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}` that
compose the Hamiltonian :math:`\hat{H} = \sum_i \hbar \omega_i a_i^\dagger a_i`
Returns:
an ``sf`` operation for enacting the dynamics transformation
Return type:
op
"""
# pylint: disable=expression-not-assigned
n_modes = len(w)
@operation(n_modes)
def op(q):
theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi)
for i in range(n_modes):
sf.ops.Rgate(theta[i]) | q[i]
return op()
|
45,640 |
def decode_description(description):
"""
Parses the first line of a FASTA file using the specifications of
several different database headers (in _databases).
:param (string) description: The header line with the initial '>'
removed.
:rtype (dict): A dictionary for which each key-value pair comprises
a property specified by the database used and the
value of that property given by the header. If the
database is not recognized, the keys are given as
'desc-n' where n is the position of the property.
"""
if len(description) == 0:
return {'-1': 'no description'}
decoded = {}
desc = description.split('|')
if desc[0] in _databases:
db_info = _databases[desc[0]]
if desc[0] in ['sp', 'tr']:
decoded['accession'] = desc[1]
# using regex to get the other information
rs = re.search(
r'([^\s]+)(.*)\ OS=(.*)\ OX=(.*)\ GN=(.*)\ PE=(.*)\ SV=(.*)$',
string=desc[2]
)
for i in range(2, len(db_info)):
decoded[db_info[i]] = rs.group(i)
else:
# shift by one, since first section in header describes
# the database
for i in range(len(desc)-1):
decoded[db_info[i]] = desc[i+1]
else:
if len(desc) > 1:
for i in range(len(desc)-1):
decoded[str(i)] = desc[i+1]
else:
decoded['Header'] = desc[0]
return decoded
|
def decode_description(description):
"""
Parse the first line of a FASTA file using the specifications of
several different database headers (in _databases).
:param (string) description: The header line with the initial '>'
removed.
:rtype (dict): A dictionary for which each key-value pair comprises
a property specified by the database used and the
value of that property given by the header. If the
database is not recognized, the keys are given as
'desc-n' where n is the position of the property.
"""
if len(description) == 0:
return {'-1': 'no description'}
decoded = {}
desc = description.split('|')
if desc[0] in _databases:
db_info = _databases[desc[0]]
if desc[0] in ['sp', 'tr']:
decoded['accession'] = desc[1]
# using regex to get the other information
rs = re.search(
r'([^\s]+)(.*)\ OS=(.*)\ OX=(.*)\ GN=(.*)\ PE=(.*)\ SV=(.*)$',
string=desc[2]
)
for i in range(2, len(db_info)):
decoded[db_info[i]] = rs.group(i)
else:
# shift by one, since first section in header describes
# the database
for i in range(len(desc)-1):
decoded[db_info[i]] = desc[i+1]
else:
if len(desc) > 1:
for i in range(len(desc)-1):
decoded[str(i)] = desc[i+1]
else:
decoded['Header'] = desc[0]
return decoded
|
14,334 |
def test(glyphsets, glyphs=None, names=None):
if names is None:
names = glyphsets
if glyphs is None:
glyphs = glyphsets[0].keys()
hist = []
for glyph_name in glyphs:
#print()
#print(glyph_name)
try:
allVectors = []
allNodeTypes = []
for glyphset,name in zip(glyphsets, names):
#print('.', end='')
glyph = glyphset[glyph_name]
perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset)
glyph.draw(perContourPen)
contourPens = perContourPen.value
del perContourPen
contourVectors = []
nodeTypes = []
allNodeTypes.append(nodeTypes)
allVectors.append(contourVectors)
for contour in contourPens:
nodeTypes.append(tuple([ instruction[0] for instruction in contour.value ]))
stats = StatisticsPen(glyphset=glyphset)
contour.replay(stats)
size = abs(stats.area) ** .5 * .5
vector = (
int(size),
int(stats.meanX),
int(stats.meanY),
int(stats.stddevX * 2),
int(stats.stddevY * 2),
int(stats.correlation * size),
)
contourVectors.append(vector)
#print(vector)
# Check each master against the next one in the list.
for i,(m0,m1) in enumerate(zip(allNodeTypes[:-1],allNodeTypes[1:])):
if len(m0) != len(m1):
print('%s: %s+%s: Glyphs not compatible (wrong number of paths %i+%i)!!!!!' % (glyph_name, names[i], names[i+1], len(m0), len(m1)))
if m0 == m1:
continue
for pathIx, (nodes1, nodes2) in enumerate(zip(m0,m1)):
if nodes1 == nodes2:
continue
print('%s: %s+%s: Glyphs not compatible at path %i!!!!!' % (glyph_name, names[i], names[i+1], pathIx))
if len(nodes1) != len(nodes2):
print("%s has %i nodes, %s has %i nodes" % (names[i], len(nodes1), names[i+1], len(nodes2)))
continue
for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)):
if n1 != n2:
print("At node %i, %s has %s, %s has %s" % (nodeIx, names[i], n1, names[i+1], n2))
continue
for i,(m0,m1) in enumerate(zip(allVectors[:-1],allVectors[1:])):
if len(m0) != len(m1):
print('%s: %s+%s: Glyphs not compatible!!!!!' % (glyph_name, names[i], names[i+1]))
continue
if not m0:
continue
costs = [[_vlen(_vdiff(v0,v1)) for v1 in m1] for v0 in m0]
matching, matching_cost = min_cost_perfect_bipartite_matching(costs)
if matching != list(range(len(m0))):
print('%s: %s+%s: Glyph has wrong contour/component order: %s' % (glyph_name, names[i], names[i+1], matching)) #, m0, m1)
break
upem = 2048
item_cost = round((matching_cost / len(m0) / len(m0[0])) ** .5 / upem * 100)
hist.append(item_cost)
threshold = 7
if item_cost >= threshold:
print('%s: %s+%s: Glyph has very high cost: %d%%' % (glyph_name, names[i], names[i+1], item_cost))
except ValueError as e:
print('%s: %s: math error %s; skipping glyph.' % (glyph_name, name, e))
print(contour.value)
#raise
|
def test(glyphsets, glyphs=None, names=None):
if names is None:
names = glyphsets
if glyphs is None:
glyphs = glyphsets[0].keys()
hist = []
for glyph_name in glyphs:
#print()
#print(glyph_name)
try:
allVectors = []
allNodeTypes = []
for glyphset,name in zip(glyphsets, names):
#print('.', end='')
glyph = glyphset[glyph_name]
perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset)
glyph.draw(perContourPen)
contourPens = perContourPen.value
del perContourPen
contourVectors = []
nodeTypes = []
allNodeTypes.append(nodeTypes)
allVectors.append(contourVectors)
for contour in contourPens:
nodeTypes.append(tuple([ instruction[0] for instruction in contour.value ]))
stats = StatisticsPen(glyphset=glyphset)
contour.replay(stats)
size = abs(stats.area) ** .5 * .5
vector = (
int(size),
int(stats.meanX),
int(stats.meanY),
int(stats.stddevX * 2),
int(stats.stddevY * 2),
int(stats.correlation * size),
)
contourVectors.append(vector)
#print(vector)
# Check each master against the next one in the list.
for i, (m0, m1) in enumerate(zip(allNodeTypes[:-1], allNodeTypes[1:])):
if len(m0) != len(m1):
print('%s: %s+%s: Glyphs not compatible (wrong number of paths %i+%i)!!!!!' % (glyph_name, names[i], names[i+1], len(m0), len(m1)))
if m0 == m1:
continue
for pathIx, (nodes1, nodes2) in enumerate(zip(m0,m1)):
if nodes1 == nodes2:
continue
print('%s: %s+%s: Glyphs not compatible at path %i!!!!!' % (glyph_name, names[i], names[i+1], pathIx))
if len(nodes1) != len(nodes2):
print("%s has %i nodes, %s has %i nodes" % (names[i], len(nodes1), names[i+1], len(nodes2)))
continue
for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)):
if n1 != n2:
print("At node %i, %s has %s, %s has %s" % (nodeIx, names[i], n1, names[i+1], n2))
continue
for i,(m0,m1) in enumerate(zip(allVectors[:-1],allVectors[1:])):
if len(m0) != len(m1):
print('%s: %s+%s: Glyphs not compatible!!!!!' % (glyph_name, names[i], names[i+1]))
continue
if not m0:
continue
costs = [[_vlen(_vdiff(v0,v1)) for v1 in m1] for v0 in m0]
matching, matching_cost = min_cost_perfect_bipartite_matching(costs)
if matching != list(range(len(m0))):
print('%s: %s+%s: Glyph has wrong contour/component order: %s' % (glyph_name, names[i], names[i+1], matching)) #, m0, m1)
break
upem = 2048
item_cost = round((matching_cost / len(m0) / len(m0[0])) ** .5 / upem * 100)
hist.append(item_cost)
threshold = 7
if item_cost >= threshold:
print('%s: %s+%s: Glyph has very high cost: %d%%' % (glyph_name, names[i], names[i+1], item_cost))
except ValueError as e:
print('%s: %s: math error %s; skipping glyph.' % (glyph_name, name, e))
print(contour.value)
#raise
|
22,416 |
def filter_output(tool, output, incoming):
for filter in output.filters:
try:
if not eval(filter.text.strip(), globals(), incoming):
return True # do not create this dataset
except Exception as e:
log.info('Tool %s output %s: dataset output filter (%s) failed: %s' % (tool.id, output.name, filter.text, e))
return False
|
def filter_output(tool, output, incoming):
for filter in output.filters:
try:
if not eval(filter.text.strip(), globals(), incoming):
return True # do not create this dataset
except Exception as e:
log.info(f'Tool {tool.id} output {output.name}: dataset output filter ({filter.text}) failed: {e}')
return False
|
52,306 |
def _parse_accept_header(accept):
"""
Parse the Accept header *accept*, returning a list with 3-tuples of
[(str(media_type), dict(params), float(q_value)),] ordered by q values.
If the accept header includes vendor-specific types like::
application/vnd.yourcompany.yourproduct-v1.1+json
It will actually convert the vendor and version into parameters and
convert the content type into `application/json` so appropriate content
negotiation decisions can be made.
Default `q` for values that are not specified is 1.0
From: https://gist.github.com/samuraisam/2714195
"""
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0).strip()
media_params = []
# convert vendor-specific content types into something useful (see
# docstring)
typ, subtyp = media_type.split('/')
# check for a + in the sub-type
if '+' in subtyp:
# if it exists, determine if the subtype is a vendor-specific type
vnd, sep, extra = subtyp.partition('+')
if vnd.startswith('vnd'):
# and then... if it ends in something like "-v1.1" parse the
# version out
if '-v' in vnd:
vnd, sep, rest = vnd.rpartition('-v')
if len(rest):
# add the version as a media param
try:
version = media_params.append(('version', float(rest)))
except ValueError:
version = 1.0 # could not be parsed
# add the vendor code as a media param
media_params.append(('vendor', vnd))
# and re-write media_type to something like application/json so
# it can be used usefully when looking up emitters
media_type = '{}/{}'.format(typ, extra)
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
key = key.strip()
value = value.strip()
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, dict(media_params), q))
result.sort(key=itemgetter(2))
return result
|
def _parse_accept_header(accept):
"""
Parse the Accept header *accept*
Return a list with 3-tuples of
[(str(media_type), dict(params), float(q_value)),] ordered by q values.
If the accept header includes vendor-specific types like::
application/vnd.yourcompany.yourproduct-v1.1+json
It will actually convert the vendor and version into parameters and
convert the content type into `application/json` so appropriate content
negotiation decisions can be made.
Default `q` for values that are not specified is 1.0
From: https://gist.github.com/samuraisam/2714195
"""
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0).strip()
media_params = []
# convert vendor-specific content types into something useful (see
# docstring)
typ, subtyp = media_type.split('/')
# check for a + in the sub-type
if '+' in subtyp:
# if it exists, determine if the subtype is a vendor-specific type
vnd, sep, extra = subtyp.partition('+')
if vnd.startswith('vnd'):
# and then... if it ends in something like "-v1.1" parse the
# version out
if '-v' in vnd:
vnd, sep, rest = vnd.rpartition('-v')
if len(rest):
# add the version as a media param
try:
version = media_params.append(('version', float(rest)))
except ValueError:
version = 1.0 # could not be parsed
# add the vendor code as a media param
media_params.append(('vendor', vnd))
# and re-write media_type to something like application/json so
# it can be used usefully when looking up emitters
media_type = '{}/{}'.format(typ, extra)
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
key = key.strip()
value = value.strip()
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, dict(media_params), q))
result.sort(key=itemgetter(2))
return result
|
29,880 |
def linear_mixed_model(y,
x,
z_t=None,
k=None,
p_path=None,
overwrite=False,
standardize=True,
mean_impute=True):
r"""Initialize a linear mixed model from a matrix table.
.. warning::
This functionality is no longer implemented/supported as of Hail 0.2.94.
"""
raise NotImplementedError("linear_mixed_modelv is no longer implemented/supported as of Hail 0.2.94")
|
def linear_mixed_model(y,
x,
z_t=None,
k=None,
p_path=None,
overwrite=False,
standardize=True,
mean_impute=True):
r"""Initialize a linear mixed model from a matrix table.
.. warning::
This functionality is no longer implemented/supported as of Hail 0.2.94.
"""
raise NotImplementedError("linear_mixed_model is no longer implemented/supported as of Hail 0.2.94")
|
45,720 |
def initialize_forecast_exporter_netcdf(
outpath,
outfnprefix,
startdate,
timestep,
n_timesteps,
shape,
metadata,
n_ens_members=1,
incremental=None,
**kwargs,
):
"""Initialize a netCDF forecast exporter. All outputs are written to a
single file named as '<outfnprefix>_.nc'.
Parameters
----------
outpath: str
Output path.
outfnprefix: str
Prefix for output file names.
startdate: datetime.datetime
Start date of the forecast.
timestep: int
Time step of the forecast (minutes).
n_timesteps: int
Number of time steps in the forecast this argument is ignored if
incremental is set to 'timestep'.
shape: tuple of int
Two-element tuple defining the shape (height,width) of the forecast
grids.
metadata: dict
Metadata dictionary containing the projection, x1, x2, y1, y2,
unit attributes (projection and variable units) described in the
documentation of :py:mod:`pysteps.io.importers`.
n_ens_members: int
Number of ensemble members in the forecast. This argument is ignored if
incremental is set to 'member'.
incremental: {None,'timestep','member'}, optional
Allow incremental writing of datasets into the netCDF files.\n
The available options are: 'timestep' = write a forecast or a forecast
ensemble for a given time step; 'member' = write a forecast sequence
for a given ensemble member. If set to None, incremental writing is
disabled.
Returns
-------
exporter: dict
The return value is a dictionary containing an exporter object. This c
an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
to write datasets into the given file format.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed"
)
if not PYPROJ_IMPORTED:
raise MissingOptionalDependency(
"pyproj package is required for netcdf " "exporters but it is not installed"
)
if not isinstance(startdate, datetime):
raise ValueError("argument 'stardate' must be a python datetime object")
if incremental not in [None, "timestep", "member"]:
raise ValueError(
f"unknown option {incremental}: incremental must be "
+ "'timestep' or 'member'"
)
if incremental == "timestep":
n_timesteps = None
elif incremental == "member":
n_ens_members = None
elif incremental is not None:
raise ValueError(
f"unknown argument value incremental='{str(incremental)}': "
+ "must be 'timestep' or 'member'"
)
n_ens_gt_one = False
if n_ens_members is not None:
if n_ens_members > 1:
n_ens_gt_one = True
exporter = {}
outfn = os.path.join(outpath, outfnprefix + ".nc")
ncf = netCDF4.Dataset(outfn, "w", format="NETCDF4")
ncf.Conventions = "CF-1.7"
ncf.title = "pysteps-generated nowcast"
ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
ncf.source = "pysteps" # TODO(exporters): Add pySTEPS version here
ncf.history = ""
ncf.references = ""
ncf.comment = ""
h, w = shape
ncf.createDimension("ens_number", size=n_ens_members)
ncf.createDimension("time", size=n_timesteps)
ncf.createDimension("y", size=h)
ncf.createDimension("x", size=w)
if metadata["unit"] == "mm/h":
var_name = "precip_intensity"
var_standard_name = None
var_long_name = "instantaneous precipitation rate"
var_unit = "mm h-1"
elif metadata["unit"] == "mm":
var_name = "precip_accum"
var_standard_name = None
var_long_name = "accumulated precipitation"
var_unit = "mm"
elif metadata["unit"] == "dBZ":
var_name = "reflectivity"
var_long_name = "equivalent reflectivity factor"
var_standard_name = "equivalent_reflectivity_factor"
var_unit = "dBZ"
else:
raise ValueError("unknown unit %s" % metadata["unit"])
xr = np.linspace(*metadata["bounding_box"][:2], w + 1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(*metadata["bounding_box"][2:], h + 1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
# flip yr vector if yorigin is upper
if metadata["yorigin"] == "upper":
yr = np.flip(yr)
var_xc = ncf.createVariable("x", np.float32, dimensions=("x",))
var_xc[:] = xr
var_xc.axis = "X"
var_xc.standard_name = "projection_x_coordinate"
var_xc.long_name = "x-coordinate in Cartesian system"
var_xc.units = metadata.get("cartesian_unit", "m")
var_yc = ncf.createVariable("y", np.float32, dimensions=("y",))
var_yc[:] = yr
var_yc.axis = "Y"
var_yc.standard_name = "projection_y_coordinate"
var_yc.long_name = "y-coordinate in Cartesian system"
var_yc.units = metadata.get("cartesian_unit")
x_2d, y_2d = np.meshgrid(xr, yr)
pr = pyproj.Proj(metadata["projection"])
lon, lat = pr(x_2d.flatten(), y_2d.flatten(), inverse=True)
var_lon = ncf.createVariable("lon", float, dimensions=("y", "x"))
var_lon[:] = lon.reshape(shape)
var_lon.standard_name = "longitude"
var_lon.long_name = "longitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lon.units = "degrees_east"
var_lat = ncf.createVariable("lat", float, dimensions=("y", "x"))
var_lat[:] = lat.reshape(shape)
var_lat.standard_name = "latitude"
var_lat.long_name = "latitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lat.units = "degrees_north"
ncf.projection = metadata["projection"]
(
grid_mapping_var_name,
grid_mapping_name,
grid_mapping_params,
) = _convert_proj4_to_grid_mapping(metadata["projection"])
# skip writing the grid mapping if a matching name was not found
if grid_mapping_var_name is not None:
var_gm = ncf.createVariable(grid_mapping_var_name, int, dimensions=())
var_gm.grid_mapping_name = grid_mapping_name
for i in grid_mapping_params.items():
var_gm.setncattr(i[0], i[1])
if incremental == "member" or n_ens_gt_one:
var_ens_num = ncf.createVariable("ens_number", int, dimensions=("ens_number",))
if incremental != "member":
var_ens_num[:] = list(range(1, n_ens_members + 1))
var_ens_num.long_name = "ensemble member"
var_ens_num.standard_name = "realization"
var_ens_num.units = ""
var_time = ncf.createVariable("time", int, dimensions=("time",))
if incremental != "timestep":
var_time[:] = [i * timestep * 60 for i in range(1, n_timesteps + 1)]
var_time.long_name = "forecast time"
var_time.units = f"seconds since {startdate: %Y-%m-%d %H:%M:%S}"
if incremental == "member" or n_ens_gt_one:
var_f = ncf.createVariable(
var_name,
np.float32,
dimensions=("ens_number", "time", "y", "x"),
zlib=True,
complevel=9,
)
else:
var_f = ncf.createVariable(
var_name, np.float32, dimensions=("time", "y", "x"), zlib=True, complevel=9
)
if var_standard_name is not None:
var_f.standard_name = var_standard_name
var_f.long_name = var_long_name
var_f.coordinates = "y x"
var_f.units = var_unit
if grid_mapping_var_name is not None:
var_f.grid_mapping = grid_mapping_var_name
exporter["method"] = "netcdf"
exporter["ncfile"] = ncf
exporter["var_F"] = var_f
if incremental == "member" or n_ens_gt_one:
exporter["var_ens_num"] = var_ens_num
exporter["var_time"] = var_time
exporter["var_name"] = var_name
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
|
def initialize_forecast_exporter_netcdf(
outpath,
outfnprefix,
startdate,
timestep,
n_timesteps,
shape,
metadata,
n_ens_members=1,
incremental=None,
**kwargs,
):
"""Initialize a netCDF forecast exporter. All outputs are written to a
single file named as '<outfnprefix>_.nc'.
Parameters
----------
outpath: str
Output path.
outfnprefix: str
Prefix for output file names.
startdate: datetime.datetime
Start date of the forecast.
timestep: int
Time step of the forecast (minutes).
n_timesteps: int
Number of time steps in the forecast this argument is ignored if
incremental is set to 'timestep'.
shape: tuple of int
Two-element tuple defining the shape (height,width) of the forecast
grids.
metadata: dict
Metadata dictionary containing the projection, x1, x2, y1, y2,
unit attributes (projection and variable units) described in the
documentation of :py:mod:`pysteps.io.importers`.
n_ens_members: int
Number of ensemble members in the forecast. This argument is ignored if
incremental is set to 'member'.
incremental: {None,'timestep','member'}, optional
Allow incremental writing of datasets into the netCDF files.\n
The available options are: 'timestep' = write a forecast or a forecast
ensemble for a given time step; 'member' = write a forecast sequence
for a given ensemble member. If set to None, incremental writing is
disabled.
Returns
-------
exporter: dict
The return value is a dictionary containing an exporter object. This c
an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
to write datasets into the given file format.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required for netcdf "
"exporters but it is not installed"
)
if not PYPROJ_IMPORTED:
raise MissingOptionalDependency(
"pyproj package is required for netcdf " "exporters but it is not installed"
)
if not isinstance(startdate, datetime):
raise ValueError("argument 'stardate' must be a python datetime object")
if incremental not in [None, "timestep", "member"]:
raise ValueError(
f"unknown option {incremental}: incremental must be "
+ "'timestep' or 'member'"
)
if incremental == "timestep":
n_timesteps = None
elif incremental == "member":
n_ens_members = None
elif incremental is not None:
raise ValueError(
f"unknown argument value incremental='{str(incremental)}': "
+ "must be 'timestep' or 'member'"
)
n_ens_gt_one = False
if n_ens_members is not None:
if n_ens_members > 1:
n_ens_gt_one = True
exporter = {}
outfn = os.path.join(outpath, outfnprefix + ".nc")
ncf = netCDF4.Dataset(outfn, "w", format="NETCDF4")
ncf.Conventions = "CF-1.7"
ncf.title = "pysteps-generated nowcast"
ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
ncf.source = "pysteps" # TODO(exporters): Add pySTEPS version here
ncf.history = ""
ncf.references = ""
ncf.comment = ""
h, w = shape
ncf.createDimension("ens_number", size=n_ens_members)
ncf.createDimension("time", size=n_timesteps)
ncf.createDimension("y", size=h)
ncf.createDimension("x", size=w)
if metadata["unit"] == "mm/h":
var_name = "precip_intensity"
var_standard_name = None
var_long_name = "instantaneous precipitation rate"
var_unit = "mm h-1"
elif metadata["unit"] == "mm":
var_name = "precip_accum"
var_standard_name = None
var_long_name = "accumulated precipitation"
var_unit = "mm"
elif metadata["unit"] == "dBZ":
var_name = "reflectivity"
var_long_name = "equivalent reflectivity factor"
var_standard_name = "equivalent_reflectivity_factor"
var_unit = "dBZ"
else:
raise ValueError("unknown unit %s" % metadata["unit"])
xr = np.linspace(*metadata["bounding_box"][:2], w + 1)[:-1]
xr += 0.5 * (xr[1] - xr[0])
yr = np.linspace(*metadata["bounding_box"][2:], h + 1)[:-1]
yr += 0.5 * (yr[1] - yr[0])
# flip yr vector if yorigin is upper
if metadata["yorigin"] == "upper":
yr = np.flip(yr)
var_xc = ncf.createVariable("x", np.float32, dimensions=("x",))
var_xc[:] = xr
var_xc.axis = "X"
var_xc.standard_name = "projection_x_coordinate"
var_xc.long_name = "x-coordinate in Cartesian system"
var_xc.units = metadata.get("cartesian_unit", "m")
var_yc = ncf.createVariable("y", np.float32, dimensions=("y",))
var_yc[:] = yr
var_yc.axis = "Y"
var_yc.standard_name = "projection_y_coordinate"
var_yc.long_name = "y-coordinate in Cartesian system"
var_yc.units = metadata.get("cartesian_unit", "m")
x_2d, y_2d = np.meshgrid(xr, yr)
pr = pyproj.Proj(metadata["projection"])
lon, lat = pr(x_2d.flatten(), y_2d.flatten(), inverse=True)
var_lon = ncf.createVariable("lon", float, dimensions=("y", "x"))
var_lon[:] = lon.reshape(shape)
var_lon.standard_name = "longitude"
var_lon.long_name = "longitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lon.units = "degrees_east"
var_lat = ncf.createVariable("lat", float, dimensions=("y", "x"))
var_lat[:] = lat.reshape(shape)
var_lat.standard_name = "latitude"
var_lat.long_name = "latitude coordinate"
# TODO(exporters): Don't hard-code the unit.
var_lat.units = "degrees_north"
ncf.projection = metadata["projection"]
(
grid_mapping_var_name,
grid_mapping_name,
grid_mapping_params,
) = _convert_proj4_to_grid_mapping(metadata["projection"])
# skip writing the grid mapping if a matching name was not found
if grid_mapping_var_name is not None:
var_gm = ncf.createVariable(grid_mapping_var_name, int, dimensions=())
var_gm.grid_mapping_name = grid_mapping_name
for i in grid_mapping_params.items():
var_gm.setncattr(i[0], i[1])
if incremental == "member" or n_ens_gt_one:
var_ens_num = ncf.createVariable("ens_number", int, dimensions=("ens_number",))
if incremental != "member":
var_ens_num[:] = list(range(1, n_ens_members + 1))
var_ens_num.long_name = "ensemble member"
var_ens_num.standard_name = "realization"
var_ens_num.units = ""
var_time = ncf.createVariable("time", int, dimensions=("time",))
if incremental != "timestep":
var_time[:] = [i * timestep * 60 for i in range(1, n_timesteps + 1)]
var_time.long_name = "forecast time"
var_time.units = f"seconds since {startdate: %Y-%m-%d %H:%M:%S}"
if incremental == "member" or n_ens_gt_one:
var_f = ncf.createVariable(
var_name,
np.float32,
dimensions=("ens_number", "time", "y", "x"),
zlib=True,
complevel=9,
)
else:
var_f = ncf.createVariable(
var_name, np.float32, dimensions=("time", "y", "x"), zlib=True, complevel=9
)
if var_standard_name is not None:
var_f.standard_name = var_standard_name
var_f.long_name = var_long_name
var_f.coordinates = "y x"
var_f.units = var_unit
if grid_mapping_var_name is not None:
var_f.grid_mapping = grid_mapping_var_name
exporter["method"] = "netcdf"
exporter["ncfile"] = ncf
exporter["var_F"] = var_f
if incremental == "member" or n_ens_gt_one:
exporter["var_ens_num"] = var_ens_num
exporter["var_time"] = var_time
exporter["var_name"] = var_name
exporter["startdate"] = startdate
exporter["timestep"] = timestep
exporter["metadata"] = metadata
exporter["incremental"] = incremental
exporter["num_timesteps"] = n_timesteps
exporter["num_ens_members"] = n_ens_members
exporter["shape"] = shape
return exporter
|
21,476 |
def measure_func(name=None):
"""
Used to decorator an async function with a `Measure` context manager.
Usage:
@measure_func
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
...
"""
def wrapper(func):
block_name = func.__name__ if name is None else name
@wraps(func)
async def measured_func(self, *args, **kwargs):
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
return measured_func
return wrapper
|
def measure_func(name=None):
"""
Used to decorator an async function with a `Measure` context manager.
Usage:
@measure_func()
async def foo(...):
...
Which is analogous to:
async def foo(...):
with Measure(...):
...
"""
def wrapper(func):
block_name = func.__name__ if name is None else name
@wraps(func)
async def measured_func(self, *args, **kwargs):
with Measure(self.clock, block_name):
r = await func(self, *args, **kwargs)
return r
return measured_func
return wrapper
|
20,917 |
def pre_init_hook_for_submodules(cr, model, field):
"""Moves images from single to multi mode.
Feel free to use this as a ``pre_init_hook`` for submodules.
:param str model:
Model name, like ``product.template``.
:param str field:
Binary field that had the images in that :param:`model`, like
``image``.
"""
env = api.Environment(cr, SUPERUSER_ID, dict())
with cr.savepoint():
cr.execute(
"""
INSERT INTO base_multi_image_image (
owner_id,
owner_model,
storage,
file_db_store
)
SELECT
id,
%(model)s,
'db',
%(field)s
FROM
%(table)s
WHERE
%(field)s IS NOT NULL
""", {"table": AsIs(env[model]._table), "field": AsIs(field), "model": (model,)}
)
|
def pre_init_hook_for_submodules(cr, model, field):
"""Moves images from single to multi mode.
Feel free to use this as a ``pre_init_hook`` for submodules.
:param str model:
Model name, like ``product.template``.
:param str field:
Binary field that had the images in that :param:`model`, like
``image``.
"""
env = api.Environment(cr, SUPERUSER_ID, dict())
with cr.savepoint():
cr.execute(
"""
INSERT INTO base_multi_image_image (
owner_id,
owner_model,
storage,
file_db_store
)
SELECT
id,
%(model)s,
'db',
%(field)s
FROM
%(table)s
WHERE
%(field)s IS NOT NULL
""", {"table": AsIs(env[model]._table), "field": AsIs(field), "model": model}
)
|
41,207 |
def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A as 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A as 1 by swapping gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the orde: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
|
def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A as 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A equal to 1 by SWAP gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the orde: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
|
917 |
def test_issue_22058():
sol = solveset(-sqrt(t)*x**2 + 2*x + sqrt(t), x, S.Reals)
# doesn't fail (and following numerical check)
assert sol.xreplace({t: 1}) == {1 + sqrt(2), 1 - sqrt(2)}
|
def test_issue_22058():
sol = solveset(-sqrt(t)*x**2 + 2*x + sqrt(t), x, S.Reals)
# doesn't fail (and following numerical check)
assert sol.xreplace({t: 1}) == {1 - sqrt(2), 1 + sqrt(2)}, sol.xreplace({t: 1})
|
14,494 |
def _items_sorter(
sort_keys: bool,
key_order: Optional[Sequence[str]],
drop_missing: bool,
) -> Callable[[EventDict], List[Tuple[str, Any]]]:
"""
Return a function to sort items from an `event_dict`.
See :class:`KeyValueRenderer` for an explanation of the parameters.
"""
# Use an optimized version for each case.
if key_order and sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += sorted(event_dict.items())
return items
elif key_order:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += event_dict.items()
return items
elif sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
return sorted(event_dict.items())
else:
ordered_items = operator.methodcaller("items") # type: ignore
return ordered_items
|
def _items_sorter(
sort_keys: bool,
key_order: Optional[Sequence[str]],
drop_missing: bool,
) -> Callable[[EventDict], List[Tuple[str, Any]]]:
"""
Return a function to sort items from an `event_dict`.
See `KeyValueRenderer` for an explanation of the parameters.
"""
# Use an optimized version for each case.
if key_order and sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += sorted(event_dict.items())
return items
elif key_order:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
items = []
for key in key_order: # type: ignore
value = event_dict.pop(key, None)
if value is not None or not drop_missing:
items.append((key, value))
items += event_dict.items()
return items
elif sort_keys:
def ordered_items(event_dict: EventDict) -> List[Tuple[str, Any]]:
return sorted(event_dict.items())
else:
ordered_items = operator.methodcaller("items") # type: ignore
return ordered_items
|
6,813 |
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith('mailto') and not to_expand[2].startswith('data:'):
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
if 'url' in to_expand[0] and to_expand[1].startswith('(') and to_expand[-1].endswith(')'):
# background-image: url('/assets/...') - workaround for wkhtmltopdf print-media-type
to_expand.append(' !important')
return "".join(to_expand)
html = re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
# background-image: url('/assets/...')
html = re.sub('(:[\s]?url)(\([\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
return html
|
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith('mailto') and not to_expand[2].startswith('data:'):
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
if 'url' in to_expand[0] and to_expand[1].startswith('(') and to_expand[-1].endswith(')'):
# background-image: url('/assets/...') - workaround for wkhtmltopdf print-media-type
to_expand.append(' !important')
return "".join(to_expand)
html = re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
# background-image: url('/assets/...')
html = re.sub('(:[\s]?url)(\([\'"]?)((?!http)[^\'" >]+)([\'"]?\))', _expand_relative_urls, html)
return html
|
30,914 |
def build_human_readable(entry_context: dict) -> str:
human_readable = ""
entry_context = entry_context.get("TroubleShout", {})
# Engine docker container
engine: dict = dict_safe_get(entry_context, ['Engine', 'SSL/TLS'], {}, dict)
human_readable += "## Docker container engine - custom certificate\n"
readable_engine_issuer = [item.get('Decode').get('Issuer') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_subject = [item.get('Decode').get('Subject') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_vars = engine.get('ShellVariables')
human_readable += tableToMarkdown(name="Enviorment variables", t=readable_engine_vars)
human_readable += tableToMarkdown(name="Issuer", t=readable_engine_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_engine_subject, removeNull=True)
# Endpoint
endpoint: dict = entry_context.get('Endpoint', {}).get('SSL/TLS', {})
readable_endpoint_issuer = [item.get('Decode').get('Issuer') for item in endpoint.get('Certificates', {})]
readable_endpoint_subject = [item.get('Decode').get('Subject') for item in endpoint.get('Certificates', {})]
human_readable += f"\n\n## Endpoint certificate - {endpoint.get('Identifier')}\n"
human_readable += tableToMarkdown(name="Issuer", t=readable_endpoint_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_endpoint_subject, removeNull=True)
human_readable += "\n"
return human_readable
|
def build_human_readable(entry_context: dict) -> str:
human_readable = ""
entry_context = entry_context.get("TroubleShout", {})
# Engine docker container
engine: dict = dict_safe_get(entry_context, ['Engine', 'SSL/TLS'], {}, dict)
human_readable += "## Docker container engine - custom certificate\n"
readable_engine_issuer = [dict_safe_get(item, ('Decode', 'Issuer')) for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_subject = [item.get('Decode').get('Subject') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_vars = engine.get('ShellVariables')
human_readable += tableToMarkdown(name="Enviorment variables", t=readable_engine_vars)
human_readable += tableToMarkdown(name="Issuer", t=readable_engine_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_engine_subject, removeNull=True)
# Endpoint
endpoint: dict = entry_context.get('Endpoint', {}).get('SSL/TLS', {})
readable_endpoint_issuer = [item.get('Decode').get('Issuer') for item in endpoint.get('Certificates', {})]
readable_endpoint_subject = [item.get('Decode').get('Subject') for item in endpoint.get('Certificates', {})]
human_readable += f"\n\n## Endpoint certificate - {endpoint.get('Identifier')}\n"
human_readable += tableToMarkdown(name="Issuer", t=readable_endpoint_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_endpoint_subject, removeNull=True)
human_readable += "\n"
return human_readable
|
52,465 |
def test_span_group_copy(doc):
doc.spans["test"] = [doc[0:1], doc[2:4]]
doc_copy = doc.copy()
# check that the spans were indeed copied
assert len(doc_copy.spans) > 0
doc.spans["test"].append(doc[3:4])
# check that the copy spans were not modified and this is an isolated doc
assert len(doc.spans["test"]) != len(doc_copy.spans["test"])
|
def test_span_group_copy(doc):
doc.spans["test"] = [doc[0:1], doc[2:4]]
assert len(doc.spans["test"]) == 2
doc_copy = doc.copy()
# check that the spans were indeed copied
assert len(doc_copy.spans["test"]) == 2
# add a new span to the original doc
doc.spans["test"].append(doc[3:4])
assert len(doc.spans["test"]) == 3
# check that the copy spans were not modified and this is an isolated doc
assert len(doc_copy.spans["test"]) == 2
|
22,719 |
def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
# So, pytest is nice, and a little to for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available in current working directory.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same than
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
|
def _prepare_environ(workspace):
new_environ = os.environ.copy()
new_environ['TMPDIR'] = workspace
# So, pytest is nice, and a little to for our usage.
# In order to help user to call seamlessly any piece of python code without requiring to
# install it as a full-fledged setuptools distribution for instance, it injects the current
# path into the PYTHONPATH environment variable. This allows the python interpreter to import
# as modules any python file available at this path.
# See https://docs.pytest.org/en/3.2.5/pythonpath.html for the explanation and description.
# However this behavior is not good in integration tests, in particular the nginx oldest ones.
# Indeed during these kind of tests certbot is installed as a transitive dependency to
# certbot-nginx. Here is the trick: this certbot version is not necessarily the same than
# the certbot codebase lying in current working directory. For instance in oldest tests
# certbot==0.36.0 may be installed while the codebase corresponds to certbot==0.37.0.dev0.
# If at this point PYTHONPATH is set up like pytest does, invoking certbot will import the
# modules from the codebase (0.37.0.dev0), not from the required/installed version (0.36.0).
# This will lead to funny and totally incomprehensible errors. To avoid that, we ensure that if
# PYTHONPATH is set, it does not contain the current working directory.
if new_environ.get('PYTHONPATH'):
# certbot_integration_tests.__file__ is:
# '/path/to/certbot/certbot-ci/certbot_integration_tests/__init__.pyc'
# ... and we want '/path/to/certbot'
certbot_root = os.path.dirname(os.path.dirname(os.path.dirname(certbot_integration_tests.__file__)))
python_paths = [path for path in new_environ['PYTHONPATH'].split(':') if path != certbot_root]
new_environ['PYTHONPATH'] = ':'.join(python_paths)
return new_environ
|
9,205 |
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at from the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somwhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set fo files than the
other filters. cf. the ``galsm.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via::
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at from the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somewhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set fo files than the
other filters. cf. the ``galsm.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images.
To avoid using the full pupil plane configuration, use the optional keyword ``pupil_bin``.
The full pupil-plane images are 4096 x 4096, which is more detail than is typically needed for
most applications. The default binning is 4x4, which results in an image that is 1024 x 1024.
This provides enough detail for most purposes and is much faster to render than using the full
pupil plane image. Using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly
reasonable results and is even faster to render, but it is not recommended to use higher
binning than that, as the diffraction spikes will be noticeably degraded.
Also note that currently the orientation of the struts is fixed, rather than rotating depending
on the orientation of the focal plane. Rotation of the PSF can easily be affected by the user
via::
psf = galsim.roman.getPSF(...).rotate(angle)
which will rotate the entire PSF (including the diffraction spikes and all other features).
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from . import n_pix, n_sca, longwave_bands, shortwave_bands
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
return psf
|
58,301 |
def build_auxiliary_output_path(args: Union[argparse.Namespace, MkosiArgs], suffix: str, can_compress: bool = False) -> Path:
output = strip_suffixes(args.output)
compression = f".{should_compress_output(args)}" if can_compress and should_compress_output(args) else ''
return output.with_name(f"{output.name}{suffix}{compression}")
|
def build_auxiliary_output_path(args: Union[argparse.Namespace, MkosiArgs], suffix: str, can_compress: bool = False) -> Path:
output = strip_suffixes(args.output)
should_compress = should_compress_output(args)
compression = f".{should_compress}" if can_compress and should_compress else ''
return output.with_name(f"{output.name}{suffix}{compression}")
|
33,130 |
def l96s_tay2_step(x, t, dt, s):
"""Steps forward state of L96s model by order 2.0 Taylor scheme
This is the method that must be used to generate the truth twin for this model due
to the high-accuracy with respect to convergence in the strong sense. The ensemble
model twin will be generated by the general integration functionality, with the
diffusion set appropriately. This is the basic formulation which makes a Fourier
truncation at p=1 for the simple form of the order 2.0 method. See
`bib.grudzien2020numerical` for full details of the scheme and other versions."""
# Infer system dimension
sys_dim = len(x)
# Compute the deterministic dxdt and the jacobian equations
dx = dxdt(x)
dxF = d2x_dtdx(x)
# coefficients defined based on the p=1 Fourier truncation
rho = 1.0/12.0 - 0.5 * np.pi**(-2)
alpha = np.pi**2 / 180.0 - 0.5 * np.pi**(-2)
# draw standard normal sample to define the
# recursive Stratonovich integral coefficients
rndm = np.random.standard_normal([5, sys_dim])
xi = rndm[0, :]
mu = rndm[1, :]
phi = rndm[2, :]
zeta = rndm[3, :]
eta = rndm[4, :]
# define the auxiliary functions of random Fourier coefficients, a and b
a = -2.0 * np.sqrt(dt * rho) * mu - np.sqrt(2.0*dt) * zeta / np.pi
b = np.sqrt(dt * alpha) * phi + np.sqrt(dt / (2.0 * np.pi**2) ) * eta
# vector of first order Stratonovich integrals
J_pdelta = (dt/2.0) * (np.sqrt(dt) * xi + a)
def Psi(l1, l2):
# psi will be a generic function of the indicies l1 and l2, we will define
# psi plus and psi minus via this
psi = dt**2 * xi[l1] * xi[l2] / 3.0 + dt * a[l1] * a[l2] / 2.0 \
+ dt**(1.5) * (xi[l1] * a[l2] + xi[l2] * a[l1]) / 4.0 \
- dt**(1.5) * (xi[l1] * b[l2] + xi[l2] * b[l1]) / (2.0 * np.pi)
return psi
# we define the approximations of the second order Stratonovich integral
psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim)
for i in range(sys_dim)])
psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim)
for i in range(sys_dim)])
# the final vectorized step forward is given as
x = x + dx * dt + dt**2 * 0.5 * dxF @ dx # deterministic taylor step
x += s * np.sqrt(dt) * xi # stochastic euler step
x += s * dxF @ J_pdelta # stochastic first order taylor step
x += s**2 * (psi_plus - psi_minus) # stochastic second order taylor step
return x
|
def l96s_tay2_step(x, t, dt, s):
"""Steps forward state of L96s model by order 2.0 Taylor scheme
This is the method that must be used to generate the truth twin for this model due
to the high-accuracy with respect to convergence in the strong sense. The ensemble
model uses general integration functionality, with the
diffusion set appropriately. This is the basic formulation which makes a Fourier
truncation at p=1 for the simple form of the order 2.0 method. See
`bib.grudzien2020numerical` for full details of the scheme and other versions."""
# Infer system dimension
sys_dim = len(x)
# Compute the deterministic dxdt and the jacobian equations
dx = dxdt(x)
dxF = d2x_dtdx(x)
# coefficients defined based on the p=1 Fourier truncation
rho = 1.0/12.0 - 0.5 * np.pi**(-2)
alpha = np.pi**2 / 180.0 - 0.5 * np.pi**(-2)
# draw standard normal sample to define the
# recursive Stratonovich integral coefficients
rndm = np.random.standard_normal([5, sys_dim])
xi = rndm[0, :]
mu = rndm[1, :]
phi = rndm[2, :]
zeta = rndm[3, :]
eta = rndm[4, :]
# define the auxiliary functions of random Fourier coefficients, a and b
a = -2.0 * np.sqrt(dt * rho) * mu - np.sqrt(2.0*dt) * zeta / np.pi
b = np.sqrt(dt * alpha) * phi + np.sqrt(dt / (2.0 * np.pi**2) ) * eta
# vector of first order Stratonovich integrals
J_pdelta = (dt/2.0) * (np.sqrt(dt) * xi + a)
def Psi(l1, l2):
# psi will be a generic function of the indicies l1 and l2, we will define
# psi plus and psi minus via this
psi = dt**2 * xi[l1] * xi[l2] / 3.0 + dt * a[l1] * a[l2] / 2.0 \
+ dt**(1.5) * (xi[l1] * a[l2] + xi[l2] * a[l1]) / 4.0 \
- dt**(1.5) * (xi[l1] * b[l2] + xi[l2] * b[l1]) / (2.0 * np.pi)
return psi
# we define the approximations of the second order Stratonovich integral
psi_plus = np.array([Psi((i-1) % sys_dim, (i+1) % sys_dim)
for i in range(sys_dim)])
psi_minus = np.array([Psi((i-2) % sys_dim, (i-1) % sys_dim)
for i in range(sys_dim)])
# the final vectorized step forward is given as
x = x + dx * dt + dt**2 * 0.5 * dxF @ dx # deterministic taylor step
x += s * np.sqrt(dt) * xi # stochastic euler step
x += s * dxF @ J_pdelta # stochastic first order taylor step
x += s**2 * (psi_plus - psi_minus) # stochastic second order taylor step
return x
|
59,431 |
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given meas_level, rep_time, rep_delay is not allowed
for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', None):
raise SchemaValidationError(
('rep_time = {} not supported for backend {}, only {} is supported'
).format(rep_time, backend_config.backend_name, backend_config.rep_times)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
|
def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given ``meas_level``, ``rep_time`` or ``rep_delay`` is not allowed
for the given ``backend``.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', None):
raise SchemaValidationError(
('rep_time = {} not supported for backend {}, only {} is supported'
).format(rep_time, backend_config.backend_name, backend_config.rep_times)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to μs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to μs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
|
45,469 |
def mask_secrets(key: str, value: str) -> str:
key = key.lower()
if any(
[
"key" in key,
"token" in key,
"password" in key,
"secret" in key,
]
):
return "*" * 8
return value
|
def mask_secrets(key: str, value: str) -> str:
key = key.lower()
if any(k in key for k in ["key", "token", "password", "secret"]):
return "*" * 8
return value
|
5,782 |
def _milp_iv(c, integrality, bounds, constraints, options):
# objective IV
c = np.atleast_1d(c).astype(np.double)
if c.ndim != 1:
message = "`c` must be a one-dimensional array."
raise ValueError(message)
# integrality IV
message = ("`integrality` must contain integers 0-3 and be broadcastable "
"to `c.shape`.")
if integrality is None:
integrality = 0
try:
integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
except ValueError:
raise ValueError(message)
if integrality.min() < 0 or integrality.max() > 3:
raise ValueError(message)
# bounds IV
if bounds is None:
bounds = Bounds(0, np.inf)
elif not isinstance(bounds, Bounds):
message = "`bounds` must be an instance of `scipy.optimize.Bounds`."
try:
bounds = Bounds(*bounds)
except TypeError:
raise ValueError(message)
try:
lb = np.broadcast_to(bounds.lb, c.shape).astype(np.double)
ub = np.broadcast_to(bounds.ub, c.shape).astype(np.double)
except (ValueError, TypeError):
message = ("`bounds.lb` and `bounds.ub` must contain reals and "
"be broadcastable to `c.shape`.")
raise ValueError(message)
# constraints IV
if not constraints: # constraints is None, empty sequence, False, 0, etc.
constraints = [LinearConstraint(np.empty((0, c.size)),
np.empty((0,)), np.empty((0,)))]
A, b_l, b_u = _constraints_to_components(constraints)
if A.shape != (b_l.size, c.size):
message = "The shape of `A` must be (len(b_l), len(c))."
raise ValueError(message)
indptr, indices, data = A.indptr, A.indices, A.data.astype(np.double)
# options IV
options = options or {}
supported_options = {'disp', 'presolve', 'time_limit'}
unsupported_options = set(options).difference(supported_options)
if unsupported_options:
message = (f"Unrecognized options detected: {unsupported_options}. "
"These will be passed to HiGHS verbatim.")
warnings.warn(message, RuntimeWarning, stacklevel=3)
options_iv = {'log_to_console': options.get("disp", False)}
options_iv.update(options)
return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
|
def _milp_iv(c, integrality, bounds, constraints, options):
# objective IV
c = np.atleast_1d(c).astype(np.double)
if c.ndim != 1:
message = "`c` must be a one-dimensional array."
raise ValueError(message)
# integrality IV
message = ("`integrality` must contain integers 0-3 and be broadcastable "
"to `c.shape`.")
if integrality is None:
integrality = 0
try:
integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
except ValueError:
raise ValueError(message)
if integrality.min() < 0 or integrality.max() > 3:
raise ValueError(message)
# bounds IV
if bounds is None:
bounds = Bounds(0, np.inf)
elif not isinstance(bounds, Bounds):
message = "`bounds` must be an instance of `scipy.optimize.Bounds`."
try:
bounds = Bounds(*bounds)
except TypeError:
raise ValueError(message)
try:
lb = np.broadcast_to(bounds.lb, c.shape).astype(np.double)
ub = np.broadcast_to(bounds.ub, c.shape).astype(np.double)
except (ValueError, TypeError):
message = ("`bounds.lb` and `bounds.ub` must contain reals and "
"be broadcastable to `c.shape`.")
raise ValueError(message)
# constraints IV
if not constraints:
constraints = [LinearConstraint(np.empty((0, c.size)),
np.empty((0,)), np.empty((0,)))]
A, b_l, b_u = _constraints_to_components(constraints)
if A.shape != (b_l.size, c.size):
message = "The shape of `A` must be (len(b_l), len(c))."
raise ValueError(message)
indptr, indices, data = A.indptr, A.indices, A.data.astype(np.double)
# options IV
options = options or {}
supported_options = {'disp', 'presolve', 'time_limit'}
unsupported_options = set(options).difference(supported_options)
if unsupported_options:
message = (f"Unrecognized options detected: {unsupported_options}. "
"These will be passed to HiGHS verbatim.")
warnings.warn(message, RuntimeWarning, stacklevel=3)
options_iv = {'log_to_console': options.get("disp", False)}
options_iv.update(options)
return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
|
14,483 |
def _perform_mockings() -> None:
"""Mock modules and roles."""
for role_name in options.mock_roles:
if re.match(r"\w+\.\w+\.\w+", role_name):
namespace, collection, role_dir = role_name.split(".")
path = f".cache/collections/ansible_collections/{ namespace }/{ collection }/roles/{ role_dir }/"
else:
path = f".cache/roles/{role_name}"
os.makedirs(path, exist_ok=True)
if options.mock_modules:
for module_name in options.mock_modules:
_make_module_stub(module_name)
# if inside a collection repo, symlink it to simulate its installed state
if not os.path.exists("galaxy.yml"):
return
yaml = yaml_from_file("galaxy.yml")
namespace = yaml.get('namespace', None)
collection = yaml.get('name', None)
if not namespace or not collection:
return
p = pathlib.Path(
f"{options.project_dir}/.cache/collections/ansible_collections/{ namespace }"
)
p.mkdir(parents=True, exist_ok=True)
link_path = p / collection
if not link_path.exists():
link_path.symlink_to(pathlib.Path("../../../..", target_is_directory=True))
|
def _perform_mockings() -> None:
"""Mock modules and roles."""
for role_name in options.mock_roles:
if re.match(r"\w+\.\w+\.\w+", role_name):
namespace, collection, role_dir = role_name.split(".")
path = f".cache/collections/ansible_collections/{ namespace }/{ collection }/roles/{ role_dir }/"
else:
path = f".cache/roles/{role_name}"
os.makedirs(path, exist_ok=True)
if options.mock_modules:
for module_name in options.mock_modules:
_make_module_stub(module_name)
# if inside a collection repo, symlink it to simulate its installed state
if not os.path.exists("galaxy.yml"):
return
yaml = yaml_from_file("galaxy.yml")
namespace = yaml.get('namespace', None)
collection = yaml.get('name', None)
if not namespace or not collection:
return
p = pathlib.Path(
f"{options.project_dir}/.cache/collections/ansible_collections/{ namespace }"
)
p.mkdir(parents=True, exist_ok=True)
link_path = p / collection
if not link_path.exists():
link_path.symlink_to(pathlib.Path(options.project_dir, target_is_directory=True))
|
38,996 |
def _add_pydantic_validation_attributes(
dc_cls: Type['Dataclass'],
config: Optional[Type['BaseConfig']],
validate_on_init: bool,
dc_cls_doc: Optional[str] = None,
) -> None:
"""
We need to replace the right method. If no `__post_init__` has been set in the stdlib dataclass
it won't even exist (code is generated on the fly by `dataclasses`)
By default, we run validation after `__init__` or `__post_init__` if defined
"""
if hasattr(dc_cls, '__post_init__'):
init = dc_cls.__init__
post_init = dc_cls.__post_init__
@wraps(init)
def new_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
self.__post_init__ = partial( # type: ignore[assignment]
self.__post_init__, __pydantic_run_validation__=__pydantic_run_validation__
)
init(self, *args, **kwargs)
@wraps(post_init)
def new_post_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
post_init(self, *args, **kwargs)
if __pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
self.__post_init_post_parse__(*args, **kwargs)
setattr(dc_cls, '__init__', new_init)
setattr(dc_cls, '__post_init__', new_post_init)
else:
init = dc_cls.__init__
@wraps(init)
def new_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
init(self, *args, **kwargs)
if __pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
# We need to find again the initvars. To do that we use `__dataclass_fields__` instead of
# public method `dataclasses.fields`
import dataclasses
# get all initvars and their default values
initvars_and_values: Dict[str, Any] = {}
for i, f in enumerate(self.__class__.__dataclass_fields__.values()):
if f._field_type is dataclasses._FIELD_INITVAR: # type: ignore[attr-defined]
try:
# set arg value by default
initvars_and_values[f.name] = args[i]
except IndexError:
initvars_and_values[f.name] = f.default
initvars_and_values.update(kwargs)
self.__post_init_post_parse__(**initvars_and_values)
setattr(dc_cls, '__init__', new_init)
setattr(dc_cls, '__processed__', ClassAttribute('__processed__', True))
setattr(dc_cls, '__pydantic_initialised__', False)
setattr(dc_cls, '__pydantic_model__', create_pydantic_model_from_dataclass(dc_cls, config, dc_cls_doc))
setattr(dc_cls, '__pydantic_validate_values__', _dataclass_validate_values)
setattr(dc_cls, '__validate__', classmethod(_validate_dataclass))
setattr(dc_cls, '__get_validators__', classmethod(_get_validators))
if dc_cls.__pydantic_model__.__config__.validate_assignment and not dc_cls.__dataclass_params__.frozen:
setattr(dc_cls, '__setattr__', _dataclass_validate_assignment_setattr)
|
def _add_pydantic_validation_attributes(
dc_cls: Type['Dataclass'],
config: Optional[Type['BaseConfig']],
validate_on_init: bool,
dc_cls_doc: str,
) -> None:
"""
We need to replace the right method. If no `__post_init__` has been set in the stdlib dataclass
it won't even exist (code is generated on the fly by `dataclasses`)
By default, we run validation after `__init__` or `__post_init__` if defined
"""
if hasattr(dc_cls, '__post_init__'):
init = dc_cls.__init__
post_init = dc_cls.__post_init__
@wraps(init)
def new_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
self.__post_init__ = partial( # type: ignore[assignment]
self.__post_init__, __pydantic_run_validation__=__pydantic_run_validation__
)
init(self, *args, **kwargs)
@wraps(post_init)
def new_post_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
post_init(self, *args, **kwargs)
if __pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
self.__post_init_post_parse__(*args, **kwargs)
setattr(dc_cls, '__init__', new_init)
setattr(dc_cls, '__post_init__', new_post_init)
else:
init = dc_cls.__init__
@wraps(init)
def new_init(
self: 'Dataclass', *args: Any, __pydantic_run_validation__: bool = validate_on_init, **kwargs: Any
) -> None:
init(self, *args, **kwargs)
if __pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
# We need to find again the initvars. To do that we use `__dataclass_fields__` instead of
# public method `dataclasses.fields`
import dataclasses
# get all initvars and their default values
initvars_and_values: Dict[str, Any] = {}
for i, f in enumerate(self.__class__.__dataclass_fields__.values()):
if f._field_type is dataclasses._FIELD_INITVAR: # type: ignore[attr-defined]
try:
# set arg value by default
initvars_and_values[f.name] = args[i]
except IndexError:
initvars_and_values[f.name] = f.default
initvars_and_values.update(kwargs)
self.__post_init_post_parse__(**initvars_and_values)
setattr(dc_cls, '__init__', new_init)
setattr(dc_cls, '__processed__', ClassAttribute('__processed__', True))
setattr(dc_cls, '__pydantic_initialised__', False)
setattr(dc_cls, '__pydantic_model__', create_pydantic_model_from_dataclass(dc_cls, config, dc_cls_doc))
setattr(dc_cls, '__pydantic_validate_values__', _dataclass_validate_values)
setattr(dc_cls, '__validate__', classmethod(_validate_dataclass))
setattr(dc_cls, '__get_validators__', classmethod(_get_validators))
if dc_cls.__pydantic_model__.__config__.validate_assignment and not dc_cls.__dataclass_params__.frozen:
setattr(dc_cls, '__setattr__', _dataclass_validate_assignment_setattr)
|
14,877 |
def setup_schluter(hass, config, api, authenticator):
"""Set up the Schluter component."""
authentication = None
try:
authentication = authenticator.authenticate()
except RequestException as ex:
_LOGGER.error("Unable to connect to Schluter service: %s", str(ex))
hass.components.persistent_notification.create(
"Error: {}<br />"
"You will need to restart hass after fixing."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
state = authentication.state
if state == AuthenticationState.AUTHENTICATED:
hass.data[DATA_SCHLUTER] = SchluterData(hass, api, authentication.session_id)
for component in PLATFORMS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
if state == AuthenticationState.BAD_PASSWORD:
_LOGGER.error("Invalid password provided")
return False
if state == AuthenticationState.BAD_EMAIL:
_LOGGER.error("Invalid email provided")
return False
return False
|
def setup_schluter(hass, config, api, authenticator):
"""Set up the Schluter component."""
authentication = None
try:
authentication = authenticator.authenticate()
except RequestException as ex:
_LOGGER.error("Unable to connect to Schluter service: %s", ex)
hass.components.persistent_notification.create(
"Error: {}<br />"
"You will need to restart hass after fixing."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
state = authentication.state
if state == AuthenticationState.AUTHENTICATED:
hass.data[DATA_SCHLUTER] = SchluterData(hass, api, authentication.session_id)
for component in PLATFORMS:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
if state == AuthenticationState.BAD_PASSWORD:
_LOGGER.error("Invalid password provided")
return False
if state == AuthenticationState.BAD_EMAIL:
_LOGGER.error("Invalid email provided")
return False
return False
|
5,124 |
def test_no_definintion():
fig = Figure()
axs = fig.subplots(4, 4, subplot_kw=dict(projection="polar"))
for ax in axs.flat:
ax.set(xticks=[], yticks=[])
ax.plot([1, 2])
fig.suptitle("hello, world")
buf = io.StringIO()
fig.savefig(buf, format='eps')
buf.seek(0)
wds = [ln.partition(' ')[0] for
ln in buf.readlines()
if ln.startswith('/')]
assert max(Counter(wds).values()) == 1
|
def test_no_duplicate_definition():
fig = Figure()
axs = fig.subplots(4, 4, subplot_kw=dict(projection="polar"))
for ax in axs.flat:
ax.set(xticks=[], yticks=[])
ax.plot([1, 2])
fig.suptitle("hello, world")
buf = io.StringIO()
fig.savefig(buf, format='eps')
buf.seek(0)
wds = [ln.partition(' ')[0] for
ln in buf.readlines()
if ln.startswith('/')]
assert max(Counter(wds).values()) == 1
|
21,247 |
def _argparse_volumes(volumes_arg: str) -> List:
"""Custom argparse handling for volumes
:param volumes_arg: The volume argparse for harvard imports
:return: Range of values
"""
if ":" not in volumes_arg:
return [volumes_arg]
volumes = [int(e) if e.strip() else 2000 for e in volumes_arg.split(":")]
if len(volumes) == 1:
start = stop = volumes[0]
else:
start, stop = volumes[0], volumes[1] + 1
return [*range(start, stop)]
|
def _argparse_volumes(volumes_arg: str) -> List:
"""Custom argparse handling for volumes
:param volumes_arg: The volume argparse for harvard imports
:return: Range of values
"""
if ":" not in volumes_arg:
return [volumes_arg]
volumes = [int(e) if e.strip() else 2000 for e in volumes_arg.split(":")]
if len(volumes) == 1:
start = stop = volumes[0]
else:
start, stop = volumes[0], volumes[1] + 1
return list(range(start, stop))
|
7,637 |
def digest_file(fpath, hash_algo: str):
"""
Reads and digests a file according to specified hashing-algorith.
:param hash_algo: any algo contained in :mod:`hashlib`
:return: <hash_algo>=<hex_digest>
From http://stackoverflow.com/a/21565932/548792
"""
blocksize = 2 ** 16
digester = getattr(hashlib, hash_algo)()
with open(fpath, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
digester.update(block)
return f"{hash_algo}={digester.hexdigest()}"
|
def digest_file(fpath, hash_algo: str):
"""
Reads and digests a file according to specified hashing-algorith.
:param hash_algo: any algo contained in :mod:`hashlib`
:return: <hash_algo>=<hex_digest>
From http://stackoverflow.com/a/21565932/548792
"""
blocksize = 2 ** 16
digester = hashlib.new(hash_algo)
with open(fpath, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
digester.update(block)
return f"{hash_algo}={digester.hexdigest()}"
|
30,294 |
def add_tag_to_model(model_id, tags, model="intelligence"):
"""
Adds tag to specific Threat Model. By default is set to intelligence (indicators).
"""
data = {
'tags': [{'name': t.strip(), 'tlp': 'red'} for t in tags.split(',')]
}
res = http_request("POST", F"v1/{model}/{model_id}/tag/", params=CREDENTIALS, data=json.dumps(data))
if str(res.get('success', '')).lower() == 'true':
return_outputs(F"Added successfully tags: {tags} to {model} with {model_id}", None, res)
else:
return_outputs(F"Failed to add {tags} to {model} with {model_id}", None, res)
|
def add_tag_to_model(model_id='', tags=None, model="intelligence"):
"""
Adds tag to specific Threat Model. By default is set to intelligence (indicators).
"""
data = {
'tags': [{'name': t.strip(), 'tlp': 'red'} for t in tags.split(',')]
}
res = http_request("POST", F"v1/{model}/{model_id}/tag/", params=CREDENTIALS, data=json.dumps(data))
if str(res.get('success', '')).lower() == 'true':
return_outputs(F"Added successfully tags: {tags} to {model} with {model_id}", None, res)
else:
return_outputs(F"Failed to add {tags} to {model} with {model_id}", None, res)
|
12,279 |
def save_qasm(qc, file_loc):
'''
Save QASM output of circuit object to file.
Parameters
----------
qc: QubitCircuit
circuit object to produce QASM output for.
'''
qasm_out = QasmOutput("2.0")
lines = qasm_out._qasm_output(qc)
with open(file_loc, "w") as f:
for line in lines:
f.write("{}\n".format(line))
|
def save_qasm(qc, file_loc):
'''
Save QASM output of circuit object to file.
Parameters
----------
qc: :class:`.QubitCircuit`
circuit object to produce QASM output for.
'''
qasm_out = QasmOutput("2.0")
lines = qasm_out._qasm_output(qc)
with open(file_loc, "w") as f:
for line in lines:
f.write("{}\n".format(line))
|
32,569 |
def filter_dict_null(d):
if isinstance(d, dict):
return dict((k, v) for k, v in list(d.items()) if v is not None)
return d
|
def filter_dict_null(d):
if isinstance(d, dict):
return {k: v for k, v in d.items() if v is not None}
return d
|
59,821 |
def create_signature_help(response: Optional[dict]) -> Optional[SignatureHelp]:
if response is None:
return None
raw_signatures = response.get("signatures")
signatures = []
if isinstance(raw_signatures, list):
signatures = [parse_signature_information(signature) for signature in raw_signatures]
if signatures:
active_signature = response.get("activeSignature", -1)
active_parameter = response.get("activeParameter", -1)
if not 0 <= active_signature < len(signatures):
debug("activeSignature {} not a valid index for signatures length {}".format(
active_signature, len(signatures)))
active_signature = 0
return SignatureHelp(signatures, active_signature, active_parameter)
return None
|
def create_signature_help(response: Optional[dict]) -> Optional[SignatureHelp]:
if response is None:
return None
signatures = response.get("signatures") or []
signatures = [parse_signature_information(signature) for signature in signatures]
if signatures:
active_signature = response.get("activeSignature", -1)
active_parameter = response.get("activeParameter", -1)
if not 0 <= active_signature < len(signatures):
debug("activeSignature {} not a valid index for signatures length {}".format(
active_signature, len(signatures)))
active_signature = 0
return SignatureHelp(signatures, active_signature, active_parameter)
return None
|
31,794 |
def item_to_incident(item):
if not (occured := item.get('timestamp_occured_iso8601')):
occured = convert_timestamp_to_iso86(
item.get('timestamp_occured', ''),
item.get("time_offset", 'Z')
)
incident = {
'Type': 'AlienVault USM',
'name': 'Alarm: ' + item.get('uuid'),
'occurred': occured,
'rawJSON': json.dumps(item),
}
return incident
|
def item_to_incident(item):
if not (occured := item.get('timestamp_occured_iso8601')):
occured = convert_timestamp_to_iso86(
item.get('timestamp_occured', ''),
item.get("time_offset", 'Z')
)
incident = {
'Type': 'AlienVault USM',
'name': 'Alarm: ' + item.get('uuid'),
'occurred': occurred,
'rawJSON': json.dumps(item),
}
return incident
|
13,657 |
def _post_processing(samples, svs, itpl_part, d_nsp, L_rk_tol):
"""Compute coefficients/partition to construct minimal interpolant."""
num_vars = len(svs)
max_idx = np.argmax([*(len(ip) for ip in itpl_part)])
max_rks = []
for i in range(num_vars):
max_rk = 0
# we don't need to compute this max rank since we exploit nullspace structure
if i == max_idx:
max_rks.append(len(itpl_part[max_idx])-1)
continue
shapes = []
for j in range(num_vars):
if i != j:
shapes.append(samples.shape[j])
# compute max ranks of all possible 1-D Loewner matrices
for idc in itertools.product(*(range(s) for s in shapes)):
l_idc = list(idc)
l_idc.insert(i, slice(None))
L = nd_loewner(samples[tuple(l_idc)], [svs[i]], [itpl_part[i]])
rk = np.linalg.matrix_rank(L, tol=L_rk_tol)
if rk > max_rk:
max_rk = rk
max_rks.append(max_rk)
# exploit nullspace structure to obtain final max rank
denom = np.prod([*(len(itpl_part[k])-max_rks[k] for k in range(len(itpl_part)))])
if denom == 0 or d_nsp % denom != 0:
return None, None
max_rks[max_idx] = len(itpl_part[max_idx]) - d_nsp / denom
max_rks[max_idx] = round(max_rks[max_idx])
for i in range(len(max_rks)):
itpl_part[i] = itpl_part[i][0:max_rks[i]+1]
# solve LS problem
L = full_nd_loewner(samples, svs, itpl_part)
_, S, V = np.linalg.svd(L)
VH = np.conj(V.T)
coefs = VH[:, -1:]
return coefs, itpl_part
|
def _post_processing(samples, svs, itpl_part, d_nsp, L_rk_tol):
"""Compute coefficients/partition to construct minimal interpolant."""
num_vars = len(svs)
max_idx = np.argmax([*(len(ip) for ip in itpl_part)])
max_rks = []
for i in range(num_vars):
max_rk = 0
# we don't need to compute this max rank since we exploit nullspace structure
if i == max_idx:
max_rks.append(len(itpl_part[max_idx])-1)
continue
shapes = []
for j in range(num_vars):
if i != j:
shapes.append(samples.shape[j])
# compute max ranks of all possible 1-D Loewner matrices
for idc in itertools.product(*(range(s) for s in shapes)):
l_idc = list(idc)
l_idc.insert(i, slice(None))
L = nd_loewner(samples[tuple(l_idc)], [svs[i]], [itpl_part[i]])
rk = np.linalg.matrix_rank(L, tol=L_rk_tol)
if rk > max_rk:
max_rk = rk
max_rks.append(max_rk)
# exploit nullspace structure to obtain final max rank
denom = np.prod([len(itpl_part[k])-max_rks[k] for k in range(len(itpl_part))])
if denom == 0 or d_nsp % denom != 0:
return None, None
max_rks[max_idx] = len(itpl_part[max_idx]) - d_nsp / denom
max_rks[max_idx] = round(max_rks[max_idx])
for i in range(len(max_rks)):
itpl_part[i] = itpl_part[i][0:max_rks[i]+1]
# solve LS problem
L = full_nd_loewner(samples, svs, itpl_part)
_, S, V = np.linalg.svd(L)
VH = np.conj(V.T)
coefs = VH[:, -1:]
return coefs, itpl_part
|
57,779 |
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
sys.exit(0)
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
sys.exit(0)
for entry in data:
for results, outputs in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, outputs)
except Exception as error:
return_error(str(error), error)
|
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
sys.exit(0)
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
sys.exit(0)
for entry in data:
for results, dbot_score in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, dbot_score)
for dbot_score, results in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, dbot_score)
except Exception as error:
return_error(str(error), error)
|
58,888 |
def csys(self, kcn="", **kwargs):
"""Activates a previously defined coordinate system.
APDL Command: CSYS
Parameters
----------
kcn
Specifies the active coordinate system, as follows:
0 (default) - Cartesian
1 - Cylindrical with global Cartesian Z as the axis of rotation
2 - Spherical
4 or WP - Working Plane
5 - Cylindrical with global Cartesian Y as the axis of rotation
11 or greater - Any previously defined local coordinate system
Notes
-----
The CSYS command activates a previously defined coordinate system for
geometry input and generation. The LOCAL, CLOCAL, CS, CSKP, and CSWPLA
commands also activate coordinate systems as they are defined. To set
the active element coordinate system attribute pointer, issue the ESYS
command.
The active coordinate system for files created via the CDWRITE command
is Cartesian
>>> mapdl.csys(0)
This command is valid in any processor.
>>> mapdl.csys(4)
>>> # or
>>> mapdl.csys('WP')
activates working plane tracking, which updates the
coordinate system to follow working plane changes. To deactivate
working plane tracking, activate any other coordinate system.
>>> mapdl.csys(5)
is a cylindrical coordinate system with global Cartesian Y as
the axis. The local x, y and z axes are radial, θ, and axial
(respectively). The R-Theta plane is the global X-Z plane, as it is for
an axisymmetric model. Thus, at `θ = 0.0`, `mapdl.csys(5)` has a specific
orientation: the local x is in the global +X direction, local y is in
the global -Z direction, and local z (the cylindrical axis) is in the
global +Y direction.
Examples
--------
Suppose we want to create a cylindrical surface in cylindrical y (5)
with a radius of 6 and spanning `30 < θ < -90` and `0 < z < 4`
>>> mapdl.csys(5)
>>> mapdl.k(1, 6, 30)
>>> mapdl.k(2, 6, -90)
>>> mapdl.k(3, 6, -90, 4)
>>> mapdl.k(4, 6, 30, 4)
>>> mapdl.a(1, 2, 3, 4)
>>> mapdl.aplot()
"""
command = "CSYS,%s" % (str(kcn))
return self.run(command, **kwargs)
|
def csys(self, kcn="", **kwargs):
"""Activates a previously defined coordinate system.
APDL Command: CSYS
Parameters
----------
kcn
Specifies the active coordinate system, as follows:
0 (default) - Cartesian
1 - Cylindrical with global Cartesian Z as the axis of rotation
2 - Spherical
4 or WP - Working Plane
5 - Cylindrical with global Cartesian Y as the axis of rotation
11 or greater - Any previously defined local coordinate system
Notes
-----
The CSYS command activates a previously defined coordinate system for
geometry input and generation. The LOCAL, CLOCAL, CS, CSKP, and CSWPLA
commands also activate coordinate systems as they are defined. To set
the active element coordinate system attribute pointer, issue the ESYS
command.
The active coordinate system for files created via the CDWRITE command
is Cartesian
>>> mapdl.csys(0)
This command is valid in any processor.
>>> mapdl.csys(4)
>>> # or
>>> mapdl.csys('WP')
activates working plane tracking, which updates the
coordinate system to follow working plane changes. To deactivate
working plane tracking, activate any other coordinate system.
>>> mapdl.csys(5)
is a cylindrical coordinate system with global Cartesian Y as
the axis. The local x, y and z axes are radial, θ, and axial
(respectively). The R-Theta plane is the global X-Z plane, as it is for
an axisymmetric model. Thus, at `θ = 0.0`, `mapdl.csys(5)` has a specific
orientation: the local x is in the global +X direction, local y is in
the global -Z direction, and local z (the cylindrical axis) is in the
global +Y direction.
Examples
--------
Create a cylindrical surface in cylindrical y (CSYS=5) with
a radius of 6 and spanning `30 < θ < -90` and `0 < z < 4`.
>>> mapdl.csys(5)
>>> mapdl.k(1, 6, 30)
>>> mapdl.k(2, 6, -90)
>>> mapdl.k(3, 6, -90, 4)
>>> mapdl.k(4, 6, 30, 4)
>>> mapdl.a(1, 2, 3, 4)
>>> mapdl.aplot()
"""
command = "CSYS,%s" % (str(kcn))
return self.run(command, **kwargs)
|
41,701 |
def test_generate_packages_json(tmp_path):
# Set up directory to store dummy package files for SHA-256 hash verification
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.file_name = pkg.file_name or pkg.name + ".file"
# Write dummy package file for SHA-256 hash verification
with open(os.path.join(tmp_path, pkg.file_name), "w") as f:
f.write(pkg.name)
package_data = buildall.generate_packages_json(tmp_path, pkg_map)
assert set(package_data.keys()) == {"info", "packages"}
assert package_data["info"] == {"arch": "wasm32", "platform": "Emscripten-1.0"}
assert set(package_data["packages"]) == {
"pkg_1",
"pkg_1_1",
"pkg_2",
"pkg_3",
"pkg_3_1",
}
assert package_data["packages"]["pkg_1"] == {
"name": "pkg_1",
"version": "1.0.0",
"file_name": "pkg_1.file",
"depends": ["pkg_1_1", "pkg_3"],
"imports": ["pkg_1"],
"install_dir": "site",
"sha_256": "c1e38241013b5663e902fff97eb8585e98e6df446585da1dcf2ad121b52c2143",
}
|
def test_generate_packages_json(tmp_path):
"""Set up directory to store dummy package files for SHA-256 hash verification"""
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.file_name = pkg.file_name or pkg.name + ".file"
# Write dummy package file for SHA-256 hash verification
with open(os.path.join(tmp_path, pkg.file_name), "w") as f:
f.write(pkg.name)
package_data = buildall.generate_packages_json(tmp_path, pkg_map)
assert set(package_data.keys()) == {"info", "packages"}
assert package_data["info"] == {"arch": "wasm32", "platform": "Emscripten-1.0"}
assert set(package_data["packages"]) == {
"pkg_1",
"pkg_1_1",
"pkg_2",
"pkg_3",
"pkg_3_1",
}
assert package_data["packages"]["pkg_1"] == {
"name": "pkg_1",
"version": "1.0.0",
"file_name": "pkg_1.file",
"depends": ["pkg_1_1", "pkg_3"],
"imports": ["pkg_1"],
"install_dir": "site",
"sha_256": "c1e38241013b5663e902fff97eb8585e98e6df446585da1dcf2ad121b52c2143",
}
|
3,208 |
def in_iexact(column, values):
"""Operator to test if any of the given values are (case-insensitively)
matching to values in the given column."""
from operator import or_
query = u"{}__iexact".format(column)
return reduce(or_, [Q(**{query: v}) for v in values])
|
def in_iexact(column, values):
"""Operator to test if any of the given values are (case-insensitive)
matching to values in the given column."""
from operator import or_
query = u"{}__iexact".format(column)
return reduce(or_, [Q(**{query: v}) for v in values])
|
13,038 |
def parse_description_json_field(apps, schema):
Category = apps.get_model("product", "Category")
for category in Category.objects.iterator():
category.description_plaintext = parse_draftjs_content_to_string(
category.description
)
category.save()
|
def parse_description_json_field(apps, schema):
Category = apps.get_model("product", "Category")
for category in Category.objects.iterator():
category.description_plaintext = parse_draftjs_content_to_string(
category.description
)
category.save(update_fields=["description_plaintext"])
|
26,628 |
def string_comma_to_list(message: str) -> List[str]:
"""
Split string to list
"""
if not message:
return []
return message.split(",")
|
def string_comma_to_list(message: str) -> List[str]:
"""
Split string to list
"""
if not message:
return []
return message.split(",") if message else []
|
31,163 |
def create_alert(display_name: str, attributes=None):
"""
Create an alert using IdentityIQ SCIM API's.
Command: identityiq-create-alert
:type display_name: ``str``
:param display_name: Display name of the alert.
:type attributes: ``JSON``
:param attributes: List of JSON objects with the following structure.
[
{
'key': '',
'value': '',
'type': ''
}
]
:return: Newly created alert object (JSON).
"""
if display_name is None:
return None
data = {
'displayName': display_name,
'type': 'PAN XSOAR',
'attributes': attributes
}
return send_request(IIQ_SCIM_ALERTS_URL, "POST", json.dumps(data))
|
def create_alert(display_name: str, attributes=None):
"""
Create an alert using IdentityIQ SCIM API's.
Command: identityiq-create-alert
:type display_name: ``str``
:param display_name: Display name of the alert.
:type attributes: ``list``
:param attributes: List of JSON objects with the following structure.
[
{
'key': '',
'value': '',
'type': ''
}
]
:return: Newly created alert object (JSON).
"""
if display_name is None:
return None
data = {
'displayName': display_name,
'type': 'PAN XSOAR',
'attributes': attributes
}
return send_request(IIQ_SCIM_ALERTS_URL, "POST", json.dumps(data))
|
37,044 |
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to. By default this is sys.stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
47,964 |
def draw_detections(frame, detections, palette, labels, threshold):
size = frame.shape[:2]
for detection in detections:
if detection.score > threshold:
xmin = max(int(detection.xmin), 0)
ymin = max(int(detection.ymin), 0)
xmax = min(int(detection.xmax), size[1])
ymax = min(int(detection.ymax), size[0])
class_id = int(detection.id)
color = palette[class_id]
det_label = labels[class_id] if labels and len(labels) >= class_id else '#{}'.format(class_id)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(frame, '{} {:.1%}'.format(det_label, detection.score),
(xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
if isinstance(detection, models.DetectionWithLandmarks):
for landmark in detection.landmarks:
cv2.circle(frame, (int(landmark[0]), int(landmark[1])), 2, (0, 255, 255), 2)
y_scale, x_scale = [i / j for i, j in zip(size, (720, 1280))]
scale = max(x_scale, y_scale, 1)
frame = cv2.resize(frame, (int(size[1]/scale), int(size[0]/scale)))
return frame
|
def draw_detections(frame, detections, palette, labels, threshold):
size = frame.shape[:2]
for detection in detections:
if detection.score > threshold:
xmin = max(int(detection.xmin), 0)
ymin = max(int(detection.ymin), 0)
xmax = min(int(detection.xmax), size[1])
ymax = min(int(detection.ymax), size[0])
class_id = int(detection.id)
color = palette[class_id]
det_label = labels[class_id] if labels and len(labels) >= class_id else '#{}'.format(class_id)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(frame, '{} {:.1%}'.format(det_label, detection.score),
(xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
if isinstance(detection, models.DetectionWithLandmarks):
for landmark in detection.landmarks:
cv2.circle(frame, (int(landmark[0]), int(landmark[1])), 2, (0, 255, 255), 2)
scale = max(size[0] / 720, size[1] / 1280)
frame = cv2.resize(frame, (int(size[1]/scale), int(size[0]/scale)))
return frame
|
32,332 |
def split_context_path(path: str) -> Tuple[List[str], str]:
"""
Split a context path separated by a dot with a replacement name
following a comma into the key tree the replacement name.
:param path: The context path (with a optional replacement name)
:return: The key tree and the replacement name.
"""
key_tree = []
key = []
itr = iter(path)
for c in itr:
if c == '\\':
try:
key.append(next(itr))
except StopIteration:
key.append('\\')
elif c == '.':
key_tree.append(''.join(key))
key = []
else:
key.append(c)
names = ''.join(key).rsplit(',', 1)
if len(names) == 2:
key_tree.append(names[0])
return key_tree, names[1]
elif len(names) == 1:
key_tree.append(names[0])
return key_tree, names[0]
else:
raise ValueError(f'Invalid path: {path}')
|
def split_context_path(path: str) -> Tuple[List[str], str]:
"""
Split a context path separated by a dot with a replacement name
following a comma into the key tree the replacement name.
:param path: The context path (with an optional replacement name)
:return: The key tree and the replacement name.
"""
key_tree = []
key = []
itr = iter(path)
for c in itr:
if c == '\\':
try:
key.append(next(itr))
except StopIteration:
key.append('\\')
elif c == '.':
key_tree.append(''.join(key))
key = []
else:
key.append(c)
names = ''.join(key).rsplit(',', 1)
if len(names) == 2:
key_tree.append(names[0])
return key_tree, names[1]
elif len(names) == 1:
key_tree.append(names[0])
return key_tree, names[0]
else:
raise ValueError(f'Invalid path: {path}')
|
58,852 |
def _compile_with_cache_hip(source, options, arch, cache_dir, extra_source,
backend='hiprtc', name_expressions=None,
log_stream=None, cache_in_memory=False,
use_converter=True):
global _empty_file_preprocess_cache
# TODO(leofang): this might be possible but is currently undocumented
if _is_cudadevrt_needed(options):
raise ValueError('separate compilation is not supported in HIP')
# HIP's equivalent of -ftz=true, see ROCm-Developer-Tools/HIP#2252
# Notes:
# - For hipcc, this should just work, as invalid options would cause errors
# See https://clang.llvm.org/docs/ClangCommandLineReference.html.
# - For hiprtc, this is a no-op until the compiler options like -D and -I
# are accepted, see ROCm-Developer-Tools/HIP#2182 and
# ROCm-Developer-Tools/HIP#2248
options += ('-fcuda-flush-denormals-to-zero',)
# Workaround ROCm 4.3 LLVM_PATH issue in hipRTC #5689
rocm_build_version = driver.get_build_version()
if rocm_build_version >= 40300000 and rocm_build_version < 40500000:
options += ('-I' + get_rocm_path() + "/llvm/lib/clang/13.0.0/include/",)
if cache_dir is None:
cache_dir = get_cache_dir()
# As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the
# right arch without setting HCC_AMDGPU_TARGET, so we don't need
# to tell the compiler which arch we are targeting. But, we still
# need to know arch as part of the cache key:
if arch is None:
# On HIP, gcnArch is computed from "compute capability":
# https://github.com/ROCm-Developer-Tools/HIP/blob/rocm-4.0.0/rocclr/hip_device.cpp#L202
arch = device.Device().compute_capability
if use_converter:
source = _convert_to_hip_source(source, extra_source,
is_hiprtc=(backend == 'hiprtc'))
env = (arch, options, _get_nvrtc_version(), backend)
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
# This is for checking HIPRTC/HIPCC compiler internal version
if backend == 'hiprtc':
base = _preprocess_hiprtc('', options)
else:
base = _preprocess_hipcc('', options)
_empty_file_preprocess_cache[env] = base
key_src = '%s %s %s %s' % (env, base, source, extra_source)
key_src = key_src.encode('utf-8')
name = '%s.hsaco' % hashlib.md5(key_src).hexdigest()
mod = function.Module()
if not cache_in_memory:
# Read from disk cache
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
# To handle conflicts in concurrent situation, we adopt lock-free
# method to avoid performance degradation.
# We force recompiling to retrieve C++ mangled names if so desired.
path = os.path.join(cache_dir, name)
if os.path.exists(path) and not name_expressions:
with open(path, 'rb') as f:
data = f.read()
if len(data) >= 32:
hash_value = data[:32]
binary = data[32:]
binary_hash = hashlib.md5(binary).hexdigest().encode('ascii')
if hash_value == binary_hash:
mod.load(binary)
return mod
else:
# Enforce compiling -- the resulting kernel will be cached elsewhere,
# so we do nothing
pass
if backend == 'hiprtc':
# compile_using_nvrtc calls hiprtc for hip builds
binary, mapping = compile_using_nvrtc(
source, options, arch, name + '.cu', name_expressions,
log_stream, cache_in_memory)
mod._set_mapping(mapping)
else:
binary = compile_using_hipcc(source, options, arch, log_stream)
if not cache_in_memory:
# Write to disk cache
binary_hash = hashlib.md5(binary).hexdigest().encode('ascii')
# shutil.move is not atomic operation, so it could result in a
# corrupted file. We detect it by appending md5 hash at the beginning
# of each cache file. If the file is corrupted, it will be ignored
# next time it is read.
with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
tf.write(binary_hash)
tf.write(binary)
temp_path = tf.name
shutil.move(temp_path, path)
# Save .cu source file along with .hsaco
if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
with open(path + '.cpp', 'w') as f:
f.write(source)
else:
# we don't do any disk I/O
pass
mod.load(binary)
return mod
|
def _compile_with_cache_hip(source, options, arch, cache_dir, extra_source,
backend='hiprtc', name_expressions=None,
log_stream=None, cache_in_memory=False,
use_converter=True):
global _empty_file_preprocess_cache
# TODO(leofang): this might be possible but is currently undocumented
if _is_cudadevrt_needed(options):
raise ValueError('separate compilation is not supported in HIP')
# HIP's equivalent of -ftz=true, see ROCm-Developer-Tools/HIP#2252
# Notes:
# - For hipcc, this should just work, as invalid options would cause errors
# See https://clang.llvm.org/docs/ClangCommandLineReference.html.
# - For hiprtc, this is a no-op until the compiler options like -D and -I
# are accepted, see ROCm-Developer-Tools/HIP#2182 and
# ROCm-Developer-Tools/HIP#2248
options += ('-fcuda-flush-denormals-to-zero',)
# Workaround ROCm 4.3 LLVM_PATH issue in hipRTC #5689
rocm_build_version = driver.get_build_version()
if rocm_build_version >= 40300000 and rocm_build_version < 40500000:
options += (
'-I' + get_rocm_path() + '/llvm/lib/clang/13.0.0/include/',)
if cache_dir is None:
cache_dir = get_cache_dir()
# As of ROCm 3.5.0 hiprtc/hipcc can automatically pick up the
# right arch without setting HCC_AMDGPU_TARGET, so we don't need
# to tell the compiler which arch we are targeting. But, we still
# need to know arch as part of the cache key:
if arch is None:
# On HIP, gcnArch is computed from "compute capability":
# https://github.com/ROCm-Developer-Tools/HIP/blob/rocm-4.0.0/rocclr/hip_device.cpp#L202
arch = device.Device().compute_capability
if use_converter:
source = _convert_to_hip_source(source, extra_source,
is_hiprtc=(backend == 'hiprtc'))
env = (arch, options, _get_nvrtc_version(), backend)
base = _empty_file_preprocess_cache.get(env, None)
if base is None:
# This is for checking HIPRTC/HIPCC compiler internal version
if backend == 'hiprtc':
base = _preprocess_hiprtc('', options)
else:
base = _preprocess_hipcc('', options)
_empty_file_preprocess_cache[env] = base
key_src = '%s %s %s %s' % (env, base, source, extra_source)
key_src = key_src.encode('utf-8')
name = '%s.hsaco' % hashlib.md5(key_src).hexdigest()
mod = function.Module()
if not cache_in_memory:
# Read from disk cache
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir, exist_ok=True)
# To handle conflicts in concurrent situation, we adopt lock-free
# method to avoid performance degradation.
# We force recompiling to retrieve C++ mangled names if so desired.
path = os.path.join(cache_dir, name)
if os.path.exists(path) and not name_expressions:
with open(path, 'rb') as f:
data = f.read()
if len(data) >= 32:
hash_value = data[:32]
binary = data[32:]
binary_hash = hashlib.md5(binary).hexdigest().encode('ascii')
if hash_value == binary_hash:
mod.load(binary)
return mod
else:
# Enforce compiling -- the resulting kernel will be cached elsewhere,
# so we do nothing
pass
if backend == 'hiprtc':
# compile_using_nvrtc calls hiprtc for hip builds
binary, mapping = compile_using_nvrtc(
source, options, arch, name + '.cu', name_expressions,
log_stream, cache_in_memory)
mod._set_mapping(mapping)
else:
binary = compile_using_hipcc(source, options, arch, log_stream)
if not cache_in_memory:
# Write to disk cache
binary_hash = hashlib.md5(binary).hexdigest().encode('ascii')
# shutil.move is not atomic operation, so it could result in a
# corrupted file. We detect it by appending md5 hash at the beginning
# of each cache file. If the file is corrupted, it will be ignored
# next time it is read.
with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
tf.write(binary_hash)
tf.write(binary)
temp_path = tf.name
shutil.move(temp_path, path)
# Save .cu source file along with .hsaco
if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
with open(path + '.cpp', 'w') as f:
f.write(source)
else:
# we don't do any disk I/O
pass
mod.load(binary)
return mod
|
58,621 |
def convert_ndarray_to_torch_tensor(
ndarray: np.ndarray,
dtype: Optional[torch.dtype] = None,
device: Optional[str] = None,
) -> torch.Tensor:
"""Convert a NumPy ndarray to a Torch Tensor.
Args:
ndarray: A NumPy ndarray that we wish to convert to a Torch Tensor.
dtype: A Torch dtype for the created tensor; if None, the dtype will be
inferred from the NumPy ndarray data.
Returns: A Torch Tensor.
"""
ndarray = _unwrap_ndarray_object_type_if_needed(ndarray)
# NOTE: PyTorch raises a `UserWarning` if `ndarray` isn't writeable. See #28003.
if not ndarray.flags["WRITEABLE"]:
ndarray = np.copy(ndarray)
return torch.as_tensor(ndarray, dtype=dtype, device=device)
|
def convert_ndarray_to_torch_tensor(
ndarray: np.ndarray,
dtype: Optional[torch.dtype] = None,
device: Optional[str] = None,
) -> torch.Tensor:
"""Convert a NumPy ndarray to a Torch Tensor.
Args:
ndarray: A NumPy ndarray that we wish to convert to a Torch Tensor.
dtype: A Torch dtype for the created tensor; if None, the dtype will be
inferred from the NumPy ndarray data.
Returns: A Torch Tensor.
"""
ndarray = _unwrap_ndarray_object_type_if_needed(ndarray)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tensor = torch.as_tensor(ndarray, dtype=dtype, device=device)
return tensor
|
922 |
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
Solves the `system` using substitution method. It is used in
:func:`~.nonlinsolve`. This will be called from :func:`~.nonlinsolve` when any
equation(s) is non polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dict
If No symbol values is known then empty list otherwise
symbol as keys and corresponding value in dict.
exclude : Set of expression.
Mostly denominator expression(s) of the equations of the system.
Final solution should not satisfy these expressions.
all_symbols : known_symbols + symbols(unsolved).
Returns
=======
A FiniteSet of ordered tuple of values of `all_symbols` for which the
`system` has solution. Order of values in the tuple is same as symbols
present in the parameter `all_symbols`. If parameter `all_symbols` is None
then same as symbols present in the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not :class:`~.Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> x, y = symbols('x, y', real=True)
>>> from sympy.solvers.solveset import substitution
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
{(-1, 1)}
* When you want soln should not satisfy eq $x + 1 = 0$
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
{(1, -1)}
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
{(-3, 4), (2, -1)}
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
{(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers))}
"""
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
msg = ('symbols should be given as a sequence, e.g. a list.'
'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
if not getattr(symbols[0], 'is_Symbol', False):
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
# when total_solveset_call equals total_conditionset
# it means that solveset failed to solve all eqs.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
# sort such that equation with the fewest potential symbols is first.
# means eq with less number of variable first in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
def add_intersection_complement(result, intersection_dict, complement_dict):
# If solveset has returned some intersection/complement
# for any symbol, it will be added in the final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
intersect_set, complement_set = None, None
for key_sym, value_sym in intersection_dict.items():
if key_sym == key_res:
intersect_set = value_sym
for key_sym, value_sym in complement_dict.items():
if key_sym == key_res:
complement_set = value_sym
if intersect_set or complement_set:
new_value = FiniteSet(value_res)
if intersect_set and intersect_set != S.Complexes:
new_value = Intersection(new_value, intersect_set)
if complement_set:
new_value = Complement(new_value, complement_set)
if new_value is S.EmptySet:
res_copy = None
break
elif new_value.is_FiniteSet and len(new_value) == 1:
res_copy[key_res] = set(new_value).pop()
else:
res_copy[key_res] = new_value
if res_copy is not None:
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sym, sol, soln_imageset):
"""Separate the Complements, Intersections, ImageSet lambda expr and
its base_set. This function returns the unmasks sol from different classes
of sets and also returns the appended ImageSet elements in a
soln_imageset (dict: where key as unmasked element and value as ImageSet).
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, ConditionSet):
# extracts any solution in ConditionSet
sol = sol.base_set
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
# We need in sequence so append finteset elements
# and then imageset or other.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] not in (S.Reals, S.Complexes):
# Sometimes solveset returns soln with intersection
# S.Reals or S.Complexes. We don't consider that
# intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imagest = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imagest
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
# replace all dummy variables (Imageset lambda variables)
# with zero before `checksol`. Considering fundamental soln
# for `checksol`.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude == true if it satisfies the expr of `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = eq if eq in (True, False) else checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
`imgset_yes` is (base, dummy_var) if there was imageset in previously
calculated result(otherwise empty tuple). `original_imageset` is dict
of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
# when `sym` and `sol` is `None` means no new
# soln. In that case we will append rnew directly after
# substituting original imagesets in rnew values if present
# (second last line of this function using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif sol in soln_imageset.keys():
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
# separate first, second priority. `res` that makes `eq` value equals
# to zero, should be used first then other result(second priority).
# If it is not done then we may miss some soln.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is :func:`~.solveset_complex` or :func:`~.solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
# sort such that equation with the fewest potential symbols is first.
# means eq with less variable first
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
(base,) = value_res.base_sets
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res).expand()
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
# `delete_res` is true, means substituting `res` in
# eq2 doesn't return `zero` or deleting the `res`
# (a soln) since it staisfies expr of `exclude`
# list.
result.remove(res)
continue # skip as it's independent of desired symbols
depen1, depen2 = (eq2.rewrite(Add)).as_independent(*unsolved_syms)
if (depen1.has(Abs) or depen2.has(Abs)) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection S.Reals, to confirm that
# soln is in domain=S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol's real soln, another symbol may have
# corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym)
except NotImplementedError:
# If sovleset is not able to solve equation `eq2`. Next
# time we may get soln using next equation `eq2`
continue
if isinstance(soln, ConditionSet):
if soln.base_set in (S.Reals, S.Complexes):
soln = S.EmptySet
# don't do `continue` we may get soln
# in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
else:
soln = soln.base_set
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
sym, soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sym, sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any(
ss in free for ss in got_symbol
):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
# put each solution in res and append the new result
# in the new result list (solution for symbol `s`)
# along with old results.
for k, v in res.items():
if isinstance(v, Expr) and isinstance(sol, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if sol in soln_imageset.keys():
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
# deleting the `res` (a soln) since it staisfies
# eq of `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
# end def _solve_using_know_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
# If total_solveset_call is equal to total_conditionset
# then solveset failed to solve all of the equations.
# In this case we return a ConditionSet here.
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# don't keep duplicate solutions
filtered_complex = []
for i in list(new_result_complex):
for j in list(new_result_real):
if i.keys() != j.keys():
continue
if all(a.dummy_eq(b) for a, b in zip(i.values(), j.values()) \
if type(a) != int or type(b) != int):
break
else:
filtered_complex.append(i)
# overall result
result = new_result_real + filtered_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
# means {None : None}
continue
# If length < len(all_symbols) means infinite soln.
# Some or all the soln is dependent on 1 symbol.
# eg. {x: y+2} then final soln {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y, y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections or complements:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, complements)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
|
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
Solves the `system` using substitution method. It is used in
:func:`~.nonlinsolve`. This will be called from :func:`~.nonlinsolve` when any
equation(s) is non polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dict
If No symbol values is known then empty list otherwise
symbol as keys and corresponding value in dict.
exclude : Set of expression.
Mostly denominator expression(s) of the equations of the system.
Final solution should not satisfy these expressions.
all_symbols : known_symbols + symbols(unsolved).
Returns
=======
A FiniteSet of ordered tuple of values of `all_symbols` for which the
`system` has solution. Order of values in the tuple is same as symbols
present in the parameter `all_symbols`. If parameter `all_symbols` is None
then same as symbols present in the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not :class:`~.Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> x, y = symbols('x, y', real=True)
>>> from sympy.solvers.solveset import substitution
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
{(-1, 1)}
* When you want a soln not satisfying $x + 1 = 0$
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
{(1, -1)}
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
{(-3, 4), (2, -1)}
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
{(ImageSet(Lambda(_n, I*(2*_n*pi + pi) + log(sin(2))), Integers), -2),
(ImageSet(Lambda(_n, 2*_n*I*pi + log(sin(2))), Integers), 2)}
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
{(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers)),
(ImageSet(Lambda(_n, 2*_n*I*pi - log(3)), Integers),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi - log(3)))), Integers))}
"""
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
msg = ('symbols should be given as a sequence, e.g. a list.'
'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
if not getattr(symbols[0], 'is_Symbol', False):
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
# when total_solveset_call equals total_conditionset
# it means that solveset failed to solve all eqs.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
# sort such that equation with the fewest potential symbols is first.
# means eq with less number of variable first in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
def add_intersection_complement(result, intersection_dict, complement_dict):
# If solveset has returned some intersection/complement
# for any symbol, it will be added in the final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
intersect_set, complement_set = None, None
for key_sym, value_sym in intersection_dict.items():
if key_sym == key_res:
intersect_set = value_sym
for key_sym, value_sym in complement_dict.items():
if key_sym == key_res:
complement_set = value_sym
if intersect_set or complement_set:
new_value = FiniteSet(value_res)
if intersect_set and intersect_set != S.Complexes:
new_value = Intersection(new_value, intersect_set)
if complement_set:
new_value = Complement(new_value, complement_set)
if new_value is S.EmptySet:
res_copy = None
break
elif new_value.is_FiniteSet and len(new_value) == 1:
res_copy[key_res] = set(new_value).pop()
else:
res_copy[key_res] = new_value
if res_copy is not None:
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sym, sol, soln_imageset):
"""Separate the Complements, Intersections, ImageSet lambda expr and
its base_set. This function returns the unmasks sol from different classes
of sets and also returns the appended ImageSet elements in a
soln_imageset (dict: where key as unmasked element and value as ImageSet).
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, ConditionSet):
# extracts any solution in ConditionSet
sol = sol.base_set
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
# We need in sequence so append finteset elements
# and then imageset or other.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] not in (S.Reals, S.Complexes):
# Sometimes solveset returns soln with intersection
# S.Reals or S.Complexes. We don't consider that
# intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imagest = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imagest
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
# replace all dummy variables (Imageset lambda variables)
# with zero before `checksol`. Considering fundamental soln
# for `checksol`.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude == true if it satisfies the expr of `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = eq if eq in (True, False) else checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
`imgset_yes` is (base, dummy_var) if there was imageset in previously
calculated result(otherwise empty tuple). `original_imageset` is dict
of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
# when `sym` and `sol` is `None` means no new
# soln. In that case we will append rnew directly after
# substituting original imagesets in rnew values if present
# (second last line of this function using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif sol in soln_imageset.keys():
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
# separate first, second priority. `res` that makes `eq` value equals
# to zero, should be used first then other result(second priority).
# If it is not done then we may miss some soln.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is :func:`~.solveset_complex` or :func:`~.solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
# sort such that equation with the fewest potential symbols is first.
# means eq with less variable first
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
(base,) = value_res.base_sets
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res).expand()
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
# `delete_res` is true, means substituting `res` in
# eq2 doesn't return `zero` or deleting the `res`
# (a soln) since it staisfies expr of `exclude`
# list.
result.remove(res)
continue # skip as it's independent of desired symbols
depen1, depen2 = (eq2.rewrite(Add)).as_independent(*unsolved_syms)
if (depen1.has(Abs) or depen2.has(Abs)) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection S.Reals, to confirm that
# soln is in domain=S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol's real soln, another symbol may have
# corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym)
except NotImplementedError:
# If sovleset is not able to solve equation `eq2`. Next
# time we may get soln using next equation `eq2`
continue
if isinstance(soln, ConditionSet):
if soln.base_set in (S.Reals, S.Complexes):
soln = S.EmptySet
# don't do `continue` we may get soln
# in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
else:
soln = soln.base_set
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
sym, soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sym, sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any(
ss in free for ss in got_symbol
):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
# put each solution in res and append the new result
# in the new result list (solution for symbol `s`)
# along with old results.
for k, v in res.items():
if isinstance(v, Expr) and isinstance(sol, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if sol in soln_imageset.keys():
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
# deleting the `res` (a soln) since it staisfies
# eq of `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
# end def _solve_using_know_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
# If total_solveset_call is equal to total_conditionset
# then solveset failed to solve all of the equations.
# In this case we return a ConditionSet here.
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# don't keep duplicate solutions
filtered_complex = []
for i in list(new_result_complex):
for j in list(new_result_real):
if i.keys() != j.keys():
continue
if all(a.dummy_eq(b) for a, b in zip(i.values(), j.values()) \
if type(a) != int or type(b) != int):
break
else:
filtered_complex.append(i)
# overall result
result = new_result_real + filtered_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
# means {None : None}
continue
# If length < len(all_symbols) means infinite soln.
# Some or all the soln is dependent on 1 symbol.
# eg. {x: y+2} then final soln {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y, y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections or complements:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, complements)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
|
33,275 |
def _parsed_args():
""" parse commandline arguments with argparse """
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-i',
'--input',
type=str,
required=True,
help='path to input file',
)
parser.add_argument(
'-o',
'--output',
type=str,
required=True,
help='path to output file',
)
parser.add_argument(
'-I',
'--input-adapter',
type=str,
default=None,
help="Explicitly use this adapter for reading the input file",
)
parser.add_argument(
'-O',
'--output-adapter',
type=str,
default=None,
help="Explicitly use this adapter for writing the output file",
)
parser.add_argument(
'-T',
'--tracks',
type=str,
default=None,
help="Pick one or more tracks, by 0-based index, separated by commas.",
)
parser.add_argument(
'-m',
'--media-linker',
type=str,
default="Default",
help=(
"Specify a media linker. 'Default' means use the "
"$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly "
"disable the linker, and anything else is interpreted as the name"
" of the media linker to use."
)
)
parser.add_argument(
'-a',
'--adapter-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to input adapter in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
)
parser.add_argument(
'-A',
'--output-adapter-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to output adapter in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
)
return parser.parse_args()
|
def _parsed_args():
""" parse commandline arguments with argparse """
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-i',
'--input',
type=str,
required=True,
help='path to input file',
)
parser.add_argument(
'-o',
'--output',
type=str,
required=True,
help='path to output file',
)
parser.add_argument(
'-I',
'--input-adapter',
type=str,
default=None,
help="Explicitly use this adapter for reading the input file",
)
parser.add_argument(
'-O',
'--output-adapter',
type=str,
default=None,
help="Explicitly use this adapter for writing the output file",
)
parser.add_argument(
'-T',
'--tracks',
type=str,
default=None,
help="Pick one or more tracks, by 0-based index, separated by commas.",
)
parser.add_argument(
'-m',
'--media-linker',
type=str,
default="Default",
help=(
"Specify a media linker. 'Default' means use the "
"$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly "
"disable the linker, and anything else is interpreted as the name"
" of the media linker to use."
)
)
parser.add_argument(
'-a',
'--adapter-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to input adapter in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
)
parser.add_argument(
'-A',
'--output-adapter-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to output adapter in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -A burrito="bar" -A taco=12.'
)
return parser.parse_args()
|
3,846 |
def to_pandas_edgelist(
G,
source="source",
target="target",
nodelist=None,
dtype=None,
order=None,
edge_key=None,
):
"""Returns the graph edge list as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
source : str or int, optional
A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int, optional
A valid column name (string or integer) for the target nodes (for the
directed case).
nodelist : list, optional
Use only nodes specified in nodelist
dtype : dtype, default None
Use to create the DataFrame. Data type to force.
Only a single dtype is allowed. If None, infer.
order : None
An unused parameter mistakenly included in the function.
This is deprecated and will be removed in NetworkX v3.0.
edge_key : str or int or None, optional (default=None)
A valid column name (string or integer) for the edge keys (for the
multigraph case). If None, edge keys are not stored in the DataFrame.
Returns
-------
df : Pandas DataFrame
Graph edge list
Examples
--------
>>> G = nx.Graph(
... [
... ("A", "B", {"cost": 1, "weight": 7}),
... ("C", "E", {"cost": 9, "weight": 10}),
... ]
... )
>>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"])
>>> df[["source", "target", "cost", "weight"]]
source target cost weight
0 A B 1 7
1 C E 9 10
>>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})])
>>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey')
>>> df[['source', 'target', 'cost', 'ekey']]
source target cost ekey
0 A B 1 0
1 A B 9 1
"""
import pandas as pd
if nodelist is None:
edgelist = G.edges(data=True)
else:
edgelist = G.edges(nodelist, data=True)
source_nodes = [s for s, t, d in edgelist]
target_nodes = [t for s, t, d in edgelist]
all_attrs = set().union(*(d.keys() for s, t, d in edgelist))
if source in all_attrs:
raise nx.NetworkXError(f"Source name '{source}' is an edge attr name")
if target in all_attrs:
raise nx.NetworkXError(f"Target name '{target}' is an edge attr name")
nan = float("nan")
edge_attr = {k: [d.get(k, nan) for s, t, d in edgelist] for k in all_attrs}
if G.is_multigraph() and edge_key is not None:
if edge_key in all_attrs:
raise nx.NetworkXError(f"Edge key name '{edge_key}' is an edge attr name")
edge_keys = [k for s, t, k in G.edges(keys=True)]
edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys}
else:
edgelistdict = {source: source_nodes, target: target_nodes}
edgelistdict.update(edge_attr)
return pd.DataFrame(edgelistdict, dtype=dtype)
|
def to_pandas_edgelist(
G,
source="source",
target="target",
nodelist=None,
dtype=None,
order=None,
edge_key=None,
):
"""Returns the graph edge list as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
source : str or int, optional
A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int, optional
A valid column name (string or integer) for the target nodes (for the
directed case).
nodelist : list, optional
Use only nodes specified in nodelist
dtype : dtype, default None
Use to create the DataFrame. Data type to force.
Only a single dtype is allowed. If None, infer.
order : None
An unused parameter mistakenly included in the function.
This is deprecated and will be removed in NetworkX v3.0.
edge_key : str or int or None, optional (default=None)
A valid column name (string or integer) for the edge keys (for the
multigraph case). If None, edge keys are not stored in the DataFrame.
Returns
-------
df : Pandas DataFrame
Graph edge list
Examples
--------
>>> G = nx.Graph(
... [
... ("A", "B", {"cost": 1, "weight": 7}),
... ("C", "E", {"cost": 9, "weight": 10}),
... ]
... )
>>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"])
>>> df[["source", "target", "cost", "weight"]]
source target cost weight
0 A B 1 7
1 C E 9 10
>>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})])
>>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey')
>>> df[['source', 'target', 'cost', 'ekey']]
source target cost ekey
0 A B 1 0
1 A B 9 1
"""
import pandas as pd
if nodelist is None:
edgelist = G.edges(data=True)
else:
edgelist = G.edges(nodelist, data=True)
source_nodes = [s for s, t, d in edgelist]
target_nodes = [t for s, t, d in edgelist]
all_attrs = set().union(*(d.keys() for s, t, d in edgelist))
if source in all_attrs:
raise nx.NetworkXError(f"Source name '{source}' is an edge attr name")
if target in all_attrs:
raise nx.NetworkXError(f"Target name '{target}' is an edge attr name")
nan = float("nan")
edge_attr = {k: [d.get(k, nan) for s, t, d in edgelist] for k in all_attrs}
if G.is_multigraph() and edge_key is not None:
if edge_key in all_attrs:
raise nx.NetworkXError(f"Edge key name '{edge_key}' is an edge attr name")
edge_keys = [k for _, _, k in G.edges(keys=True)]
edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys}
else:
edgelistdict = {source: source_nodes, target: target_nodes}
edgelistdict.update(edge_attr)
return pd.DataFrame(edgelistdict, dtype=dtype)
|
12,984 |
def test__update_configuration_structure_removes_old_keys(
monkeypatch, plugin_configuration
):
mocked_config = {
name: value
for name, value in PluginSample.CONFIG_STRUCTURE.items()
if name != "Username"
}
monkeypatch.setattr(PluginSample, "CONFIG_STRUCTURE", mocked_config)
configuration = PluginSample._update_configuration_structure(
plugin_configuration.configuration
)
assert all([config_field["name"] != "Username" for config_field in configuration])
|
def test_update_configuration_structure_removes_old_keys(
monkeypatch, plugin_configuration
):
mocked_config = {
name: value
for name, value in PluginSample.CONFIG_STRUCTURE.items()
if name != "Username"
}
monkeypatch.setattr(PluginSample, "CONFIG_STRUCTURE", mocked_config)
configuration = PluginSample._update_configuration_structure(
plugin_configuration.configuration
)
assert all([config_field["name"] != "Username" for config_field in configuration])
|
7,830 |
def get_rotation_matrix(rotation, order='xyz'):
r"""Generate a 3x3 rotation matrix from input angles
Parameters
----------
rotation : 3-tuple of float
A 3-tuple of angles :math:`(\phi, \theta, \psi)` in degrees where the
first element is the rotation about the x-axis in the fixed laboratory
frame, the second element is the rotation about the y-axis in the fixed
laboratory frame, and the third element is the rotation about the
z-axis in the fixed laboratory frame. The rotations are active
rotations.
order : str, optinoal
A string of 'x', 'y', and 'z' in some order specifying which rotation
to perform first, second, and third. Defaults to 'xyz' which means, the
rotation by angle phi about x will be applied first, followed by theta
about y and then phi about z. This corresponds to an x-y-z extrinsic
rotation as well as a z-y'-x'' intrinsic rotation using Tait-Bryan
angles :math:`(\phi, \theta, \psi)`.
"""
check_type('surface rotation', rotation, Iterable, Real)
check_length('surface rotation', rotation, 3)
phi, theta, psi = np.array(rotation)*(np.pi/180.)
cx, sx = np.cos(phi), np.sin(phi)
cy, sy = np.cos(theta), np.sin(theta)
cz, sz = np.cos(psi), np.sin(psi)
R = {}
R['x'] = np.array([[1., 0., 0.], [0., cx, -sx], [0., sx, cx]])
R['y'] = np.array([[cy, 0., sy], [0., 1., 0.], [-sy, 0., cy]])
R['z'] = np.array([[cz, -sz, 0.], [sz, cz, 0.], [0., 0., 1.]])
R1, R2, R3 = tuple(R[xi] for xi in order)
return R3 @ R2 @ R1
|
def get_rotation_matrix(rotation, order='xyz'):
r"""Generate a 3x3 rotation matrix from input angles
Parameters
----------
rotation : 3-tuple of float
A 3-tuple of angles :math:`(\phi, \theta, \psi)` in degrees where the
first element is the rotation about the x-axis in the fixed laboratory
frame, the second element is the rotation about the y-axis in the fixed
laboratory frame, and the third element is the rotation about the
z-axis in the fixed laboratory frame. The rotations are active
rotations.
order : str, optinoal
A string of 'x', 'y', and 'z' in some order specifying which rotation
to perform first, second, and third. Defaults to 'xyz' which means, the
rotation by angle phi about x will be applied first, followed by theta
about y and then phi about z. This corresponds to an x-y-z extrinsic
rotation as well as a z-y'-x'' intrinsic rotation using Tait-Bryan
angles :math:`(\phi, \theta, \psi)`.
"""
check_type('surface rotation', rotation, Iterable, Real)
check_length('surface rotation', rotation, 3)
phi, theta, psi = np.array(rotation)*(np.pi/180.)
cx, sx = np.cos(phi), np.sin(phi)
cy, sy = np.cos(theta), np.sin(theta)
cz, sz = np.cos(psi), np.sin(psi)
R = {}
R['x'] = np.array([[1., 0., 0.], [0., cx, -sx], [0., sx, cx]])
R['y'] = np.array([[cy, 0., sy], [0., 1., 0.], [-sy, 0., cy]])
R['z'] = np.array([[cz, -sz, 0.], [sz, cz, 0.], [0., 0., 1.]])
R1, R2, R3 = (R[xi] for xi in order)
return R3 @ R2 @ R1
|
1,339 |
def test_adaboost_regressor_sample_weight():
# check that giving weight will have an influence on the error computed
# for a weak learner
X, y = datasets.make_regression(n_features=50, random_state=0)
# add an arbitrary outlier to make sure
X = np.vstack([X, X.sum(axis=0)])
y = np.hstack([y, 10])
regr_no_outlier = AdaBoostRegressor(
base_estimator=LinearRegression(), n_estimators=4, random_state=0
)
regr_with_weight = clone(regr_no_outlier)
# fit 2 models:
# - a model without the outlier
# - a model containing the outlier but with a null sample-weight
# Therefore, the error of the first weak learner will be identical.
regr_no_outlier.fit(X[:-1], y[:-1])
sample_weight = np.array([1.] * (y.size - 1) + [0.])
regr_with_weight.fit(X, y, sample_weight=sample_weight)
# check that the error is similar with 1 decimal
assert (regr_no_outlier.estimator_errors_[0] ==
pytest.approx(regr_with_weight.estimator_errors_[0], abs=1e-1))
|
def test_adaboostregressor_sample_weight():
# check that giving weight will have an influence on the error computed
# for a weak learner
X, y = datasets.make_regression(n_features=50, random_state=0)
# add an arbitrary outlier to make sure
X = np.vstack([X, X.sum(axis=0)])
y = np.hstack([y, 10])
regr_no_outlier = AdaBoostRegressor(
base_estimator=LinearRegression(), n_estimators=4, random_state=0
)
regr_with_weight = clone(regr_no_outlier)
# fit 2 models:
# - a model without the outlier
# - a model containing the outlier but with a null sample-weight
# Therefore, the error of the first weak learner will be identical.
regr_no_outlier.fit(X[:-1], y[:-1])
sample_weight = np.array([1.] * (y.size - 1) + [0.])
regr_with_weight.fit(X, y, sample_weight=sample_weight)
# check that the error is similar with 1 decimal
assert (regr_no_outlier.estimator_errors_[0] ==
pytest.approx(regr_with_weight.estimator_errors_[0], abs=1e-1))
|
13,072 |
def _is_refund_ongoing(payment):
"""Return True if refund is ongoing for given payment."""
return (
payment.transactions.filter(
kind__in=[TransactionKind.REFUND_ONGOING], is_success=True
).exists()
if payment
else False
)
|
def _is_refund_ongoing(payment):
"""Return True if refund is ongoing for given payment."""
return (
payment.transactions.filter(
kind=TransactionKind.REFUND_ONGOING, is_success=True
).exists()
if payment
else False
)
|
39,395 |
def test_texture_repr():
texture = pyvista.Texture(examples.mapfile)
tex_repr = str(repr(texture))
assert 'Cube Map:\tFalse' in tex_repr
assert 'Components:\t3' in tex_repr
assert 'Dimensions:\t2048, 1024\n' in tex_repr
|
def test_texture_repr():
texture = pyvista.Texture(examples.mapfile)
tex_repr = repr(texture)
assert 'Cube Map:\tFalse' in tex_repr
assert 'Components:\t3' in tex_repr
assert 'Dimensions:\t2048, 1024\n' in tex_repr
|
32,467 |
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses the date_string function to the corresponding datetime object.
Note: If possible (e.g. running Python 3), it is suggested to use
dateutil.parser.parse or dateparser.parse functions instead.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime.datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime.datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime.datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to parse. (required)
:type date_format: ``str``
:param date_format:
The date format of the date string. If the date format is known, it should be provided. (optional)
:return: The parsed datetime.
:rtype: ``(datetime.datetime, datetime.datetime)``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise ValueError(e)
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
# Make the fractions shorter less than 6 charactors
# e.g. '2022-01-23T12:34:56.123456789+09:00' to '2022-01-23T12:34:56.123456+09:00'
date_string = re.sub(r'(.*?\.[0-9]{6})[0-9]*([Zz]|[+-]\S+?)', '\\1\\2', date_string)
return datetime.strptime(date_string, date_format)
|
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses the date_string function to the corresponding datetime object.
Note: If possible (e.g. running Python 3), it is suggested to use
dateutil.parser.parse or dateparser.parse functions instead.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime.datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime.datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime.datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to parse. (required)
:type date_format: ``str``
:param date_format:
The date format of the date string. If the date format is known, it should be provided. (optional)
:return: The parsed datetime.
:rtype: ``(datetime.datetime, datetime.datetime)``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise ValueError(e)
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
# Make the fractions shorter less than 6 charactors
# e.g. '2022-01-23T12:34:56.123456789+09:00' to '2022-01-23T12:34:56.123456+09:00'
date_string = re.sub(r'(.*?\.[0-9]{6})[0-9]*', '\\1', date_string)
return datetime.strptime(date_string, date_format)
|
28,280 |
def _insert_into_data_dict(
existing_values: np.ndarray,
new_values: np.ndarray,
write_status: Optional[int],
shape: Optional[Tuple[int, ...]]
) -> Tuple[np.ndarray, Optional[int]]:
if shape is None or write_status is None:
try:
data = np.append(existing_values, new_values, axis=0)
except ValueError:
# we cannot append into a ragged array so make that manually
n_existing = existing_values.shape[0]
n_new = new_values.shape[0]
n_rows = n_existing + n_new
data = np.ndarray((n_rows,), dtype=object)
for i in range(n_existing):
data[i] = np.atleast_1d(existing_values[i])
for i, j in enumerate(range(n_existing, n_existing+n_new)):
data[j] = np.atleast_1d(new_values[i])
return data, None
else:
if existing_values.dtype.kind in ('U', 'S'):
# string type arrays may be too small for the new data
# read so rescale if needed.
if new_values.dtype.itemsize > existing_values.dtype.itemsize:
existing_values = existing_values.astype(new_values.dtype)
n_values = new_values.size
new_write_status = write_status+n_values
if new_write_status > existing_values.size:
log.warning(f"Incorrect shape of dataset: Dataset is expected to "
f"contain {existing_values.size} points but trying to "
f"add an amount of data that makes it contain {new_write_status} points. Cache will "
f"be flattened into a 1D array")
return (np.append(existing_values.flatten(),
new_values.flatten(), axis=0),
new_write_status)
else:
existing_values.ravel()[write_status:new_write_status] = new_values.ravel()
return existing_values, new_write_status
|
def _insert_into_data_dict(
existing_values: np.ndarray,
new_values: np.ndarray,
write_status: Optional[int],
shape: Optional[Tuple[int, ...]]
) -> Tuple[np.ndarray, Optional[int]]:
if shape is None or write_status is None:
try:
data = np.append(existing_values, new_values, axis=0)
except ValueError:
# we cannot append into a ragged array so make that manually
n_existing = existing_values.shape[0]
n_new = new_values.shape[0]
n_rows = n_existing + n_new
data = np.ndarray((n_rows,), dtype=object)
data = np.ndarray((n_rows,), dtype=existing_values.dtype)
for i in range(n_existing):
data[i] = np.atleast_1d(existing_values[i])
for i, j in enumerate(range(n_existing, n_existing+n_new)):
data[j] = np.atleast_1d(new_values[i])
return data, None
else:
if existing_values.dtype.kind in ('U', 'S'):
# string type arrays may be too small for the new data
# read so rescale if needed.
if new_values.dtype.itemsize > existing_values.dtype.itemsize:
existing_values = existing_values.astype(new_values.dtype)
n_values = new_values.size
new_write_status = write_status+n_values
if new_write_status > existing_values.size:
log.warning(f"Incorrect shape of dataset: Dataset is expected to "
f"contain {existing_values.size} points but trying to "
f"add an amount of data that makes it contain {new_write_status} points. Cache will "
f"be flattened into a 1D array")
return (np.append(existing_values.flatten(),
new_values.flatten(), axis=0),
new_write_status)
else:
existing_values.ravel()[write_status:new_write_status] = new_values.ravel()
return existing_values, new_write_status
|
41,096 |
def get_parser():
"""Construct the parser."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--pretrained",
type=str,
help="Pretrained model."
)
parser.add_argument(
"--toolkit",
type=str,
help="Toolkit for Extracting X-vectors."
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="Verbosity level."
)
parser.add_argument(
"--device",
type=str,
default='cuda:0',
help="Verbosity level."
)
parser.add_argument(
"in_folder",
type=Path,
help="Path to the input data."
)
parser.add_argument(
"out_folder",
type=Path,
help="Output folder to save the xvectors.",
)
return parser
|
def get_parser():
"""Construct the parser."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--pretrained_model",
type=str,
help="Pretrained model."
)
parser.add_argument(
"--toolkit",
type=str,
help="Toolkit for Extracting X-vectors."
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="Verbosity level."
)
parser.add_argument(
"--device",
type=str,
default='cuda:0',
help="Verbosity level."
)
parser.add_argument(
"in_folder",
type=Path,
help="Path to the input data."
)
parser.add_argument(
"out_folder",
type=Path,
help="Output folder to save the xvectors.",
)
return parser
|
49,662 |
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy. Nested attrs classes ca
be updated by passing (nested) dicts of values.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
value = getattr(inst, attr_name)
if init_name not in changes:
# Add original value to changes
changes[init_name] = value
elif has(value):
# Evolve nested attrs classes
changes[init_name] = evolve(value, **changes[init_name])
return cls(**changes)
|
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy. Nested ``attrs`` classes
can be updated by passing (nested) dicts of values.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
value = getattr(inst, attr_name)
if init_name not in changes:
# Add original value to changes
changes[init_name] = value
elif has(value):
# Evolve nested attrs classes
changes[init_name] = evolve(value, **changes[init_name])
return cls(**changes)
|
31,352 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'picus-threat-list': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
7,271 |
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only grayscale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
41,687 |
def download_and_extract(
buildpath: Path, srcpath: Path, src_metadata: Dict[str, Any], args
) -> Path:
"""
Download the source from specified in the meta data, then checksum it, then
extract the archive into srcpath.
buildpath -- The path to the build directory. Generally will be
$(PYOIDE_ROOT)/packages/<package-name>/build/.
srcpath -- The place we want the source to end up. Will generally be
$(PYOIDE_ROOT)/packages/<package-name>/build/<package-name>-<package-version>.
pkg -- The dictionary from parsing meta.yaml.
"""
response = request.urlopen(src_metadata["url"])
_, parameters = cgi.parse_header(response.headers.get("Content-Disposition", ""))
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
os.makedirs(os.path.dirname(tarballpath), exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
try:
check_checksum(tarballpath, src_metadata)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
extract_dir_name = src_metadata.get("extract_dir")
if not extract_dir_name:
extract_dir_name = trim_archive_extension(tarballname)
return buildpath / extract_dir_name
|
def download_and_extract(
buildpath: Path, srcpath: Path, src_metadata: Dict[str, Any], args
) -> Path:
"""
Download the source from specified in the meta data, then checksum it, then
extract the archive into srcpath.
buildpath -- The path to the build directory. Generally will be
$(PYOIDE_ROOT)/packages/<package-name>/build/.
srcpath -- The place we want the source to end up. Will generally be
$(PYOIDE_ROOT)/packages/<package-name>/build/<package-name>-<package-version>.
pkg -- The dictionary from parsing meta.yaml.
"""
response = request.urlopen(src_metadata["url"])
_, parameters = cgi.parse_header(response.headers.get("Content-Disposition", ""))
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
os.makedirs(tarballpath.parent, exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
try:
check_checksum(tarballpath, src_metadata)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
extract_dir_name = src_metadata.get("extract_dir")
if not extract_dir_name:
extract_dir_name = trim_archive_extension(tarballname)
return buildpath / extract_dir_name
|
4,451 |
def test_foci_mapping(tmp_path):
"""Test mapping foci to the surface."""
tiny_brain, _ = tiny(tmp_path)
foci_coords = tiny_brain.geo['lh'].coords[:2] + 0.01
tiny_brain.add_foci(foci_coords, map_surface='white')
assert_array_equal(tiny_brain._data['lh']['foci'],
tiny_brain.geo['lh'].coords[:2])
|
def test_foci_mapping(tmp_path, renderer_interactive_pyvistaqt):
"""Test mapping foci to the surface."""
tiny_brain, _ = tiny(tmp_path)
foci_coords = tiny_brain.geo['lh'].coords[:2] + 0.01
tiny_brain.add_foci(foci_coords, map_surface='white')
assert_array_equal(tiny_brain._data['lh']['foci'],
tiny_brain.geo['lh'].coords[:2])
|
31,994 |
def create_time(given_time) -> str:
"""converts given argument time to iso format,
if received None returns None"""
if not given_time:
return given_time
datetime_time = arg_to_datetime(given_time)
if not datetime_time:
raise DemistoException("Time parameter supplied in invalid, please supply a valid argument")
return datetime_time.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
def create_time(given_time) -> str:
"""converts given argument time to iso format,
if received None returns None"""
if not given_time:
return given_time
datetime_time = arg_to_datetime(given_time)
if not datetime_time:
raise DemistoException("Time parameter supplied in invalid, please supply a valid argument")
return datetime_time.isoformat()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.