id | original | modified |
---|---|---|
2,428 |
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
"""Check input_features and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Whether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for jransformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out`, e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
"""Check input_features and generate names if needed.
Commonly used in :term:`get_feature_names_out`.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
generate_names : bool, default=True
Whether to generate names when `input_features` is `None` and
`estimator.feature_names_in_` is not defined. This is useful for transformers
that validate `input_features` but do not require them in
:term:`get_feature_names_out`, e.g. `PCA`.
Returns
-------
feature_names_in : ndarray of str or `None`
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
if not generate_names:
return
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
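For a concrete feel of the fallback described in the docstring above, here is a minimal numpy-only sketch of the generated-name branch (the value of `n_features_in_` is made up for illustration):

import numpy as np

# When neither `input_features` nor `feature_names_in_` is available,
# names are generated from `n_features_in_` as x0, x1, ...
n_features_in_ = 4
generated = np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
print(generated)  # ['x0' 'x1' 'x2' 'x3']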
45,267 |
def train(
params: Dict,
dtrain: ModinDMatrix,
*args,
evals=(),
nthread: Optional[int] = cpu_count(),
evenly_data_distribution: Optional[bool] = True,
**kwargs,
):
"""
Train XGBoost model.
Parameters
----------
params : dict
Booster params.
dtrain : ModinDMatrix
Data to be trained.
nthread : int
Number of threads to use in each node. By default it is equal to the
number of threads on the master node.
evenly_data_distribution : boolean, default True
Whether to distribute partitions evenly between nodes or not.
If `False`, data transfer between nodes is minimized, but the data
may not be evenly distributed.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for
`evals_result`, which is returned as part of function return value
instead of argument.
Returns
-------
dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
LOGGER.info("Training started")
s = time.time()
X, y = dtrain
assert len(X) == len(y)
X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution)
y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution)
assert len(X_row_parts) == len(y_row_parts), "Unaligned train data"
# Create remote actors
actors = create_actors(nthread=nthread)
add_as_eval_method = None
if len(evals):
for (eval_data, method) in evals:
if id(eval_data) == id(dtrain):
add_as_eval_method = method
evals.remove((eval_data, method))
evals_unwrapped = [
(
(
unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution),
unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution),
eval_method,
)
)
for ((eval_X, eval_y), eval_method) in evals
]
for (
eval_X_row_parts,
eval_y_row_parts,
eval_method,
) in evals_unwrapped:
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.add_eval_data.remote(
*Xy, eval_method=eval_method
),
eval_X_row_parts,
eval_y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.set_train_data.remote(
*Xy, add_as_eval_method=add_as_eval_method
),
X_row_parts,
y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
LOGGER.info(f"Data preparation time: {time.time() - s} s")
s = time.time()
# Start Rabit tracker
env = _start_rabit_tracker(len(actors))
rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Train
fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors]
# All results should be the same because of Rabit tracking. So we just
# return the first one.
result = ray.get(fut[0])
LOGGER.info(f"Training time: {time.time() - s} s")
LOGGER.info("Training finished")
return result
|
def train(
params: Dict,
dtrain: ModinDMatrix,
*args,
evals=(),
nthread: Optional[int] = cpu_count(),
evenly_data_distribution: Optional[bool] = True,
**kwargs,
):
"""
Train XGBoost model.
Parameters
----------
params : dict
Booster params.
dtrain : ModinDMatrix
Data to be trained.
nthread : int
Number of threads to use in each node. By default it is equal to the
number of threads on the master node.
evenly_data_distribution : boolean, default True
Whether to distribute partitions evenly between nodes or not.
If `False`, data transfer between nodes is minimized, but the data
may not be evenly distributed.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for
`evals_result`, which is returned as part of function return value
instead of argument.
Returns
-------
dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
LOGGER.info("Training started")
s = time.time()
X, y = dtrain
assert len(X) == len(y)
X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution)
y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution)
assert len(X_row_parts) == len(y_row_parts), "Unaligned train data"
# Create remote actors
actors = create_actors(nthread=nthread)
add_as_eval_method = None
if evals:
for (eval_data, method) in evals:
if id(eval_data) == id(dtrain):
add_as_eval_method = method
evals.remove((eval_data, method))
evals_unwrapped = [
(
(
unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution),
unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution),
eval_method,
)
)
for ((eval_X, eval_y), eval_method) in evals
]
for (
eval_X_row_parts,
eval_y_row_parts,
eval_method,
) in evals_unwrapped:
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.add_eval_data.remote(
*Xy, eval_method=eval_method
),
eval_X_row_parts,
eval_y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.set_train_data.remote(
*Xy, add_as_eval_method=add_as_eval_method
),
X_row_parts,
y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
LOGGER.info(f"Data preparation time: {time.time() - s} s")
s = time.time()
# Start Rabit tracker
env = _start_rabit_tracker(len(actors))
rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Train
fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors]
# All results should be the same because of Rabit tracking. So we just
# return the first one.
result = ray.get(fut[0])
LOGGER.info(f"Training time: {time.time() - s} s")
LOGGER.info("Training finished")
return result
|
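As a small aside, the `rabit_args` encoding in both variants above is plain string formatting over the tracker environment; a standalone sketch (the environment values below are invented, not real tracker output) shows the byte strings each actor receives:

# Hypothetical tracker environment, encoded exactly like `rabit_args` above.
env = {"DMLC_TRACKER_URI": "127.0.0.1", "DMLC_TRACKER_PORT": "9091"}
rabit_args = [("%s=%s" % item).encode() for item in env.items()]
print(rabit_args)  # [b'DMLC_TRACKER_URI=127.0.0.1', b'DMLC_TRACKER_PORT=9091']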
44,016 |
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if spectrum in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = np.array([f for f in spectrum if f > 0.0])
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
def _reconstruct_gen(fun, spectrum, shifts=None, fun_at_zero=None):
r"""Reconstruct a univariate (real-valued) Fourier series with given spectrum.
Args:
fun (callable): Fourier series to reconstruct with signature ``float -> float``
spectrum (Sequence): Frequency spectrum of the Fourier series; non-positive
frequencies are ignored
shifts (list): Shift angles at which to evaluate ``fun`` for the reconstruction
Chosen equidistantly within the interval :math:`[0, 2\pi/f_\text{min}]` if ``shifts=None``
where :math:`f_\text{min}` is the smallest frequency in ``spectrum``.
fun_at_zero (float): Value of ``fun`` at zero. If :math:`0` is among the ``shifts``
and ``fun_at_zero`` is provided, one evaluation of ``fun`` is saved.
Returns:
callable: Reconstructed Fourier series with :math:`R` frequencies in ``spectrum``,
as ``qml.numpy`` based function and coinciding with ``fun`` on :math:`2R+1` points.
"""
# pylint: disable=unused-argument
have_fun_at_zero = fun_at_zero is not None
have_shifts = shifts is not None
# For an empty/trivial spectrum, the function simply is constant
if spectrum in ([], [0.0]):
if have_shifts:
fun_value = fun(shifts[0])
if have_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
else:
fun_value = fun_at_zero if have_fun_at_zero else fun(0.0)
def constant_fn(x):
"""Univariate reconstruction of a constant Fourier series."""
return fun_value
return constant_fn
spectrum = spectrum[spectrum > 0.0]
f_max = max(spectrum)
# If no shifts are provided, choose equidistant ones
if not have_shifts:
R = len(spectrum)
shifts = np.arange(-R, R + 1) * 2 * np.pi / (f_max * (2 * R + 1)) * R
zero_idx = R
need_fun_at_zero = True
elif have_fun_at_zero:
zero_idx = np.where(np.isclose(shifts, 0.0))[0]
zero_idx = zero_idx[0] if len(zero_idx) > 0 else None
need_fun_at_zero = zero_idx is not None
# Take care of shifts close to zero if fun_at_zero was provided
if have_fun_at_zero and need_fun_at_zero:
# Only one shift may be zero at a time
shifts = np.concatenate([[shifts[zero_idx]], shifts[:zero_idx], shifts[zero_idx + 1 :]])
evals = np.array([fun_at_zero] + list(map(fun, shifts[1:])))
else:
if have_fun_at_zero and not need_fun_at_zero:
warnings.warn(_warn_text_fun_at_zero_ignored)
evals = np.array(list(map(fun, shifts)))
L = len(shifts)
# Construct the coefficient matrix case by case
C1 = np.ones((L, 1))
C2 = np.cos(np.outer(shifts, spectrum))
C3 = np.sin(np.outer(shifts, spectrum))
C = np.hstack([C1, C2, C3])
# Solve the system of linear equations
cond = np.linalg.cond(C)
if cond > 1e8:
warnings.warn(
f"The condition number of the Fourier transform matrix is very large: {cond}.",
UserWarning,
)
W = np.linalg.solve(C, evals)
# Extract the Fourier coefficients
R = (L - 1) // 2
a0 = W[0]
a = W[1 : R + 1]
b = W[R + 1 :]
# Construct the Fourier series
def _reconstruction(x):
"""Univariate reconstruction based on arbitrary shifts."""
return a0 + np.dot(a, np.cos(spectrum * x)) + np.dot(b, np.sin(spectrum * x))
return _reconstruction
|
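A toy, numpy-only check of the reconstruction idea used above (a sketch of the same linear system, not the library function itself): build the `[1 | cos | sin]` design matrix on 2R+1 equidistant shifts and solve for the coefficients of a known series.

import numpy as np

# Known series f(x) = 0.5 + cos(x) + 0.3*sin(2x) with spectrum {1, 2}.
spectrum = np.array([1.0, 2.0])
R = len(spectrum)

def fun(x):
    return 0.5 + np.cos(x) + 0.3 * np.sin(2 * x)

shifts = np.arange(-R, R + 1) * 2 * np.pi / (2 * R + 1)
C = np.hstack([
    np.ones((2 * R + 1, 1)),
    np.cos(np.outer(shifts, spectrum)),
    np.sin(np.outer(shifts, spectrum)),
])
W = np.linalg.solve(C, fun(shifts))
print(np.round(W, 3))  # [a0, a1, a2, b1, b2] = [0.5, 1.0, 0.0, 0.0, 0.3]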
22,342 |
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return "%s seconds" % secs
elif value < 3600:
return "%s minutes" % mins
else:
return "%s hours and %s minutes" % (hours, mins)
|
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return f"{secs} seconds"
elif value < 3600:
return "%s minutes" % mins
else:
return "%s hours and %s minutes" % (hours, mins)
|
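A few sanity checks of the branches above (integer inputs; the output is exactly what the function produces, including the somewhat rough pluralisation):

print(seconds_to_str(42))    # "42 seconds"
print(seconds_to_str(150))   # "2 minutes" (remaining seconds are dropped)
print(seconds_to_str(7260))  # "2 hours and 1 minutes"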
36,574 |
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
"""
if isinstance(node_or_string, str):
# bpo-41887: Allow leading whitespace
node_or_string = parse(node_or_string.strip(" \t"), mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _raise_malformed_node(node):
raise ValueError(f'malformed node or string: {node!r}')
def _convert_num(node):
if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
_raise_malformed_node(node)
return node.value
def _convert_signed_num(node):
if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
operand = _convert_num(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
return _convert_num(node)
def _convert(node):
if isinstance(node, Constant):
return node.value
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif (isinstance(node, Call) and isinstance(node.func, Name) and
node.func.id == 'set' and node.args == node.keywords == []):
return set()
elif isinstance(node, Dict):
if len(node.keys) != len(node.values):
_raise_malformed_node(node)
return dict(zip(map(_convert, node.keys),
map(_convert, node.values)))
elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
left = _convert_signed_num(node.left)
right = _convert_num(node.right)
if isinstance(left, (int, float)) and isinstance(right, complex):
if isinstance(node.op, Add):
return left + right
else:
return left - right
return _convert_signed_num(node)
return _convert(node_or_string)
|
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
"""
if isinstance(node_or_string, str):
# bpo-41887: Allow leading whitespace
node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _raise_malformed_node(node):
raise ValueError(f'malformed node or string: {node!r}')
def _convert_num(node):
if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
_raise_malformed_node(node)
return node.value
def _convert_signed_num(node):
if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
operand = _convert_num(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
return _convert_num(node)
def _convert(node):
if isinstance(node, Constant):
return node.value
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif (isinstance(node, Call) and isinstance(node.func, Name) and
node.func.id == 'set' and node.args == node.keywords == []):
return set()
elif isinstance(node, Dict):
if len(node.keys) != len(node.values):
_raise_malformed_node(node)
return dict(zip(map(_convert, node.keys),
map(_convert, node.values)))
elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
left = _convert_signed_num(node.left)
right = _convert_num(node.right)
if isinstance(left, (int, float)) and isinstance(right, complex):
if isinstance(node.op, Add):
return left + right
else:
return left - right
return _convert_signed_num(node)
return _convert(node_or_string)
|
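Both variants above track CPython's `ast.literal_eval`; a short usage reminder with the standard-library function (inputs chosen arbitrarily):

from ast import literal_eval

print(literal_eval("{'a': 1, 'b': (2.0, 3+4j)}"))  # {'a': 1, 'b': (2.0, (3+4j))}
print(literal_eval("[-1, +2, 1 + 2j]"))            # [-1, 2, (1+2j)]
print(literal_eval("'hello'"))                     # hello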
55,959 |
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--do_train", action="store_true", default=True, required=True, help="Train the question answering model"
)
parser.add_argument(
"--do_eval", action="store_true", default=True, required=True, help="Eval the question answering model"
)
parser.add_argument(
"--do_predict", action="store_false", default=False, required=False, help="Eval the question answering model"
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_seq_length",
type=int,
default=384,
help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed.",
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--doc_stride",
type=int,
default=128,
help="When splitting up a long document into chunks how much stride to take between chunks.",
)
parser.add_argument(
"--n_best_size",
type=int,
default=20,
help="The total number of n-best predictions to generate when looking for an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`.",
)
parser.add_argument(
"--version_2_with_negative",
type=bool,
default=False,
help="If true, some of the examples do not have an answer.",
)
parser.add_argument(
"--max_answer_length",
type=int,
default=30,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set.",
)
parser.add_argument(
"--max_val_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--max_test_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of test examples to this",
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
return args
|
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--do_train", action="store_true", default=True, required=True, help="Train the question answering model"
)
parser.add_argument(
"--do_eval", action="store_true", default=True, required=True, help="Eval the question answering model"
)
parser.add_argument(
"--do_predict", action="store_false", help="Eval the question answering model"
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--max_seq_length",
type=int,
default=384,
help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed.",
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--doc_stride",
type=int,
default=128,
help="When splitting up a long document into chunks how much stride to take between chunks.",
)
parser.add_argument(
"--n_best_size",
type=int,
default=20,
help="The total number of n-best predictions to generate when looking for an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`.",
)
parser.add_argument(
"--version_2_with_negative",
type=bool,
default=False,
help="If true, some of the examples do not have an answer.",
)
parser.add_argument(
"--max_answer_length",
type=int,
default=30,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set.",
)
parser.add_argument(
"--max_val_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--max_test_samples",
type=int,
default=None,
help="For debugging purposes or quicker training, truncate the number of test examples to this",
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
return args
|
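One behaviour worth flagging in the listing above: `--version_2_with_negative` and `--overwrite_cache` are declared with `type=bool`, and argparse's bool conversion treats any non-empty string as true. A standalone illustration of that gotcha (the flag name is reused here only for clarity):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--version_2_with_negative", type=bool, default=False)

# bool("False") is True, so an explicit "False" on the command line still enables the flag.
print(parser.parse_args(["--version_2_with_negative", "False"]))
# Namespace(version_2_with_negative=True)
print(parser.parse_args([]))
# Namespace(version_2_with_negative=False)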
55,950 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
def preprocess_function(examples):
examples = tokenizer(examples[text_column_name])
return group_texts(examples)
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
remove_columns=column_names,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
remove_columns=column_names,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
def preprocess_function(examples):
examples = tokenizer(examples[text_column_name])
return group_texts(examples)
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
remove_columns=column_names,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
if training_args.do_eval:
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
remove_columns=column_names,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
|
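To make the `group_texts` step above concrete, here is a toy run of the same concatenate-and-chunk logic (block size and token ids are invented for the example):

block_size = 4
examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8, 9]]}

# Concatenate all texts, drop the remainder, then split into block_size chunks.
concatenated = {k: sum(examples[k], []) for k in examples}
total_length = (len(concatenated["input_ids"]) // block_size) * block_size
result = {
    k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
    for k, t in concatenated.items()
}
result["labels"] = result["input_ids"].copy()
print(result["input_ids"])  # [[1, 2, 3, 4], [5, 6, 7, 8]]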
10,423 |
def list_deprecations(argument_spec, params, prefix=''):
"""Return a list of deprecations
:arg argument_spec: An argument spec dictionary from a module
:arg params: Dictionary of all module parameters
:returns: List of dictionaries containing a message and version in which
the deprecated parameter will be removed, or an empty list::
[{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
"""
deprecations = []
for arg_name, arg_opts in argument_spec.items():
if arg_name in params:
if arg_opts.get('removed_in_version') is not None:
deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % (prefix + arg_name),
'version': arg_opts.get('removed_in_version')
})
# Check sub-argument spec
sub_argument_spec = arg_opts.get('options')
if sub_argument_spec is not None:
sub_arguments = params[arg_name]
if isinstance(sub_arguments, Mapping):
sub_arguments = [sub_arguments]
if isinstance(sub_arguments, list):
sub_prefix = prefix + arg_name + "' -> '"
for sub_params in sub_arguments:
if isinstance(sub_params, Mapping):
deprecations.extend(list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
return deprecations
|
def list_deprecations(argument_spec, params, prefix=''):
"""Return a list of deprecations
:arg argument_spec: An argument spec dictionary from a module
:arg params: Dictionary of all module parameters
:returns: List of dictionaries containing a message and version in which
the deprecated parameter will be removed, or an empty list::
[{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
"""
deprecations = []
for arg_name, arg_opts in argument_spec.items():
if arg_name in params:
if arg_opts.get('removed_in_version') is not None:
deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % (prefix + arg_name),
'version': arg_opts.get('removed_in_version')
})
# Check sub-argument spec
sub_argument_spec = arg_opts.get('options')
if sub_argument_spec is not None:
sub_arguments = params[arg_name]
if isinstance(sub_arguments, Mapping):
sub_arguments = [sub_arguments]
if isinstance(sub_arguments, list):
sub_prefix = "%s%s ->" % (prefix, arg_name)
for sub_params in sub_arguments:
if isinstance(sub_params, Mapping):
deprecations.extend(list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
return deprecations
|
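A minimal call of the function above with a made-up argument spec, matching the output shape shown in the docstring:

# Hypothetical spec/params pair, purely for illustration.
argument_spec = {"deptest": {"removed_in_version": "2.9"}}
params = {"deptest": 1}
print(list_deprecations(argument_spec, params))
# [{'msg': "Param 'deptest' is deprecated. See the module docs for more information",
#   'version': '2.9'}]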
36,329 |
def test_uuid_field(app):
field = doc.OAPIUUID()
assert field.serialize() == {"type": "string", "format": "uuid"}
@app.get("/<id:uuid>")
@doc.response(204, {})
def test(request):
return HTTPResponse(status=204)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
path = swagger_json["paths"]["/{id}"]["get"]
assert path["parameters"][0] == {
"in": "path",
"name": "id",
"type": "string",
"format": "uuid",
"required": True,
}
@app.get("/")
@doc.consumes(field, location="formData", required=True)
@doc.response(204, {})
def test(request):
return HTTPResponse(status=204)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
path = swagger_json["paths"]["/"]["get"]
assert path["parameters"][0] == {
"in": "formData",
"name": None,
"type": "string",
"format": "uuid",
"required": True,
}
|
def test_uuid_field(app):
field = doc.UUID()
assert field.serialize() == {"type": "string", "format": "uuid"}
@app.get("/<id:uuid>")
@doc.response(204, {})
def test(request):
return HTTPResponse(status=204)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
path = swagger_json["paths"]["/{id}"]["get"]
assert path["parameters"][0] == {
"in": "path",
"name": "id",
"type": "string",
"format": "uuid",
"required": True,
}
@app.get("/")
@doc.consumes(field, location="formData", required=True)
@doc.response(204, {})
def test(request):
return HTTPResponse(status=204)
_, response = app.test_client.get("/swagger/swagger.json")
assert response.status == 200
assert response.content_type == "application/json"
swagger_json = response.json
path = swagger_json["paths"]["/"]["get"]
assert path["parameters"][0] == {
"in": "formData",
"name": None,
"type": "string",
"format": "uuid",
"required": True,
}
|
2,474 |
def generate_data(case):
"""Generate regression/classification data."""
if case == "regression":
X, y = datasets.load_diabetes(return_X_y=True)
train_size = 0.8
elif case == "classification":
X, y = datasets.fetch_20newsgroups_vectorized(subset="all", return_X_y=True)
train_size = 0.4
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_size, random_state=0
)
data = {"X_train": X_train, "X_test": X_test, "y_train": y_train, "y_test": y_test}
return data
|
def generate_data(case):
"""Generate regression/classification data."""
if case == "regression":
X, y = datasets.load_diabetes(return_X_y=True)
train_size = 0.8
elif case == "classification":
X, y = datasets.fetch_20newsgroups_vectorized(subset="all", return_X_y=True)
train_size = 0.4 # to make the example run faster
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_size, random_state=0
)
data = {"X_train": X_train, "X_test": X_test, "y_train": y_train, "y_test": y_test}
return data
|
58,235 |
def get_containter_id():
# type: () -> str
container_info = get_container_info()
if container_info and container_info.container_id:
return container_info.container_id
return ""
|
def get_containter_id():
# type: () -> str
container_info = get_container_info()
if container_info:
return container_info.container_id or ""
return ""
|
6,453 |
def update_qty_columns(row_to_update, data_row):
fields = ["qty", "ordered_qty", "received_qty", "pending_qty", "qty_to_order"]
for field in fields:
row_to_update[field] += flt(data_row[field])
|
def update_qty_columns(row_to_update, data_row):
fields = ["qty", "ordered_qty", "received_qty", "qty_to_receive", "qty_to_order"]
for field in fields:
row_to_update[field] += flt(data_row[field])
|
41,536 |
def dice_score(im1, im2, eps=1.0):
"""
Computes the Dice coefficient between im1 and im2.
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum() + eps
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
|
def dice_score(im1, im2, eps=1.0):
"""
Computes the Dice coefficient between im1 and im2.
"""
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum() + eps
intersection = np.logical_and(im1, im2)
return (2. * intersection.sum() + eps)/ im_sum
|
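A quick sanity check on the smoothed Dice formula in the row above: the sketch below recomputes it by hand on two tiny masks. It only assumes NumPy; plain `bool` is used for the inputs since `np.bool` is deprecated in recent NumPy releases, and the result matches the modified `dice_score` with `eps=1.0`.
import numpy as np

# Two 2x2 binary masks that overlap in exactly one pixel.
a = np.array([[1, 0], [1, 0]], dtype=bool)
b = np.array([[1, 1], [0, 0]], dtype=bool)

eps = 1.0
intersection = np.logical_and(a, b).sum()        # 1
im_sum = a.sum() + b.sum() + eps                 # 2 + 2 + 1 = 5
print((2.0 * intersection + eps) / im_sum)       # 0.6, the value dice_score(a, b) above would return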
40,548 |
def get_appid():
return f"azure-cli-ext/{CLI_REPORTED_VERSION}"
|
def get_appid():
return f"az-cli-ext/{CLI_REPORTED_VERSION}"
|
7,269 |
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is grayscale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
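A hedged usage sketch for the LBP routine above. It assumes the scikit-image layout (`skimage.feature.local_binary_pattern`), from which this docstring appears to be taken, and shows the common pattern of turning the LBP code image into a histogram descriptor.
import numpy as np
from skimage.feature import local_binary_pattern

rng = np.random.default_rng(0)
image = rng.random((64, 64))                  # synthetic gray-level image
P, R = 8, 1.0                                 # 8 neighbours on a circle of radius 1
codes = local_binary_pattern(image, P, R, method='uniform')
# 'uniform' produces P + 2 distinct codes; a normalized histogram of the codes
# is the usual rotation-invariant texture descriptor.
hist, _ = np.histogram(codes, bins=np.arange(P + 3), density=True)
print(hist.shape)                             # (10,)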
51,535 |
def _form_loopy_kernel(kernel_domains, instructions, measure, args, **kwargs):
kargs = []
for var, (func, intent) in args.items():
if isinstance(func, constant.Constant):
if intent is not READ:
raise RuntimeError("Only READ access is allowed to Constant")
# Constants modelled as Globals, so no need for double
# indirection
ndof = func.dat.cdim
kargs.append(loopy.GlobalArg(var, dtype=func.dat.dtype, shape=(ndof,)))
else:
# Do we have a component of a mixed function?
if isinstance(func, Indexed):
c, i = func.ufl_operands
idx = i._indices[0]._value
ndof = c.function_space()[idx].finat_element.space_dimension()
cdim = c.dat[idx].cdim
dtype = c.dat[idx].dtype
else:
if func.function_space().ufl_element().family() == "Real":
ndof = func.function_space().dim() # == 1
kargs.append(loopy.GlobalArg(var, dtype=func.dat.dtype, shape=(ndof,)))
continue
else:
if len(func.function_space()) > 1:
raise NotImplementedError("Must index mixed function in par_loop.")
ndof = func.function_space().finat_element.space_dimension()
cdim = func.dat.cdim
dtype = func.dat.dtype
if measure.integral_type() == 'interior_facet':
ndof *= 2
# FIXME: shape for facets [2][ndof]?
kargs.append(loopy.GlobalArg(var, dtype=dtype, shape=(ndof, cdim)))
kernel_domains = kernel_domains.replace(var+".dofs", str(ndof))
if kernel_domains == "":
kernel_domains = "[] -> {[]}"
try:
key = (kernel_domains, tuple(instructions), tuple(map(tuple, kwargs.items())))
if kernel_cache is not None:
return kernel_cache[key]
else:
raise KeyError("No cache")
except KeyError:
kargs.append(...)
knl = loopy.make_function(kernel_domains, instructions, kargs, seq_dependencies=True,
name="par_loop_kernel", silenced_warnings=["summing_if_branches_ops"], target=loopy.CTarget())
if kernel_cache is not None:
return kernel_cache.setdefault(key, pyop2.Kernel(knl, "par_loop_kernel", **kwargs))
else:
return knl
|
def _form_loopy_kernel(kernel_domains, instructions, measure, args, **kwargs):
kargs = []
for var, (func, intent) in args.items():
if isinstance(func, constant.Constant):
if intent is not READ:
raise RuntimeError("Only READ access is allowed to Constant")
# Constants modelled as Globals, so no need for double
# indirection
ndof = func.dat.cdim
kargs.append(loopy.GlobalArg(var, dtype=func.dat.dtype, shape=(ndof,)))
else:
# Do we have a component of a mixed function?
if isinstance(func, Indexed):
c, i = func.ufl_operands
idx = i._indices[0]._value
ndof = c.function_space()[idx].finat_element.space_dimension()
cdim = c.dat[idx].cdim
dtype = c.dat[idx].dtype
else:
if func.function_space().ufl_element().family() == "Real":
ndof = func.function_space().dim() # == 1
kargs.append(loopy.GlobalArg(var, dtype=func.dat.dtype, shape=(ndof,)))
continue
else:
if len(func.function_space()) > 1:
raise NotImplementedError("Must index mixed function in par_loop.")
ndof = func.function_space().finat_element.space_dimension()
cdim = func.dat.cdim
dtype = func.dat.dtype
if measure.integral_type() == 'interior_facet':
ndof *= 2
# FIXME: shape for facets [2][ndof]?
kargs.append(loopy.GlobalArg(var, dtype=dtype, shape=(ndof, cdim)))
kernel_domains = kernel_domains.replace(var+".dofs", str(ndof))
if kernel_domains == "":
kernel_domains = "[] -> {[]}"
try:
key = (kernel_domains, tuple(instructions), tuple(map(tuple, kwargs.items())))
if kernel_cache is not None:
return kernel_cache[key]
else:
raise KeyError("No cache")
except KeyError:
kargs.append(...)
knl = loopy.make_function(kernel_domains, instructions, kargs, seq_dependencies=True,
name="par_loop_kernel", silenced_warnings=["summing_if_branches_ops"], target=loopy.CTarget())
knl = pyop2.Kernel(knl, "par_loop_kernel", **kwargs)
if kernel_cache is not None:
return kernel_cache.setdefault(key, knl)
else:
return knl
|
29,499 |
def sleep_untill_droplet_action_is_completed(droplet: digitalocean.Droplet, action_type: str) -> None:
incomplete = True
while incomplete:
for action in droplet.get_actions():
action.load()
print(f"...[{action.type}]: {action.status}")
if action.type == action_type and action.status == 'completed':
incomplete = False
break
if incomplete:
time.sleep(5)
droplet.load()
|
def sleep_until_droplet_action_is_completed(droplet: digitalocean.Droplet, action_type: str) -> None:
incomplete = True
while incomplete:
for action in droplet.get_actions():
action.load()
print(f"...[{action.type}]: {action.status}")
if action.type == action_type and action.status == 'completed':
incomplete = False
break
if incomplete:
time.sleep(5)
droplet.load()
|
6,598 |
def get_tax_data(doc):
from_address = get_company_address_details(doc)
from_shipping_state = from_address.get("state")
from_country_code = frappe.db.get_value("Country", from_address.country, "code")
from_country_code = from_country_code.upper()
to_address = get_shipping_address_details(doc)
to_shipping_state = to_address.get("state")
to_country_code = frappe.db.get_value("Country", to_address.country, "code")
to_country_code = to_country_code.upper()
shipping = sum([tax.tax_amount for tax in doc.taxes if tax.account_head == SHIP_ACCOUNT_HEAD])
line_items = [get_line_item_dict(item,doc.docstatus) for item in doc.items]
if from_shipping_state not in SUPPORTED_STATE_CODES:
from_shipping_state = get_state_code(from_address, 'Company')
if to_shipping_state not in SUPPORTED_STATE_CODES:
to_shipping_state = get_state_code(to_address, 'Shipping')
tax_dict = {
'from_country': from_country_code,
'from_zip': from_address.pincode,
'from_state': from_shipping_state,
'from_city': from_address.city,
'from_street': from_address.address_line1,
'to_country': to_country_code,
'to_zip': to_address.pincode,
'to_city': to_address.city,
'to_street': to_address.address_line1,
'to_state': to_shipping_state,
'shipping': shipping,
'amount': doc.net_total,
'plugin': 'erpnext',
'line_items': line_items
}
return tax_dict
|
def get_tax_data(doc):
from_address = get_company_address_details(doc)
from_shipping_state = from_address.get("state")
from_country_code = frappe.db.get_value("Country", from_address.country, "code")
from_country_code = from_country_code.upper()
to_address = get_shipping_address_details(doc)
to_shipping_state = to_address.get("state")
to_country_code = frappe.db.get_value("Country", to_address.country, "code")
to_country_code = to_country_code.upper()
shipping = sum([tax.tax_amount for tax in doc.taxes if tax.account_head == SHIP_ACCOUNT_HEAD])
line_items = [get_line_item_dict(item, doc.docstatus) for item in doc.items]
if from_shipping_state not in SUPPORTED_STATE_CODES:
from_shipping_state = get_state_code(from_address, 'Company')
if to_shipping_state not in SUPPORTED_STATE_CODES:
to_shipping_state = get_state_code(to_address, 'Shipping')
tax_dict = {
'from_country': from_country_code,
'from_zip': from_address.pincode,
'from_state': from_shipping_state,
'from_city': from_address.city,
'from_street': from_address.address_line1,
'to_country': to_country_code,
'to_zip': to_address.pincode,
'to_city': to_address.city,
'to_street': to_address.address_line1,
'to_state': to_shipping_state,
'shipping': shipping,
'amount': doc.net_total,
'plugin': 'erpnext',
'line_items': line_items
}
return tax_dict
|
58,877 |
def non_negative_tucker_hals(
tensor,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=1e-8,
sparsity_coefficients=None,
core_sparsity_coefficient=None,
fixed_modes=None,
random_state=None,
verbose=False,
normalize_factors=False,
return_errors=False,
exact=False,
algorithm="fista",
):
r"""Non-negative Tucker decomposition with HALS
Uses HALS to update each factor columnwise and uses
fista or active set algorithm to update the core, see [1]_
Parameters
----------
tensor : ndarray
rank : None, int or int list
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
    sparsity_coefficients : array of float (as many as the number of modes)
The sparsity coefficients are used for each factor
If set to None, the algorithm is computed without sparsity
Default: None
core_sparsity_coefficient : array of float. This coefficient imposes sparsity on core
when it is updated with fista.
Default: None
fixed_modes : array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: None
verbose : boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
normalize_factors : if True, aggregates the norms of the factors in the core.
return_errors : boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
exact : If it is True, the HALS nnls subroutines give results with high precision but it has a higher computational cost.
If it is False, the algorithm gives an approximate solution.
Default: False
algorithm : {'fista', 'active_set'}
Non negative least square solution to update the core.
Default: 'fista'
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
Notes
-----
Tucker decomposes a tensor into a core tensor and list of factors:
.. math::
tensor = [| core; factors[0], ... ,factors[-1] |],
We solve the following problem for each factor:
.. math::
\min_{tensor >= 0} ||tensor_i - factors[i]\times core_i \times (\prod_{i\neq j}(factors[j]))^T||^2,
If we define two variables such as:
.. math::
U = core_i \times (\prod_{i \neq j}(factors[j] \times factors[j]^T)), \\
M = tensor_i,
Gradient of the problem becomes:
.. math::
\delta = -U^TM + factors[i] \times U^TU,
In order to calculate UTU and UTM, we define two variables:
.. math::
CoreCross = \prod_{i\neq j}(core_i \times (\prod_{i\neq j}(factors[j]\times factors[j]^T)) \\
TensorCross = \prod_{i\neq j} tensor_i \times factors[i],
Then UTU and UTM becomes:
.. math::
U^TU = CoreCross_j \times core_j^T, \\
U^TM = (TensorCross_j \times core_j^T)^T,
References
----------
.. [1] G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
n_modes = tl.ndim(tensor)
if sparsity_coefficients is None or not isinstance(sparsity_coefficients, Iterable):
sparsity_coefficients = [sparsity_coefficients] * n_modes
if fixed_modes is None:
fixed_modes = []
if tl.ndim(tensor) - 1 in fixed_modes:
warnings.warn(
"You asked for fixing the last mode, which is not supported.\n The last mode will not be fixed. Consider using tl.moveaxis()"
)
fixed_modes.remove(tl.ndim(tensor) - 1)
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
nn_core, nn_factors = initialize_tucker(
tensor,
rank,
modes,
init=init,
svd=svd,
random_state=random_state,
non_negative=True,
)
# initialisation - declare local variables
norm_tensor = tl.norm(tensor, 2)
rec_errors = []
# Iterate over one step of NTD
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes:
# Computing Hadamard of cross-products
pseudo_inverse = nn_factors.copy()
for i, factor in enumerate(nn_factors):
if i != mode:
pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)), factor)
# UtU
core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
UtU = tl.dot(unfold(core_cross, mode), tl.transpose(unfold(nn_core, mode)))
# UtM
tensor_cross = multi_mode_dot(tensor, nn_factors, skip=mode, transpose=True)
MtU = tl.dot(
unfold(tensor_cross, mode), tl.transpose(unfold(nn_core, mode))
)
UtM = tl.transpose(MtU)
# Call the hals resolution with nnls, optimizing the current mode
nn_factor, _, _, _ = hals_nnls(
UtM,
UtU,
tl.transpose(nn_factors[mode]),
n_iter_max=100,
sparsity_coefficient=sparsity_coefficients[mode],
exact=exact,
)
nn_factors[mode] = tl.transpose(nn_factor)
# updating core
if algorithm == "fista":
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation = multi_mode_dot(tensor, nn_factors, transpose=True)
learning_rate = 1
for MtM in pseudo_inverse:
learning_rate *= 1 / (tl.truncated_svd(MtM)[1][0])
nn_core = fista(
core_estimation,
pseudo_inverse,
x=nn_core,
n_iter_max=n_iter_max,
sparsity_coef=core_sparsity_coefficient,
lr=learning_rate,
)
if algorithm == "active_set":
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation_vec = tl.base.tensor_to_vec(
tl.tenalg.mode_dot(
tensor_cross, tl.transpose(nn_factors[modes[-1]]), modes[-1]
)
)
pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
vectorcore = active_set_nnls(
core_estimation_vec, pseudo_inverse_kr, x=nn_core, n_iter_max=n_iter_max
)
nn_core = tl.reshape(vectorcore, tl.shape(nn_core))
# Adding the l1 norm value to the reconstruction error
sparsity_error = 0
for index, sparse in enumerate(sparsity_coefficients):
if sparse:
sparsity_error += 2 * (sparse * tl.norm(nn_factors[index], order=1))
# error computation
rec_error = (
tl.norm(tensor - tucker_to_tensor((nn_core, nn_factors)), 2) / norm_tensor
)
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print(
"reconstruction error={}, variation={}.".format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]
)
)
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print("converged in {} iterations.".format(iteration))
break
if normalize_factors:
nn_core, nn_factors = tucker_normalize((nn_core, nn_factors))
tensor = TuckerTensor((nn_core, nn_factors))
if return_errors:
return tensor, rec_errors
else:
return tensor
|
def non_negative_tucker_hals(
tensor,
rank,
n_iter_max=100,
init="svd",
svd="truncated_svd",
tol=1e-8,
sparsity_coefficients=None,
core_sparsity_coefficient=None,
fixed_modes=None,
random_state=None,
verbose=False,
normalize_factors=False,
return_errors=False,
exact=False,
algorithm="fista",
):
r"""Non-negative Tucker decomposition with HALS
Uses HALS to update each factor columnwise and uses
fista or active set algorithm to update the core, see [1]_
Parameters
----------
tensor : ndarray
rank : None, int or int list
size of the core tensor, ``(len(ranks) == tensor.ndim)``
if int, the same rank is used for all modes
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'truncated_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
    sparsity_coefficients : array of float (as many as the number of modes)
The sparsity coefficients are used for each factor
If set to None, the algorithm is computed without sparsity
Default: None
core_sparsity_coefficient : array of float. This coefficient imposes sparsity on core
when it is updated with fista.
Default: None
fixed_modes : array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: None
verbose : boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
normalize_factors : if True, aggregates the norms of the factors in the core.
return_errors : boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
exact : If it is True, the HALS nnls subroutines give results with high precision but it has a higher computational cost.
If it is False, the algorithm gives an approximate solution.
Default: False
algorithm : {'fista', 'active_set'}
Non negative least square solution to update the core.
Default: 'fista'
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
Notes
-----
Tucker decomposes a tensor into a core tensor and list of factors:
.. math::
tensor = [| core; factors[0], ... ,factors[-1] |],
We solve the following problem for each factor:
.. math::
\min_{tensor >= 0} ||tensor_i - factors[i]\times core_i \times (\prod_{i\neq j}(factors[j]))^T||^2,
If we define two variables such as:
.. math::
U = core_i \times (\prod_{i \neq j}(factors[j] \times factors[j]^T)), \\
M = tensor_i,
Gradient of the problem becomes:
.. math::
\delta = -U^TM + factors[i] \times U^TU,
In order to calculate UTU and UTM, we define two variables:
.. math::
CoreCross = \prod_{i\neq j}(core_i \times (\prod_{i\neq j}(factors[j]\times factors[j]^T)) \\
TensorCross = \prod_{i\neq j} tensor_i \times factors[i],
Then UTU and UTM becomes:
.. math::
U^TU = CoreCross_j \times core_j^T, \\
U^TM = (TensorCross_j \times core_j^T)^T,
References
----------
.. [1] G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
"""
rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
n_modes = tl.ndim(tensor)
if sparsity_coefficients is None or not isinstance(sparsity_coefficients, Iterable):
sparsity_coefficients = [sparsity_coefficients] * n_modes
if fixed_modes is None:
fixed_modes = []
if tl.ndim(tensor) - 1 in fixed_modes:
warnings.warn(
"You asked for fixing the last mode, which is not supported. The last mode will not be fixed."
" Consider using tl.moveaxis() to permute it to another position and keep it fixed there."
)
fixed_modes.remove(tl.ndim(tensor) - 1)
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
nn_core, nn_factors = initialize_tucker(
tensor,
rank,
modes,
init=init,
svd=svd,
random_state=random_state,
non_negative=True,
)
# initialisation - declare local variables
norm_tensor = tl.norm(tensor, 2)
rec_errors = []
# Iterate over one step of NTD
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes:
# Computing Hadamard of cross-products
pseudo_inverse = nn_factors.copy()
for i, factor in enumerate(nn_factors):
if i != mode:
pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)), factor)
# UtU
core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
UtU = tl.dot(unfold(core_cross, mode), tl.transpose(unfold(nn_core, mode)))
# UtM
tensor_cross = multi_mode_dot(tensor, nn_factors, skip=mode, transpose=True)
MtU = tl.dot(
unfold(tensor_cross, mode), tl.transpose(unfold(nn_core, mode))
)
UtM = tl.transpose(MtU)
# Call the hals resolution with nnls, optimizing the current mode
nn_factor, _, _, _ = hals_nnls(
UtM,
UtU,
tl.transpose(nn_factors[mode]),
n_iter_max=100,
sparsity_coefficient=sparsity_coefficients[mode],
exact=exact,
)
nn_factors[mode] = tl.transpose(nn_factor)
# updating core
if algorithm == "fista":
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation = multi_mode_dot(tensor, nn_factors, transpose=True)
learning_rate = 1
for MtM in pseudo_inverse:
learning_rate *= 1 / (tl.truncated_svd(MtM)[1][0])
nn_core = fista(
core_estimation,
pseudo_inverse,
x=nn_core,
n_iter_max=n_iter_max,
sparsity_coef=core_sparsity_coefficient,
lr=learning_rate,
)
if algorithm == "active_set":
pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]), nn_factors[-1])
core_estimation_vec = tl.base.tensor_to_vec(
tl.tenalg.mode_dot(
tensor_cross, tl.transpose(nn_factors[modes[-1]]), modes[-1]
)
)
pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
vectorcore = active_set_nnls(
core_estimation_vec, pseudo_inverse_kr, x=nn_core, n_iter_max=n_iter_max
)
nn_core = tl.reshape(vectorcore, tl.shape(nn_core))
# Adding the l1 norm value to the reconstruction error
sparsity_error = 0
for index, sparse in enumerate(sparsity_coefficients):
if sparse:
sparsity_error += 2 * (sparse * tl.norm(nn_factors[index], order=1))
# error computation
rec_error = (
tl.norm(tensor - tucker_to_tensor((nn_core, nn_factors)), 2) / norm_tensor
)
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print(
"reconstruction error={}, variation={}.".format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]
)
)
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print("converged in {} iterations.".format(iteration))
break
if normalize_factors:
nn_core, nn_factors = tucker_normalize((nn_core, nn_factors))
tensor = TuckerTensor((nn_core, nn_factors))
if return_errors:
return tensor, rec_errors
else:
return tensor
|
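A minimal usage sketch for the HALS Tucker routine documented above, assuming the TensorLy package layout (`tensorly.decomposition.non_negative_tucker_hals`): decompose a small random non-negative tensor and report the relative reconstruction error.
import numpy as np
import tensorly as tl
from tensorly.decomposition import non_negative_tucker_hals

rng = np.random.default_rng(0)
X = tl.tensor(rng.random((8, 9, 10)))                  # non-negative data
core, factors = non_negative_tucker_hals(X, rank=[3, 3, 3], n_iter_max=50, tol=1e-6)
X_hat = tl.tucker_to_tensor((core, factors))
print(tl.norm(X - X_hat) / tl.norm(X))                 # relative reconstruction error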
54,497 |
def run_tpe(k: int, sequence_dict: Dict[int, List[int]], hash_dict: Dict[int, int]) -> None:
hash_dict[k] = hash("nondeterministic hash")
sampler = TPESampler(n_startup_trials=1, seed=2, multivariate=True, group=True)
study = create_study(sampler=sampler)
sequence = []
for _ in range(10):
trial = study.ask()
picked = [i for i in range(10) if trial.suggest_int(str(i), 0, 1) == 1]
study.tell(trial, len(picked))
sequence.extend(picked)
sequence_dict[k] = sequence
|
def run_tpe(k: int, sequence_dict: Dict[int, List[int]], hash_dict: Dict[int, int]) -> None:
hash_dict[k] = hash("nondeterministic hash")
sampler = TPESampler(n_startup_trials=1, seed=2, multivariate=True, group=True)
study = create_study(sampler=sampler)
study.optimize(lambda t: np.sum(t.suggest_int(f"x{i}", 0, 10) for i in range(10)), n_trials=2)
sequence_dict[k] = list(study.trials[-1].params.values())
|
31,009 |
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- Files in the pull request are in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
29,718 |
def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
    handle: ``pynvml.nvml.LP_struct_c_nvmlDevice_t``
NVML handle to CUDA device
Returns
-------
out: bool
        ``True`` if the device handle has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return True
return False
|
def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
    handle: ``pynvml.nvml.LP_struct_c_nvmlDevice_t``
NVML handle to CUDA device
Returns
-------
out : bool
        ``True`` if the device handle has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return True
return False
|
11,296 |
def _get_client_args(**kwargs):
# type: (dict) -> Optional[dict]
identity_config = kwargs.pop("identity_config", None) or {}
url = os.environ.get(EnvironmentVariables.IDENTITY_ENDPOINT)
secret = os.environ.get(EnvironmentVariables.IDENTITY_HEADER)
thumbprint = os.environ.get(EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT)
if url and secret and thumbprint:
version = "2019-07-01-preview"
base_headers = {"Secret": secret}
connection_verify = False
else:
# Service Fabric managed identity isn't available in this environment
return None
return dict(
kwargs,
_identity_config=identity_config,
base_headers=base_headers,
connection_verify=connection_verify,
request_factory=functools.partial(_get_request, url, version),
)
|
def _get_client_args(**kwargs):
# type: (**Any) -> Optional[dict]
identity_config = kwargs.pop("identity_config", None) or {}
url = os.environ.get(EnvironmentVariables.IDENTITY_ENDPOINT)
secret = os.environ.get(EnvironmentVariables.IDENTITY_HEADER)
thumbprint = os.environ.get(EnvironmentVariables.IDENTITY_SERVER_THUMBPRINT)
if url and secret and thumbprint:
version = "2019-07-01-preview"
base_headers = {"Secret": secret}
connection_verify = False
else:
# Service Fabric managed identity isn't available in this environment
return None
return dict(
kwargs,
_identity_config=identity_config,
base_headers=base_headers,
connection_verify=connection_verify,
request_factory=functools.partial(_get_request, url, version),
)
|
8,925 |
def ctcp(function=None, *command_list):
"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``).
:param str ctcp_command: one or more CTCP command(s) on which to trigger
(really, the only useful value is ``ACTION``)
.. versionadded:: 7.1
This is now ``ctcp`` instead of ``intent``, and it can be called
without argument, assuming ``ACTION`` in that case.
.. note::
This used to be ``@intent``, for a long dead feature in the IRCv3 spec.
It is now replaced by ``@ctcp``, which can be used without arguments.
In that case, Sopel will assume to trigger on ``ACTION``.
As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``.
"""
default_commands = ('ACTION',) + command_list
if function is None:
return ctcp(*default_commands) # called as ``@ctcp()``
elif callable(function):
# called as ``@ctcp`` or ``@ctcp(function)``
# or even ``@ctcp(function, 'ACTION', ...)``
return ctcp(*default_commands)(function)
# function is not None, and it is not a callable
# called as ``@ctcp('ACTION', ...)``
ctcp_commands = (function,) + command_list
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "intents"):
function.intents = []
for name in ctcp_commands:
if name not in function.intents:
function.intents.append(name)
return function
return add_attribute
|
def ctcp(function=None, *command_list):
"""Decorate a callable to trigger on CTCP commands (mostly, ``ACTION``).
:param str command_list: one or more CTCP command(s) on which to trigger
(really, the only useful value is ``ACTION``)
.. versionadded:: 7.1
This is now ``ctcp`` instead of ``intent``, and it can be called
without argument, assuming ``ACTION`` in that case.
.. note::
This used to be ``@intent``, for a long dead feature in the IRCv3 spec.
It is now replaced by ``@ctcp``, which can be used without arguments.
In that case, Sopel will assume to trigger on ``ACTION``.
As ``sopel.module`` will be removed in Sopel 9, so will ``@intent``.
"""
default_commands = ('ACTION',) + command_list
if function is None:
return ctcp(*default_commands) # called as ``@ctcp()``
elif callable(function):
# called as ``@ctcp`` or ``@ctcp(function)``
# or even ``@ctcp(function, 'ACTION', ...)``
return ctcp(*default_commands)(function)
# function is not None, and it is not a callable
# called as ``@ctcp('ACTION', ...)``
ctcp_commands = (function,) + command_list
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "intents"):
function.intents = []
for name in ctcp_commands:
if name not in function.intents:
function.intents.append(name)
return function
return add_attribute
|
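Using only the decorator defined in this row, the sketch below shows its call forms and the attributes it sets; a plugin loader such as Sopel would normally consume `intents`, and the `(bot, trigger)` handler signature here is just illustrative.
# assumes the ctcp decorator above is in scope
@ctcp('ACTION')
def handle_action(bot, trigger):
    pass

print(handle_action.intents)           # ['ACTION']
print(handle_action._sopel_callable)   # True

@ctcp                                  # bare form defaults to ACTION
def handle_bare(bot, trigger):
    pass

print(handle_bare.intents)             # ['ACTION']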
10,647 |
def silence(name, silence=True):
''' Silence named warning on all Bokeh models.
Args:
name (str) : Names of warnings to silence
silence (bool) : Whether or not to silence the warning
Returns:
set conatining names of silenced warnings
This function adds or removes names from a set of silencers which
    is referred to when running ``check_integrity``. If a warning with a particular
name is added to the silencers - then it will never be raised.
.. code-block:: python
>>> silence('EMPTY_LAYOUT', True)
{'EMPTY_LAYOUT'}
>>> empty_row = Row
>>> check_integrity([empty_row])
'''
if isinstance(name, int):
raise ValueError('Input to silence should be name of warning - not code')
if silence:
__silencers__.add(name)
else:
__silencers__.remove(name)
return __silencers__
|
def silence(name, silence=True):
''' Silence named warning on all Bokeh models.
Args:
name (str) : Names of warnings to silence
silence (bool) : Whether or not to silence the warning
Returns:
A set containing the names of all silenced warnings
This function adds or removes names from a set of silencers which
    is referred to when running ``check_integrity``. If a warning with a particular
name is added to the silencers - then it will never be raised.
.. code-block:: python
>>> silence('EMPTY_LAYOUT', True)
{'EMPTY_LAYOUT'}
>>> empty_row = Row
>>> check_integrity([empty_row])
'''
if isinstance(name, int):
raise ValueError('Input to silence should be name of warning - not code')
if silence:
__silencers__.add(name)
else:
__silencers__.remove(name)
return __silencers__
|
3,819 |
def test_generator_ctor_old_style_pickle():
rg = np.random.Generator(np.random.PCG64DXSM(0))
rg.standard_normal(1)
# Directly call reduce which is used in pickline
ctor, args, state_a = rg.__reduce__()
# Simulate unpickling an old pickle that only has the name
assert args[:1] == ("PCG64DXSM",)
b = ctor(*args[:1])
b.bit_generator.state = state_a
state_b = b.bit_generator.state
assert state_a == state_b
|
def test_generator_ctor_old_style_pickle():
rg = np.random.Generator(np.random.PCG64DXSM(0))
rg.standard_normal(1)
# Directly call reduce which is used in pickling
ctor, args, state_a = rg.__reduce__()
# Simulate unpickling an old pickle that only has the name
assert args[:1] == ("PCG64DXSM",)
b = ctor(*args[:1])
b.bit_generator.state = state_a
state_b = b.bit_generator.state
assert state_a == state_b
|
55,615 |
def SAL(X_f, X_o,
minref=0.1,
maxref=150,
mindiff=10,
minsize=10,
minmax=0.1,
mindis=10,
):
"""This function calculates the components of Structure Amplitude Location (SAL) approach based on Wernli et al
(2008). Note that we used the thunderstorm detection algorithm developed by Feldmann et al (2021) to detect precipitation objects.
    This approach uses a multi-threshold algorithm to detect objects, instead of a single threshold (f).
Parameters
----------
df_obs: 2-d ndarray for the observation data.
df_forc: 2-d ndarray for the prediction data.
maximum_distance: maximum distance of the study area.
If the projection is rectangular (e.g., UTM), this value is the diagonal of the study area.
If the projection is not rectangular (e.g., lon/lat), 'max_dist' function calculates this value.
minref: minimum precipitation value for detecting object(s), If r star is lower than this threshold.
The default is 0.1 mm.
Returns
-------
sal:
A dataframe with all three components of SAL.
References
----------
:cite: Wernli, H., Hofmann, C., & Zimmer, M. (2009).
:cite: Feldmann, M., Germann, U., Gabella, M., & Berne, A. (2021).
See also
--------
pysteps.feature.tstorm
"""
if np.nanmax(X_o >= 0.1) & np.nanmax(
X_f >= 0.1
): # to avoid errors of nan values or very low precipitation
s = s_param(X_o, X_f, minref, maxref, mindiff, minsize, minmax, mindis)
a = Amplitude(X_o, X_f)
l = l1_param(X_o, X_f) + l2_param(
X_o, X_f, minref, maxref, mindiff, minsize, minmax, mindis
)
else:
s = np.nan
a = np.nan
l = np.nan
dic = {"S": s, "A": a, "L": l}
sal = pd.DataFrame(dic, index=[1])
sal.index.name = "step"
return sal
|
def SAL(X_f, X_o,
minref=0.1,
maxref=150,
mindiff=10,
minsize=10,
minmax=0.1,
mindis=10,
):
"""This function calculates the components of Structure Amplitude Location (SAL) approach based on Wernli et al
(2008). Note that we used the thunderstorm detection algorithm developed by Feldmann et al (2021) to detect precipitation objects.
    This approach uses a multi-threshold algorithm to detect objects, instead of a single threshold (f).
Parameters
----------
df_obs: 2-d ndarray
Observation data.
df_forc: 2-d ndarray
Prediction data.
maximum_distance: float
Maximum distance of the study area in XXX units.
If the projection is rectangular (e.g., UTM), this value is the diagonal of the study area.
If the projection is not rectangular (e.g., lon/lat), 'max_dist' function calculates this value.
minref: float
Minimum precipitation value for detecting object(s), If r star is lower than this threshold.
The default is 0.1 mm.
Returns
-------
sal:
A dataframe with all three components of SAL.
References
----------
:cite: Wernli, H., Hofmann, C., & Zimmer, M. (2009).
:cite: Feldmann, M., Germann, U., Gabella, M., & Berne, A. (2021).
See also
--------
pysteps.feature.tstorm
"""
if np.nanmax(X_o >= 0.1) & np.nanmax(
X_f >= 0.1
): # to avoid errors of nan values or very low precipitation
s = s_param(X_o, X_f, minref, maxref, mindiff, minsize, minmax, mindis)
a = Amplitude(X_o, X_f)
l = l1_param(X_o, X_f) + l2_param(
X_o, X_f, minref, maxref, mindiff, minsize, minmax, mindis
)
else:
s = np.nan
a = np.nan
l = np.nan
dic = {"S": s, "A": a, "L": l}
sal = pd.DataFrame(dic, index=[1])
sal.index.name = "step"
return sal
|
58,328 |
def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
    scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
    additive noise as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method. Default: 4. When stages=1, this becomes
Euler / Euler-Maruyama.
s : float
        The diffusion coefficient for models with additive noise. Default: 0 for
deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_e, N_x)
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
    scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
    additive noise as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method. Default: 4. When stages=1, this becomes
Euler / Euler-Maruyama.
s : float
        The diffusion coefficient (std. dev) for models with additive noise.
Default: 0, yielding deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_e, N_x)
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
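To make the staged updates above concrete, here is a small deterministic check that assumes the `rk4` function from this row is in scope: integrate dx/dt = -x from x(0) = 1 to t = 1 and compare against exp(-1).
import numpy as np

f = lambda t, x: -x                    # exact solution is exp(-t)
x, t, dt = 1.0, 0.0, 0.01
for _ in range(100):                   # 100 steps of size 0.01 reach t = 1
    x = rk4(f, x, t, dt, stages=4)     # s=0.0, so the deterministic branch runs
    t += dt
print(abs(x - np.exp(-1.0)))           # on the order of 1e-10 for the 4th-order scheme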
30,952 |
def update_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER)
try:
        # check if user exists and if it doesn't, create it
sam_account_name = ad_user.get("samaccountname")
new_ou = ad_user.get("ou")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if not user_exists and args.get('create-if-not-exists') == "true":
create_user_iam(default_base_dn, default_page_size, args)
elif user_exists:
dn = user_dn(sam_account_name, default_base_dn)
# remove fields that can't be modified
            # notice that we are changing the ou and that affects the dn and cn
if ad_user.get("dn"):
ad_user.pop("dn")
if ad_user.get("samaccountname"):
ad_user.pop("samaccountname")
if ad_user.get("cn"):
ad_user.pop("cn")
fail_to_modify = []
for key in ad_user:
modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
ou_modified_succeed = modify_user_ou(dn, new_ou)
if not ou_modified_succeed:
fail_to_modify.append("ou")
if fail_to_modify:
error_list = ','.join(fail_to_modify)
error_message = f"Fail to modify the following attributes: {error_list}"
iam_user_profile.set_result(success=False, error_message=error_message)
else:
active = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=active)
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
def update_user_iam(default_base_dn, default_page_size, args):
assert conn is not None
user_profile = args.get("user-profile")
user_profile_delta = args.get('user-profile-delta')
iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
ad_user = iam_user_profile.map_object(mapper_name=OUTGOING_MAPPER)
try:
        # check if user exists and if it doesn't, create it
sam_account_name = ad_user.get("samaccountname")
new_ou = ad_user.get("ou")
user_exists = check_if_user_exists_by_samaccountname(default_base_dn, default_page_size, sam_account_name)
if not user_exists and args.get('create-if-not-exists') == "true":
create_user_iam(default_base_dn, default_page_size, args)
elif user_exists:
dn = user_dn(sam_account_name, default_base_dn)
# remove fields that can't be modified
            # notice that we are changing the ou and that affects the dn and cn
if ad_user.get("dn"):
ad_user.pop("dn")
if ad_user.get("samaccountname"):
ad_user.pop("samaccountname")
if ad_user.get("cn"):
ad_user.pop("cn")
fail_to_modify = []
for key in ad_user:
modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
success = conn.modify(dn, modification)
if not success:
fail_to_modify.append(key)
ou_modified_succeed = modify_user_ou(dn, new_ou)
if not ou_modified_succeed:
fail_to_modify.append("ou")
if fail_to_modify:
error_list = '\n'.join(fail_to_modify)
error_message = f"Fail to modify the following attributes: {error_list}"
iam_user_profile.set_result(success=False, error_message=error_message)
else:
active = ad_user.get('userAccountControl') not in INACTIVE_LIST_OPTIONS
iam_user_profile.set_result(success=True,
email=ad_user.get('email'),
username=ad_user.get('name'),
details=ad_user,
active=active)
return_results(iam_user_profile)
except Exception as e:
iam_user_profile.set_result(success=False, error_message=str(e))
return_results(iam_user_profile)
|
23,294 |
def get_page_tree(topdir,proj_copy_subdir,md,parent=None):
# look for files within topdir
filelist = sorted(os.listdir(topdir))
if 'index.md' in filelist:
# process index.md
try:
node = PageNode(md,os.path.join(topdir,'index.md'),proj_copy_subdir,parent)
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,'index.md')),e.args[0]))
return None
filelist.remove('index.md')
else:
print('Warning: No index.md file in directory {}'.format(topdir))
return None
for name in filelist:
if name[0] != '.' and name[-1] != '~':
if os.path.isdir(os.path.join(topdir,name)):
# recurse into subdirectories
traversedir = True
if not parent==None:
traversedir = not name in parent.copy_subdir
if traversedir:
subnode = get_page_tree( os.path.join(topdir,name),
proj_copy_subdir, md, node )
if subnode: node.subpages.append(subnode)
elif name[-3:] == '.md':
# process subpages
try:
node.subpages.append(PageNode(md,os.path.join(topdir,name),proj_copy_subdir,node))
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,name)),e.args[0]))
continue
else:
node.files.append(name)
return node
|
def get_page_tree(topdir,proj_copy_subdir,md,parent=None):
# look for files within topdir
filelist = sorted(os.listdir(topdir))
if 'index.md' in filelist:
# process index.md
try:
node = PageNode(md,os.path.join(topdir,'index.md'),proj_copy_subdir,parent)
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,'index.md')),e.args[0]))
return None
filelist.remove('index.md')
else:
print('Warning: No index.md file in directory {}'.format(topdir))
return None
for name in filelist:
if name[0] != '.' and name[-1] != '~':
if os.path.isdir(os.path.join(topdir,name)):
# recurse into subdirectories
traversedir = True
if not parent==None:
traversedir = name not in parent.copy_subdir
if traversedir:
subnode = get_page_tree( os.path.join(topdir,name),
proj_copy_subdir, md, node )
if subnode: node.subpages.append(subnode)
elif name[-3:] == '.md':
# process subpages
try:
node.subpages.append(PageNode(md,os.path.join(topdir,name),proj_copy_subdir,node))
except Exception as e:
print("Warning: Error parsing {}.\n\t{}".format(os.path.relpath(os.path.join(topdir,name)),e.args[0]))
continue
else:
node.files.append(name)
return node
|
44,084 |
def generate_moment(basis_a, basis_b, e, idx):
r"""Return a function that computes the multipole moment integral for two contracted Gaussians.
The multipole moment integral for two primitive Gaussian functions is computed as
.. math::
S^e = \left \langle G_i | q_C^e | G_j \right \rangle
\left \langle G_k | G_l \right \rangle
\left \langle G_m | G_n \right \rangle,
where :math:`G_{i-n}` is a one-dimensional Gaussian function, :math:`q = x, y, z` is the
dimension at which the integral is evaluated, :math:`C` is the origin of the Cartesian
coordinates and :math:`e` is the multipole moment order. For contracted Gaussians, such
integrals will be computed over primitive Gaussians, multiplied by the normalized contraction
coefficients and finally summed over.
The ``idx`` argument determines the dimension :math:`q` at which the integral is computed. It
can be :math:`0, 1, 2` for :math:`x, y, z` components, respectively.
Args:
basis_a (BasisFunction): first basis function
basis_b (BasisFunction): second basis function
e (integer): order of the multipole moment
idx (integer): index determining the dimension of the multipole moment integral
Returns:
function: function that computes the multipole moment integral
**Example**
>>> symbols = ['H', 'Li']
>>> geometry = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.hf.Molecule(symbols, geometry)
>>> args = []
>>> e, idx = 1, 0
>>> generate_moment(mol.basis_set[0], mol.basis_set[1], e, idx)(*args)
3.12846324e-01
"""
def moment_integral(*args):
r"""Normalize and compute the multipole moment integral for two contracted Gaussians.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the multipole moment integral between two contracted Gaussian orbitals
"""
args_a = [i[0] for i in args]
args_b = [i[1] for i in args]
la = basis_a.l
lb = basis_b.l
alpha, ca, ra = _generate_params(basis_a.params, args_a)
beta, cb, rb = _generate_params(basis_b.params, args_b)
ca = ca * primitive_norm(basis_a.l, alpha)
cb = cb * primitive_norm(basis_b.l, beta)
na = contracted_norm(basis_a.l, alpha, ca)
nb = contracted_norm(basis_b.l, beta, cb)
p = alpha[:, anp.newaxis] + beta
q = anp.sqrt(anp.pi / p)
rc = (
alpha[:, anp.newaxis] * ra[:, anp.newaxis, anp.newaxis]
+ beta * rb[:, anp.newaxis, anp.newaxis]
) / p
i, j, k = anp.roll(anp.array([0, 2, 1]), idx)
s = (
gaussian_moment(la[i], lb[i], ra[i], rb[i], alpha[:, anp.newaxis], beta, e, rc[i])
* expansion(la[j], lb[j], ra[j], rb[j], alpha[:, anp.newaxis], beta, 0)
* q
* expansion(la[k], lb[k], ra[k], rb[k], alpha[:, anp.newaxis], beta, 0)
* q
)
return (na * nb * (ca[:, anp.newaxis] * cb) * s).sum()
return moment_integral
|
def generate_moment(basis_a, basis_b, e, idx):
r"""Return a function that computes the multipole moment integral for two contracted Gaussians.
The multipole moment integral for two primitive Gaussian functions is computed as
.. math::
S^e = \left \langle G_i | q_C^e | G_j \right \rangle
\left \langle G_k | G_l \right \rangle
\left \langle G_m | G_n \right \rangle,
where :math:`G_{i-n}` is a one-dimensional Gaussian function, :math:`q = x, y, z` is the
dimension at which the integral is evaluated, :math:`C` is the origin of the Cartesian
coordinates and :math:`e` is the multipole moment order. For contracted Gaussians, such
integrals will be computed over primitive Gaussians, multiplied by the normalized contraction
coefficients and finally summed over.
The ``idx`` argument determines the coordinate :math:`q` at which the integral is computed. It
can be :math:`0, 1, 2` for :math:`x, y, z` components, respectively.
Args:
basis_a (BasisFunction): first basis function
basis_b (BasisFunction): second basis function
e (integer): order of the multipole moment
idx (integer): index determining the dimension of the multipole moment integral
Returns:
function: function that computes the multipole moment integral
**Example**
>>> symbols = ['H', 'Li']
>>> geometry = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.hf.Molecule(symbols, geometry)
>>> args = []
>>> e, idx = 1, 0
>>> generate_moment(mol.basis_set[0], mol.basis_set[1], e, idx)(*args)
3.12846324e-01
"""
def moment_integral(*args):
r"""Normalize and compute the multipole moment integral for two contracted Gaussians.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the multipole moment integral between two contracted Gaussian orbitals
"""
args_a = [i[0] for i in args]
args_b = [i[1] for i in args]
la = basis_a.l
lb = basis_b.l
alpha, ca, ra = _generate_params(basis_a.params, args_a)
beta, cb, rb = _generate_params(basis_b.params, args_b)
ca = ca * primitive_norm(basis_a.l, alpha)
cb = cb * primitive_norm(basis_b.l, beta)
na = contracted_norm(basis_a.l, alpha, ca)
nb = contracted_norm(basis_b.l, beta, cb)
p = alpha[:, anp.newaxis] + beta
q = anp.sqrt(anp.pi / p)
rc = (
alpha[:, anp.newaxis] * ra[:, anp.newaxis, anp.newaxis]
+ beta * rb[:, anp.newaxis, anp.newaxis]
) / p
i, j, k = anp.roll(anp.array([0, 2, 1]), idx)
s = (
gaussian_moment(la[i], lb[i], ra[i], rb[i], alpha[:, anp.newaxis], beta, e, rc[i])
* expansion(la[j], lb[j], ra[j], rb[j], alpha[:, anp.newaxis], beta, 0)
* q
* expansion(la[k], lb[k], ra[k], rb[k], alpha[:, anp.newaxis], beta, 0)
* q
)
return (na * nb * (ca[:, anp.newaxis] * cb) * s).sum()
return moment_integral
|
8,588 |
def get_dot_vim():
"""Returns the likely place for ~/.vim for the current setup."""
home = vim.eval("$HOME")
candidates = []
if platform.system() == "Windows":
candidates.append(os.path.join(home, "vimfiles"))
if vim.eval("has('nvim')") == "1":
xdg_home_config = vim.eval("$XDG_CONFIG_HOME") or os.path.join(home, ".config")
candidates.append(os.path.join(xdg_home_config, "nvim"))
if vim.eval("$XDG_CONFIG_HOME"):
xdg_home_config = vim.eval("$XDG_CONFIG_HOME") or os.path.join(home, ".config")
candidates.append(os.path.join(xdg_home_config, "vim"))
candidates.append(os.path.join(home, ".vim"))
if "MYVIMRC" in os.environ:
my_vimrc = os.path.expandvars(os.environ["MYVIMRC"])
candidates.append(normalize_file_path(os.path.dirname(my_vimrc)))
for candidate in candidates:
if os.path.isdir(candidate):
return normalize_file_path(candidate)
raise RuntimeError(
"Unable to find user configuration directory. I tried '%s'." % candidates
)
|
def get_dot_vim():
"""Returns the likely place for ~/.vim for the current setup."""
home = vim.eval("$HOME")
candidates = []
if platform.system() == "Windows":
candidates.append(os.path.join(home, "vimfiles"))
if vim.eval("has('nvim')") == "1" or vim.eval("$XDG_CONFIG_HOME"):
xdg_home_config = vim.eval("$XDG_CONFIG_HOME") or os.path.join(home, ".config")
candidates.append(os.path.join(xdg_home_config, "nvim"))
if vim.eval("$XDG_CONFIG_HOME"):
xdg_home_config = vim.eval("$XDG_CONFIG_HOME") or os.path.join(home, ".config")
candidates.append(os.path.join(xdg_home_config, "vim"))
candidates.append(os.path.join(home, ".vim"))
if "MYVIMRC" in os.environ:
my_vimrc = os.path.expandvars(os.environ["MYVIMRC"])
candidates.append(normalize_file_path(os.path.dirname(my_vimrc)))
for candidate in candidates:
if os.path.isdir(candidate):
return normalize_file_path(candidate)
raise RuntimeError(
"Unable to find user configuration directory. I tried '%s'." % candidates
)
|
3,121 |
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import Float64Dtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
elif how in ["mean", "median", "var"] and isinstance(dtype, Int64Dtype):
return Float64Dtype()
return dtype
|
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import Float64Dtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum", "prod"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
elif how in ["mean", "median", "var"] and isinstance(dtype, Int64Dtype):
return Float64Dtype()
return dtype
|
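A small usage sketch, assuming maybe_cast_result_dtype above is in scope; the pandas imports are internal APIs and may move between releases.

import numpy as np
from pandas.core.arrays.boolean import BooleanDtype

# plain bool aggregations promote to NumPy int64; the nullable BooleanDtype
# promotes to the nullable Int64Dtype ("prod" is only covered by the second version)
print(maybe_cast_result_dtype(np.dtype(bool), "sum"))   # int64
print(maybe_cast_result_dtype(BooleanDtype(), "prod"))  # Int64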
47,671 |
def get_user_input():
"""
Ask the user for the necessary inputs to add the new model.
"""
model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
# Get old model type
valid_model_type = False
while not valid_model_type:
old_model_type = input(
"""What is the model you would like to duplicate? Please provide the lowercase `model_type`
- this can be found inside the configuration of any checkpoint of that model or in the name of the docs URL
(e.g. roberta):"""
)
if old_model_type in model_types:
valid_model_type = True
else:
print(f"{old_model_type} is not a valid model type.")
near_choices = difflib.get_close_matches(old_model_type, model_types)
if len(near_choices) >= 1:
if len(near_choices) > 1:
near_choices = " or ".join(near_choices)
print(f"Did you mean {near_choices}?")
old_model_info = retrieve_info_for_model(old_model_type)
old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class
old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class
old_processor_class = old_model_info["model_patterns"].processor_class
old_frameworks = old_model_info["frameworks"]
old_checkpoint = None
if len(old_model_info["model_patterns"].checkpoint) == 0:
old_checkpoint = get_user_field(
"We couldn't find the name of the base checkpoint for that model, please enter it here."
)
model_name = get_user_field(
"What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? "
)
default_patterns = ModelPatterns(model_name, model_name)
model_type = get_user_field(
"What identifier would you like to use for the `model_type` of this model? ",
default_value=default_patterns.model_type,
)
model_lower_cased = get_user_field(
"What lowercase name would you like to use for the module (folder) of this model? ",
default_value=default_patterns.model_lower_cased,
)
model_camel_cased = get_user_field(
"What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ",
default_value=default_patterns.model_camel_cased,
)
model_upper_cased = get_user_field(
"What prefix (upper-cased) would you like to use for the constants relative to this model? ",
default_value=default_patterns.model_upper_cased,
)
config_class = get_user_field(
"What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config"
)
checkpoint = get_user_field(
"Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/roberta-base): "
)
old_processing_classes = [
c for c in [old_feature_extractor_class, old_tokenizer_class, old_processor_class] if c is not None
]
old_processing_classes = ", ".join(old_processing_classes)
keep_processing = get_user_field(
f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ",
convert_to=convert_to_bool,
fallback_message="Please answer yes/no, y/n, true/false or 1/0. ",
)
if keep_processing:
feature_extractor_class = old_feature_extractor_class
processor_class = old_processor_class
tokenizer_class = old_tokenizer_class
else:
if old_tokenizer_class is not None:
tokenizer_class = get_user_field(
"What will be the name of the tokenizer class for this model? ",
default_value=f"{model_camel_cased}Tokenizer",
)
else:
tokenizer_class = None
if old_feature_extractor_class is not None:
feature_extractor_class = get_user_field(
"What will be the name of the feature extractor class for this model? ",
default_value=f"{model_camel_cased}FeatureExtractor",
)
else:
feature_extractor_class = None
if old_processor_class is not None:
processor_class = get_user_field(
"What will be the name of the processor class for this model? ",
default_value=f"{model_camel_cased}Processor",
)
else:
processor_class = None
model_patterns = ModelPatterns(
model_name,
checkpoint,
model_type=model_type,
model_lower_cased=model_lower_cased,
model_camel_cased=model_camel_cased,
model_upper_cased=model_upper_cased,
config_class=config_class,
tokenizer_class=tokenizer_class,
feature_extractor_class=feature_extractor_class,
processor_class=processor_class,
)
add_copied_from = get_user_field(
"Should we add # Copied from statements when creating the new modeling file (yes/no)? ",
convert_to=convert_to_bool,
default_value="yes",
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
)
all_frameworks = get_user_field(
"Should we add a version of your new model in all the frameworks implemented by"
f" {old_model_type} ({old_frameworks}) (yes/no)? ",
convert_to=convert_to_bool,
default_value="yes",
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
)
if all_frameworks:
frameworks = None
else:
frameworks = get_user_field(
"Please enter the list of framworks you want (pt, tf, flax) separated by spaces",
is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")),
)
frameworks = list(set(frameworks.split(" ")))
return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
|
def get_user_input():
"""
Ask the user for the necessary inputs to add the new model.
"""
model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
# Get old model type
valid_model_type = False
while not valid_model_type:
old_model_type = input(
"What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta):"
)
if old_model_type in model_types:
valid_model_type = True
else:
print(f"{old_model_type} is not a valid model type.")
near_choices = difflib.get_close_matches(old_model_type, model_types)
if len(near_choices) >= 1:
if len(near_choices) > 1:
near_choices = " or ".join(near_choices)
print(f"Did you mean {near_choices}?")
old_model_info = retrieve_info_for_model(old_model_type)
old_tokenizer_class = old_model_info["model_patterns"].tokenizer_class
old_feature_extractor_class = old_model_info["model_patterns"].feature_extractor_class
old_processor_class = old_model_info["model_patterns"].processor_class
old_frameworks = old_model_info["frameworks"]
old_checkpoint = None
if len(old_model_info["model_patterns"].checkpoint) == 0:
old_checkpoint = get_user_field(
"We couldn't find the name of the base checkpoint for that model, please enter it here."
)
model_name = get_user_field(
"What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? "
)
default_patterns = ModelPatterns(model_name, model_name)
model_type = get_user_field(
"What identifier would you like to use for the `model_type` of this model? ",
default_value=default_patterns.model_type,
)
model_lower_cased = get_user_field(
"What lowercase name would you like to use for the module (folder) of this model? ",
default_value=default_patterns.model_lower_cased,
)
model_camel_cased = get_user_field(
"What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ",
default_value=default_patterns.model_camel_cased,
)
model_upper_cased = get_user_field(
"What prefix (upper-cased) would you like to use for the constants relative to this model? ",
default_value=default_patterns.model_upper_cased,
)
config_class = get_user_field(
"What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config"
)
checkpoint = get_user_field(
"Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/roberta-base): "
)
old_processing_classes = [
c for c in [old_feature_extractor_class, old_tokenizer_class, old_processor_class] if c is not None
]
old_processing_classes = ", ".join(old_processing_classes)
keep_processing = get_user_field(
f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ",
convert_to=convert_to_bool,
fallback_message="Please answer yes/no, y/n, true/false or 1/0. ",
)
if keep_processing:
feature_extractor_class = old_feature_extractor_class
processor_class = old_processor_class
tokenizer_class = old_tokenizer_class
else:
if old_tokenizer_class is not None:
tokenizer_class = get_user_field(
"What will be the name of the tokenizer class for this model? ",
default_value=f"{model_camel_cased}Tokenizer",
)
else:
tokenizer_class = None
if old_feature_extractor_class is not None:
feature_extractor_class = get_user_field(
"What will be the name of the feature extractor class for this model? ",
default_value=f"{model_camel_cased}FeatureExtractor",
)
else:
feature_extractor_class = None
if old_processor_class is not None:
processor_class = get_user_field(
"What will be the name of the processor class for this model? ",
default_value=f"{model_camel_cased}Processor",
)
else:
processor_class = None
model_patterns = ModelPatterns(
model_name,
checkpoint,
model_type=model_type,
model_lower_cased=model_lower_cased,
model_camel_cased=model_camel_cased,
model_upper_cased=model_upper_cased,
config_class=config_class,
tokenizer_class=tokenizer_class,
feature_extractor_class=feature_extractor_class,
processor_class=processor_class,
)
add_copied_from = get_user_field(
"Should we add # Copied from statements when creating the new modeling file (yes/no)? ",
convert_to=convert_to_bool,
default_value="yes",
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
)
all_frameworks = get_user_field(
"Should we add a version of your new model in all the frameworks implemented by"
f" {old_model_type} ({old_frameworks}) (yes/no)? ",
convert_to=convert_to_bool,
default_value="yes",
fallback_message="Please answer yes/no, y/n, true/false or 1/0.",
)
if all_frameworks:
frameworks = None
else:
frameworks = get_user_field(
"Please enter the list of framworks you want (pt, tf, flax) separated by spaces",
is_valid_answer=lambda x: all(p in ["pt", "tf", "flax"] for p in x.split(" ")),
)
frameworks = list(set(frameworks.split(" ")))
return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
|
45,912 |
def histogram_matching(source: torch.Tensor, template: torch.Tensor) -> torch.Tensor:
"""Adjust the pixel values of an image to match its histogram towards a target image.
`Histogram matching <https://en.wikipedia.org/wiki/Histogram_matching>`_ is the transformation
of an image so that its histogram matches a specified histogram. In this implementation, the
histogram is computed over the flattened image array. Code refered to
`here <https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x>`_.
Args:
source: Image to transform.
template: Template image. It can have different dimensions to source.
Returns:
matched: The transformed output image.
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and counts.
s_values, bin_idx, s_counts = torch.unique(source, return_inverse=True, return_counts=True)
t_values, t_counts = torch.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = torch.cumsum(s_counts, dim=0, dtype=source.dtype)
s_quantiles = s_quantiles / s_quantiles[-1]
t_quantiles = torch.cumsum(t_counts, dim=0, dtype=source.dtype)
t_quantiles = t_quantiles / t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
|
def histogram_matching(source: torch.Tensor, template: torch.Tensor) -> torch.Tensor:
"""Adjust the pixel values of an image to match its histogram towards a target image.
`Histogram matching <https://en.wikipedia.org/wiki/Histogram_matching>`_ is the transformation
of an image so that its histogram matches a specified histogram. In this implementation, the
histogram is computed over the flattened image array. Code referred to
`here <https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x>`_.
Args:
source: Image to transform.
template: Template image. It can have different dimensions to source.
Returns:
matched: The transformed output image.
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and counts.
s_values, bin_idx, s_counts = torch.unique(source, return_inverse=True, return_counts=True)
t_values, t_counts = torch.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = torch.cumsum(s_counts, dim=0, dtype=source.dtype)
s_quantiles = s_quantiles / s_quantiles[-1]
t_quantiles = torch.cumsum(t_counts, dim=0, dtype=source.dtype)
t_quantiles = t_quantiles / t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
|
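A minimal usage sketch, assuming histogram_matching and the 1-D interp helper it calls are importable from the surrounding module; tensor shapes are hypothetical.

import torch

source = torch.rand(3, 32, 32)     # image to transform
template = torch.rand(3, 48, 48)   # reference image; its size may differ from source
matched = histogram_matching(source, template)
assert matched.shape == source.shape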
33,288 |
def _get_global_transform(tl):
# since there's no global scale in otio, use the first source with
# bounds as the global bounds
def find_display_bounds(tl):
for clip in tl.clip_if():
if hasattr(clip, "media_reference") and clip.media_reference:
if hasattr(clip.media_reference, "available_image_bounds"):
bounds = clip.media_reference.available_image_bounds
if bounds:
return bounds
return None
bounds = find_display_bounds(tl)
if not bounds:
return {}
translate = bounds.center()
scale = bounds.max - bounds.min
# RV's global coordinate system has a width and height of 1 where the
# width will be scaled to the image aspect ratio. So scale globally by
# height. The source width will later be scaled to aspect ratio.
global_scale = otio.schema.V2d(1.0 / scale.y, 1.0 / scale.y)
return {
'global_scale': global_scale,
'global_translate': translate * global_scale,
}
|
def _get_global_transform(tl):
# since there's no global scale in otio, use the first source with
# bounds as the global bounds
def find_display_bounds(tl):
for clip in tl.clip_if():
if hasattr(clip, "media_reference") and clip.media_reference:
if hasattr(clip.media_reference, "available_image_bounds"):
bounds = clip.media_reference.available_image_bounds
if bounds:
return bounds
return None
bounds = find_display_bounds(tl)
if bounds is None:
return {}
translate = bounds.center()
scale = bounds.max - bounds.min
# RV's global coordinate system has a width and height of 1 where the
# width will be scaled to the image aspect ratio. So scale globally by
# height. The source width will later be scaled to aspect ratio.
global_scale = otio.schema.V2d(1.0 / scale.y, 1.0 / scale.y)
return {
'global_scale': global_scale,
'global_translate': translate * global_scale,
}
|
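The scale and translate arithmetic above, reproduced with plain tuples and hypothetical bounds instead of otio.schema.V2d:

bounds_min, bounds_max = (0.0, 0.0), (1920.0, 1080.0)  # hypothetical image bounds
center = ((bounds_min[0] + bounds_max[0]) / 2.0, (bounds_min[1] + bounds_max[1]) / 2.0)
height = bounds_max[1] - bounds_min[1]
global_scale = (1.0 / height, 1.0 / height)             # normalize by height only
global_translate = (center[0] * global_scale[0], center[1] * global_scale[1])
print(global_scale, global_translate)                   # (~0.000926, ~0.000926) (~0.889, 0.5)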
31,641 |
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
# If the pack is dependent on a new pack (which is not yet in the index.json)
# we will note that it is missing dependencies.
            # And finally, after updating all the packages in index.json,
            # we will go over the pack again to add what was missing.
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # in case the pack already exists at the cloud storage path and in the index, don't show that the pack was changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# will go over all the packs what was marked as missing dependencies and will update them with the new index.json
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status, user_metadata = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names)
if pack.is_missing_dependencies:
# If the pack is dependent on a new pack (which is not yet in the index.json)
# we will note that it is missing dependencies.
            # And finally, after updating all the packages in index.json,
            # we will go over the pack again to add what was missing.
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
        # in case the pack already exists at the cloud storage path and in the index, don't show that the pack was changed
if skipped_upload and exists_in_index:
logging.info(f"{pack.name} pack status is {PackStatus.PACK_ALREADY_EXISTS.name}")
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies, updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status = pack.format_metadata(user_metadata, index_folder_path, packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified, statistics_handler, pack_names, True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
|
41,351 |
def linear_expand(expr):
if not isinstance(expr, Expr):
raise TypeError(expr + 'is not a SymPy Expr')
expr = expand(expr)
if expr == 0:
coefs = [expr]
bases = [S(1)]
return (coefs, bases)
if isinstance(expr, Add):
args = expr.args
else:
if expr.is_commutative:
return ([expr], [S(1)])
else:
args = [expr]
coefs = []
bases = []
for term in args:
if term.is_commutative:
if S(1) in bases:
coefs[bases.index(S(1))] += term
else:
bases.append(S(1))
coefs.append(term)
else:
c, nc = term.args_cnc()
base = nc[0]
coef = Mul._from_args(c)
if base in bases:
coefs[bases.index(base)] += coef
else:
bases.append(base)
coefs.append(coef)
return (coefs, bases)
|
def linear_expand(expr):
if not isinstance(expr, Expr):
raise TypeError('{!r} is not a SymPy Expr'.format(expr))
expr = expand(expr)
if expr == 0:
coefs = [expr]
bases = [S(1)]
return (coefs, bases)
if isinstance(expr, Add):
args = expr.args
else:
if expr.is_commutative:
return ([expr], [S(1)])
else:
args = [expr]
coefs = []
bases = []
for term in args:
if term.is_commutative:
if S(1) in bases:
coefs[bases.index(S(1))] += term
else:
bases.append(S(1))
coefs.append(term)
else:
c, nc = term.args_cnc()
base = nc[0]
coef = Mul._from_args(c)
if base in bases:
coefs[bases.index(base)] += coef
else:
bases.append(base)
coefs.append(coef)
return (coefs, bases)
|
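A short SymPy sketch, assuming linear_expand above is in scope; e1 and e2 stand in for non-commutative basis symbols (e.g. geometric-algebra basis vectors).

from sympy import symbols

a, b = symbols('a b')
e1, e2 = symbols('e1 e2', commutative=False)
coefs, bases = linear_expand(a*e1 + b*e2 + 3)
print(coefs, bases)  # e.g. [3, a, b] [1, e1, e2], up to the ordering of the Add args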
5,910 |
def test_prompt_for_authentication(script, data, cert_factory):
"""Test behaviour while installing from a index url
requiring authentication
"""
cert_path = cert_factory()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.load_cert_chain(cert_path, cert_path)
ctx.load_verify_locations(cafile=cert_path)
ctx.verify_mode = ssl.CERT_REQUIRED
server = make_mock_server(ssl_context=ctx)
server.mock.side_effect = [
package_page({
"simple-3.0.tar.gz": "/files/simple-3.0.tar.gz",
}),
authorization_response(str(data.packages / "simple-3.0.tar.gz")),
]
url = "https://{}:{}/simple".format(server.host, server.port)
with server_running(server):
result = script.pip('install', "--index-url", url,
"--cert", cert_path, "--client-cert", cert_path,
'simple', expect_error=True)
print(result)
assert 'User for {}:{}'.format(server.host, server.port) in result.stdout
|
def test_prompt_for_authentication(script, data, cert_factory):
"""Test behaviour while installing from a index url
requiring authentication
"""
cert_path = cert_factory()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.load_cert_chain(cert_path, cert_path)
ctx.load_verify_locations(cafile=cert_path)
ctx.verify_mode = ssl.CERT_REQUIRED
server = make_mock_server(ssl_context=ctx)
server.mock.side_effect = [
package_page({
"simple-3.0.tar.gz": "/files/simple-3.0.tar.gz",
}),
authorization_response(str(data.packages / "simple-3.0.tar.gz")),
]
url = "https://{}:{}/simple".format(server.host, server.port)
with server_running(server):
result = script.pip('install', "--index-url", url,
"--cert", cert_path, "--client-cert", cert_path,
'simple', expect_error=True)
assert 'User for {}:{}'.format(server.host, server.port) in result.stdout, str(result)
|
7,896 |
def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0 # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order + 1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0 # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
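A hypothetical call sketch with toy 1/v-like data; it assumes the vectfit package referenced in the docstring is installed and _vectfit_xs is importable.

import numpy as np

energy = np.logspace(0, 4, 500)        # eV grid
total = 10.0 / np.sqrt(energy)         # toy cross sections (barns)
elastic = 0.9 * total
ce_xs = np.vstack([elastic, total - elastic])
poles, residues = _vectfit_xs(energy, ce_xs, mts=[2, 102], rtol=1e-3, log=True)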
9,168 |
def find_git_http_backend():
"""Find Git HTTP back-end."""
if hasattr(find_git_http_backend, "result"):
return find_git_http_backend.result
try:
path = subprocess.run(
["git", "--exec-path"],
universal_newlines=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).stdout.strip()
if path:
GIT_PATHS.insert(0, path)
except OSError:
pass
for path in GIT_PATHS:
names = ("git-http-backend", "git-http-backend.exe")
for name in names:
name = os.path.join(path, "git-http-backend")
if os.path.exists(name):
find_git_http_backend.result = name
return name
return None
|
def find_git_http_backend():
"""Find Git HTTP back-end."""
if hasattr(find_git_http_backend, "result"):
return find_git_http_backend.result
try:
path = subprocess.run(
["git", "--exec-path"],
universal_newlines=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).stdout.strip()
if path:
GIT_PATHS.insert(0, path)
except OSError:
pass
for path in GIT_PATHS:
names = ("git-http-backend", "git-http-backend.exe")
for name in names:
name = os.path.join(path, name)
if os.path.exists(name):
find_git_http_backend.result = name
return name
return None
|
59,914 |
def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore the ReqNodeNotAvail Slurm state'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
    # First, configure logging with our generic configuration so that we can
    # print pretty messages; logging will be reconfigured by the user's
    # configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the system
        # and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# Parse the mode's options and reparse the command-line
mode_args = list(itertools.chain(*[shlex.split(m)
for m in mode_args]))
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
    # Set up the check loader
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'),
recurse=site_config.get('general/0/check_search_recursive'),
ignore_conflicts=site_config.get('general/0/ignore_check_conflicts')
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': '1.0',
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
except OSError as e:
raise errors.ReframeError from e
# Filter checks by name
checks_matched = checks_found
if options.exclude_names:
for name in options.exclude_names:
checks_matched = filter(filters.have_not_name(name),
checks_matched)
if options.names:
checks_matched = filter(filters.have_name('|'.join(options.names)),
checks_matched)
# Filter checks by tags
for tag in options.tags:
checks_matched = filter(filters.have_tag(tag), checks_matched)
# Filter checks by prgenv
if not options.skip_prgenv_check:
for prgenv in options.prgenv:
checks_matched = filter(filters.have_prgenv(prgenv),
checks_matched)
# Filter checks by system
if not options.skip_system_check:
checks_matched = filter(
filters.have_partition(rt.system.partitions), checks_matched)
# Filter checks further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
checks_matched = filter(filters.have_gpu_only(), checks_matched)
elif options.cpu_only:
checks_matched = filter(filters.have_cpu_only(), checks_matched)
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
# Generate the test cases, validate dependencies and sort them
checks_matched = list(checks_matched)
# Disable hooks
for c in checks_matched:
for h in options.hooks:
type(c).disable_hook(h)
testcases = generate_testcases(checks_matched,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testgraph = dependency.build_deps(testcases)
dependency.validate_deps(testgraph)
testcases = dependency.toposort(testgraph)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(m)
# Load the environment for the current system
try:
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(m, force=True)
except errors.EnvironError as e:
printer.warning("could not load module '%s' correctly: "
"Skipping..." % m)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Act on checks
success = True
if options.list or options.list_detailed:
list_checks(list(checks_matched), printer, options.list_detailed)
elif options.run:
            # Set up the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                    f'--max-retries is not a valid integer: {options.max_retries}'
) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
if runner.stats.failures():
runner.stats.print_failure_report(printer)
success = False
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats
}
report_file = generate_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
else:
printer.error("No action specified. Please specify `-l'/`-L' for "
"listing or `-r' for running. "
"Try `%s -h' for more options." %
argparser.prog)
sys.exit(1)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
if not log_files:
msg = '<no log file was generated>'
else:
msg = f'{", ".join(repr(f) for f in log_files)}'
printer.info(f'Log file(s) saved in: {msg}')
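# Illustrative sketch, not part of ReFrame: the '--flex-alloc-nodes' and
# '--max-retries' handling above boils down to two small parsing rules.  The
# helper names below are hypothetical and only restate the logic of the
# function above.
def _parse_flex_alloc_nodes(value):
    """Return a positive integer or the raw string (e.g. 'all' or a node state)."""
    try:
        num = int(value)
    except ValueError:
        return value
    if num <= 0:
        raise ValueError(f"invalid option for --flex-alloc-nodes: '{value}'")
    return num
def _parse_max_retries(value):
    """Return the retry count as an integer or raise a descriptive error."""
    try:
        return int(value)
    except ValueError:
        raise ValueError(f'--max-retries is not a valid integer: {value}') from None
# Examples: _parse_flex_alloc_nodes('16') -> 16; _parse_flex_alloc_nodes('idle') -> 'idle'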
|
def main():
    # Set up command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore the ReqNodeNotAvail Slurm state'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
    # First, configure logging with our generic configuration so that we can
    # print pretty messages; logging will be reconfigured by the user's
    # configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the system
        # and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# Parse the mode's options and reparse the command-line
mode_args = list(itertools.chain(*(shlex.split(m)
for m in mode_args)))
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
    # Set up the check loader
loader = RegressionCheckLoader(
load_path=site_config.get('general/0/check_search_path'),
recurse=site_config.get('general/0/check_search_recursive'),
ignore_conflicts=site_config.get('general/0/ignore_check_conflicts')
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': '1.0',
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
except OSError as e:
raise errors.ReframeError from e
# Filter checks by name
checks_matched = checks_found
if options.exclude_names:
for name in options.exclude_names:
checks_matched = filter(filters.have_not_name(name),
checks_matched)
if options.names:
checks_matched = filter(filters.have_name('|'.join(options.names)),
checks_matched)
# Filter checks by tags
for tag in options.tags:
checks_matched = filter(filters.have_tag(tag), checks_matched)
# Filter checks by prgenv
if not options.skip_prgenv_check:
for prgenv in options.prgenv:
checks_matched = filter(filters.have_prgenv(prgenv),
checks_matched)
# Filter checks by system
if not options.skip_system_check:
checks_matched = filter(
filters.have_partition(rt.system.partitions), checks_matched)
# Filter checks further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
checks_matched = filter(filters.have_gpu_only(), checks_matched)
elif options.cpu_only:
checks_matched = filter(filters.have_cpu_only(), checks_matched)
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
# Generate the test cases, validate dependencies and sort them
checks_matched = list(checks_matched)
# Disable hooks
for c in checks_matched:
for h in options.hooks:
type(c).disable_hook(h)
testcases = generate_testcases(checks_matched,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testgraph = dependency.build_deps(testcases)
dependency.validate_deps(testgraph)
testcases = dependency.toposort(testgraph)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(m)
# Load the environment for the current system
try:
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(m, force=True)
except errors.EnvironError as e:
printer.warning("could not load module '%s' correctly: "
"Skipping..." % m)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Act on checks
success = True
if options.list or options.list_detailed:
list_checks(list(checks_matched), printer, options.list_detailed)
elif options.run:
            # Set up the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(opt) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                    f'--max-retries is not a valid integer: {options.max_retries}'
) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
if runner.stats.failures():
runner.stats.print_failure_report(printer)
success = False
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats
}
report_file = generate_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
else:
printer.error("No action specified. Please specify `-l'/`-L' for "
"listing or `-r' for running. "
"Try `%s -h' for more options." %
argparser.prog)
sys.exit(1)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
if not log_files:
msg = '<no log file was generated>'
else:
msg = f'{", ".join(repr(f) for f in log_files)}'
printer.info(f'Log file(s) saved in: {msg}')
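# Illustrative sketch, not part of ReFrame: the loop above normalises the
# values passed through '-J/--job-option' before handing them to the
# scheduler.  The helper name is hypothetical; it only restates the rule.
def _normalise_job_option(opt):
    """Pass through flags and raw directives, otherwise prepend '-' or '--'."""
    if opt.startswith('-') or opt.startswith('#'):
        return opt        # already a flag or a raw scheduler directive
    if len(opt) == 1:
        return f'-{opt}'  # single letter -> short option
    return f'--{opt}'     # anything longer -> long option
# Examples: 'p' -> '-p', 'mem=4G' -> '--mem=4G', '--account=proj' is unchanged.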
|
50,000 |
def metadata_filter(
instances,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
):
"""
Filter a collection of objects by their metadata to fit the given metadata
criteria.
Criteria can be either specific properties or other objects with metadata
to be matched.
Args:
* instances:
One or more objects to be filtered.
Kwargs:
* item:
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is compared
against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* a coordinate or metadata instance equal to that of
the desired coordinates e.g., :class:`~iris.coords.DimCoord`
or :class:`CoordMetadata`.
* standard_name:
The CF standard name of the desired coordinate. If ``None``, does not
check for ``standard_name``.
* long_name:
An unconstrained description of the coordinate. If ``None``, does not
check for ``long_name``.
* var_name:
The netCDF variable name of the desired coordinate. If ``None``, does
not check for ``var_name``.
* attributes:
A dictionary of attributes desired on the coordinates. If ``None``,
does not check for ``attributes``.
* axis:
The desired coordinate axis, see :func:`~iris.util.guess_coord_axis`.
If ``None``, does not check for ``axis``. Accepts the values ``X``,
``Y``, ``Z`` and ``T`` (case-insensitive).
Returns:
A list of the objects supplied in the ``instances`` argument, limited
to only those that matched the given criteria.
"""
name = None
obj = None
if isinstance(item, str):
name = item
else:
obj = item
    # apply De Morgan's law for one less logical operation
if not (isinstance(instances, str) or isinstance(instances, Iterable)):
instances = [instances]
result = instances
if name is not None:
result = [instance for instance in result if instance.name() == name]
if standard_name is not None:
result = [
instance
for instance in result
if instance.standard_name == standard_name
]
if long_name is not None:
result = [
instance for instance in result if instance.long_name == long_name
]
if var_name is not None:
result = [
instance for instance in result if instance.var_name == var_name
]
if attributes is not None:
if not isinstance(attributes, Mapping):
msg = (
"The attributes keyword was expecting a dictionary "
"type, but got a %s instead." % type(attributes)
)
raise ValueError(msg)
def attr_filter(instance):
return all(
k in instance.attributes
and hexdigest(instance.attributes[k]) == hexdigest(v)
for k, v in attributes.items()
)
result = [instance for instance in result if attr_filter(instance)]
if axis is not None:
axis = axis.upper()
def get_axis(instance):
if hasattr(instance, "axis"):
axis = instance.axis.upper()
else:
axis = guess_coord_axis(instance)
return axis
result = [
instance for instance in result if get_axis(instance) == axis
]
if obj is not None:
if hasattr(obj, "__class__") and issubclass(
obj.__class__, BaseMetadata
):
target_metadata = obj
else:
target_metadata = obj.metadata
result = [
instance
for instance in result
if instance.metadata == target_metadata
]
return result
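# Illustrative usage sketch, not from the iris documentation: metadata_filter()
# accepts any objects exposing name()/standard_name/long_name/var_name and
# attributes.  The stand-in class below is hypothetical and exists only to
# show the calling convention.
class _FakeCoord:
    def __init__(self, standard_name, long_name=None, var_name=None, attributes=None):
        self.standard_name = standard_name
        self.long_name = long_name
        self.var_name = var_name
        self.attributes = attributes or {}
    def name(self):
        return self.standard_name or self.long_name or self.var_name
_coords = [_FakeCoord("latitude"), _FakeCoord("longitude")]
# Keep only the latitude-like object, either by name or by standard name:
#   metadata_filter(_coords, item="latitude")
#   metadata_filter(_coords, standard_name="latitude")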
|
def metadata_filter(
instances,
item=None,
standard_name=None,
long_name=None,
var_name=None,
attributes=None,
axis=None,
):
"""
Filter a collection of objects by their metadata to fit the given metadata
criteria.
Criteria can be either specific properties or other objects with metadata
to be matched.
Args:
* instances:
One or more objects to be filtered.
Kwargs:
* item:
Either,
* a :attr:`~iris.common.mixin.CFVariableMixin.standard_name`,
:attr:`~iris.common.mixin.CFVariableMixin.long_name`, or
:attr:`~iris.common.mixin.CFVariableMixin.var_name` which is compared
against the :meth:`~iris.common.mixin.CFVariableMixin.name`.
* an object or metadata instance equal to that of
the desired objects e.g., :class:`~iris.coords.DimCoord`
or :class:`CoordMetadata`.
* standard_name:
The CF standard name of the desired object. If ``None``, does not
check for ``standard_name``.
* long_name:
An unconstrained description of the object. If ``None``, does not
check for ``long_name``.
* var_name:
The netCDF variable name of the desired object. If ``None``, does
not check for ``var_name``.
* attributes:
A dictionary of attributes desired on the object. If ``None``,
does not check for ``attributes``.
* axis:
The desired object's axis (if applicable), see
:func:`~iris.util.guess_coord_axis`. If ``None``, does not
check for ``axis``. Accepts the values ``X``, ``Y``, ``Z`` and
``T`` (case-insensitive).
Returns:
A list of the objects supplied in the ``instances`` argument, limited
to only those that matched the given criteria.
"""
name = None
obj = None
if isinstance(item, str):
name = item
else:
obj = item
    # apply De Morgan's law for one less logical operation
if not (isinstance(instances, str) or isinstance(instances, Iterable)):
instances = [instances]
result = instances
if name is not None:
result = [instance for instance in result if instance.name() == name]
if standard_name is not None:
result = [
instance
for instance in result
if instance.standard_name == standard_name
]
if long_name is not None:
result = [
instance for instance in result if instance.long_name == long_name
]
if var_name is not None:
result = [
instance for instance in result if instance.var_name == var_name
]
if attributes is not None:
if not isinstance(attributes, Mapping):
msg = (
"The attributes keyword was expecting a dictionary "
"type, but got a %s instead." % type(attributes)
)
raise ValueError(msg)
def attr_filter(instance):
return all(
k in instance.attributes
and hexdigest(instance.attributes[k]) == hexdigest(v)
for k, v in attributes.items()
)
result = [instance for instance in result if attr_filter(instance)]
if axis is not None:
axis = axis.upper()
def get_axis(instance):
if hasattr(instance, "axis"):
axis = instance.axis.upper()
else:
axis = guess_coord_axis(instance)
return axis
result = [
instance for instance in result if get_axis(instance) == axis
]
if obj is not None:
if hasattr(obj, "__class__") and issubclass(
obj.__class__, BaseMetadata
):
target_metadata = obj
else:
target_metadata = obj.metadata
result = [
instance
for instance in result
if instance.metadata == target_metadata
]
return result
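# Illustrative sketch, not part of iris: the attributes filter above compares
# values through hexdigest() rather than plain '==' so that array-valued or
# otherwise unhashable attributes can still be matched.  The helper below is a
# hypothetical plain-Python approximation of that idea (repr() stands in for
# the real digest).
def _attrs_match(instance_attrs, wanted):
    """Return True when every wanted key exists with an equal digest."""
    digest = repr  # assumption: a stand-in for the hexdigest() helper used above
    return all(
        k in instance_attrs and digest(instance_attrs[k]) == digest(v)
        for k, v in wanted.items()
    )
# Example: _attrs_match({"source": "model", "version": 2}, {"source": "model"}) -> True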
|
45,873 |
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
invTilt = tiltProjection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
pointsUntilt = torch.stack([x, y, torch.ones(x.shape, device=x.device, dtype=x.dtype)], -1) @ invTilt.transpose(-2, -1)
x = pointsUntilt[..., 0] / pointsUntilt[..., 2]
y = pointsUntilt[..., 1] / pointsUntilt[..., 2]
# Iteratively undistort points
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
|
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
"""
assert points.dim() >= 2 and points.shape[-1] == 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
invTilt = tiltProjection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
points_untilt = torch.stack([x, y, torch.ones_like(x)], -1) @ invTilt.transpose(-2, -1)
        x = points_untilt[..., 0] / points_untilt[..., 2]
        y = points_untilt[..., 1] / points_untilt[..., 2]
# Iteratively undistort points
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
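# Illustrative usage sketch, not from the kornia documentation: with all
# distortion coefficients set to zero the function reduces to the identity
# mapping, which makes a convenient sanity check.  The numbers below are
# arbitrary and assume 'torch' is imported at module level as in the full
# module.
if __name__ == "__main__":
    import torch
    K = torch.tensor([[[500.0, 0.0, 320.0],
                       [0.0, 500.0, 240.0],
                       [0.0, 0.0, 1.0]]])
    points = torch.rand(1, 10, 2) * 480.0
    dist = torch.zeros(1, 4)  # k1 = k2 = p1 = p2 = 0
    undistorted = undistort_points(points, K, dist)
    assert torch.allclose(undistorted, points, atol=1e-4)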
|
32,520 |
def test_module(client: Client, params: Dict[str, Any], first_fetch_time: int) -> str:
"""
    Tests API connectivity and authentication.
When 'ok' is returned it indicates the integration works like it is supposed to and connection to the service is
successful.
Raises exceptions if something goes wrong.
Args:
client (Client): HelloWorld client to use.
params (Dict): Integration parameters.
first_fetch_time (int): The first fetch time as configured in the integration params.
Returns:
str: 'ok' if test passed, anything else will raise an exception and will fail the test.
"""
# INTEGRATION DEVELOPER TIP
# Client class should raise the exceptions, but if the test fails
# the exception text is printed to the Cortex XSOAR UI.
# If you have some specific errors you want to capture (i.e. auth failure)
# you should catch the exception here and return a string with a more
# readable output (for example return 'Authentication Error, API Key
# invalid').
# Cortex XSOAR will print everything you return different than 'ok' as
# an error
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
severity=None)
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
# Tests fetch incident:
if params.get('isFetch'):
alert_status = params.get('alert_status', None)
alert_type = params.get('alert_type', None)
min_severity = params.get('min_severity', None)
fetch_incidents(
client=client,
max_results=1,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
return 'ok'
|
def test_module(client: Client, params: Dict[str, Any], first_fetch_time: int) -> str:
"""
    Tests API connectivity and authentication.
When 'ok' is returned it indicates the integration works like it is supposed to and connection to the service is
successful.
Raises exceptions if something goes wrong.
Args:
client (Client): HelloWorld client to use.
params (Dict): Integration parameters.
first_fetch_time (int): The first fetch time as configured in the integration params.
Returns:
str: 'ok' if test passed, anything else will raise an exception and will fail the test.
"""
# INTEGRATION DEVELOPER TIP
# Client class should raise the exceptions, but if the test fails
# the exception text is printed to the Cortex XSOAR UI.
# If you have some specific errors you want to capture (i.e. auth failure)
# you should catch the exception here and return a string with a more
# readable output (for example return 'Authentication Error, API Key
# invalid').
# Cortex XSOAR will print everything you return different than 'ok' as
# an error
try:
client.search_alerts(max_results=1, start_time=first_fetch_time, alert_status=None, alert_type=None,
severity=None)
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
# Tests fetch incident:
if params.get('isFetch'):
alert_status = params.get('alert_status', None)
alert_type = params.get('alert_type', None)
min_severity = params.get('min_severity', None)
fetch_incidents(
client=client,
max_results=1,
last_run={},
first_fetch_time=first_fetch_time,
alert_status=alert_status,
min_severity=min_severity,
alert_type=alert_type
)
return 'ok'
|
58,144 |
def add_custom_malware_feeds(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Add custom md5 hashes of malware to the prisma cloud compute.
Implement the command 'prisma-cloud-compute-custom-feeds-malware-add'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-custom-feeds-malware-add command arguments.
Returns:
CommandResults: command-results object.
"""
    # the api overrides the md5 malware hashes, therefore it is necessary to add the existing ones to the 'PUT' request.
feeds = client.get_custom_md5_malware()
if feeds:
feeds = feeds.get("feed", [])
else:
feeds = []
name = args.get("name")
md5s = argToList(arg=args.get("md5", []))
existing_md5s = set([feed.get("md5") for feed in feeds])
for md5 in md5s:
if md5 not in existing_md5s: # verify that there are no duplicates because the api doesn't handle it
feeds.append({"name": name, "md5": md5})
client.add_custom_md5_malware(feeds=feeds)
return CommandResults(readable_output="Successfully updated the custom md5 malware feeds")
|
def add_custom_malware_feeds(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Add custom md5 hashes of malware to the prisma cloud compute.
Implement the command 'prisma-cloud-compute-custom-feeds-malware-add'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-custom-feeds-malware-add command arguments.
Returns:
CommandResults: command-results object.
"""
    # the api overrides the md5 malware hashes, therefore it is necessary to add the existing ones to the 'PUT' request.
# if there aren't any md5 feeds in the environment, the api returns None
feeds = (client.get_custom_md5_malware() or {}).get('feed') or []
name = args.get("name")
md5s = argToList(arg=args.get("md5", []))
existing_md5s = set([feed.get("md5") for feed in feeds])
for md5 in md5s:
if md5 not in existing_md5s: # verify that there are no duplicates because the api doesn't handle it
feeds.append({"name": name, "md5": md5})
client.add_custom_md5_malware(feeds=feeds)
return CommandResults(readable_output="Successfully updated the custom md5 malware feeds")
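# Illustrative sketch, not part of the integration: the expression
# "(client.get_custom_md5_malware() or {}).get('feed') or []" above collapses
# three edge cases into a single line.  The helper name below is hypothetical
# and only spells the behaviour out.
def _existing_feeds(api_response):
    """Return the feed list from the API response, tolerating None and missing keys."""
    if not api_response:  # the api returns None when no custom feeds are defined yet
        return []
    return api_response.get("feed") or []  # missing key or explicit None -> []
# Examples:
#   _existing_feeds(None)            -> []
#   _existing_feeds({})              -> []
#   _existing_feeds({"feed": None})  -> []
#   _existing_feeds({"feed": [{"name": "n", "md5": "d41d8cd98f00b204e9800998ecf8427e"}]}) -> that list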
|
11,626 |
def check_zone_overlap(zone, raise_on_error=True):
logger.info("Checking DNS domain %s, please wait ...", zone)
if not isinstance(zone, DNSName):
zone = DNSName(zone).make_absolute()
# automatic empty zones always exist so checking them is pointless,
# do not report them to avoid meaningless error messages
if is_auto_empty_zone(zone):
return
try:
containing_zone = zone_for_name(zone)
except dns.exception.DNSException as e:
msg = ("DNS check for domain %s failed: %s." % (zone, e))
if raise_on_error:
if isinstance(e, dns.resolver.NoNameservers):
raise DNSNoNameservers(**e.args, **e.kwargs)
raise ValueError(msg)
else:
logger.warning('%s', msg)
return
if containing_zone == zone:
try:
ns = [ans.to_text() for ans in resolve(zone, 'NS')]
except dns.exception.DNSException as e:
logger.debug("Failed to resolve nameserver(s) for domain %s: %s",
zone, e)
ns = []
raise DNSZoneAlreadyExists(zone=zone.to_text(), ns=ns)
|
def check_zone_overlap(zone, raise_on_error=True):
logger.info("Checking DNS domain %s, please wait ...", zone)
if not isinstance(zone, DNSName):
zone = DNSName(zone).make_absolute()
# automatic empty zones always exist so checking them is pointless,
# do not report them to avoid meaningless error messages
if is_auto_empty_zone(zone):
return
try:
containing_zone = zone_for_name(zone)
except dns.exception.DNSException as e:
msg = ("DNS check for domain %s failed: %s." % (zone, e))
if raise_on_error:
if isinstance(e, dns.resolver.NoNameservers):
raise DNSNoNameservers(*e.args, **e.kwargs) from None
raise ValueError(msg)
else:
logger.warning('%s', msg)
return
if containing_zone == zone:
try:
ns = [ans.to_text() for ans in resolve(zone, 'NS')]
except dns.exception.DNSException as e:
logger.debug("Failed to resolve nameserver(s) for domain %s: %s",
zone, e)
ns = []
raise DNSZoneAlreadyExists(zone=zone.to_text(), ns=ns)
|
3,043 |
def test_no_idea_how_to_name_this_or_where_to_place_this():
# https://github.com/pandas-dev/pandas/issues/10329
df = pd.DataFrame(
{
"date": pd.date_range("2012-01-01", periods=3),
"time": [time(i, i, i) for i in range(3)],
}
)
msg = r"unsupported operand type\(s\) for -: 'Timestamp' and 'datetime.time'"
with pytest.raises(TypeError, match=msg):
df["date"] - df["time"]
|
def test_timestamp_and_time_dtype_raises():
# https://github.com/pandas-dev/pandas/issues/10329
df = pd.DataFrame(
{
"date": pd.date_range("2012-01-01", periods=3),
"time": [time(i, i, i) for i in range(3)],
}
)
msg = r"unsupported operand type\(s\) for -: 'Timestamp' and 'datetime.time'"
with pytest.raises(TypeError, match=msg):
df["date"] - df["time"]
|
22,003 |
def update_local_associations(
sydent: Sydent,
db: sqlite3.Connection,
send_email: bool,
dry_run: bool,
test: bool = False,
) -> None:
"""Update the DB table local_threepid_associations so that all stored
    emails are casefolded, and any duplicate mxids associated with the
given email are deleted.
Setting dry_run to True means that the script is being run in dry-run mode
by the user, i.e. it will run but will not send any email nor update the database.
Setting test to True means that the function is being called as part of an automated
test, and therefore we shouldn't backoff when sending emails.
:return: None
"""
logger.info("Processing rows in local_threepid_associations")
res = db.execute(
"SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"
"ORDER BY ts DESC"
)
    # a dict that associates an email address with corresponding mxids and lookup hashes
associations: Dict[str, List[Tuple[str, str, str]]] = {}
logger.info("Computing new hashes and signatures for local_threepid_associations")
# iterate through selected associations, casefold email, rehash it, and add to
# associations dict
for address, mxid in res.fetchall():
casefold_address = address.casefold()
# rehash email since hashes are case-sensitive
lookup_hash = calculate_lookup_hash(sydent, casefold_address)
if casefold_address in associations:
associations[casefold_address].append((address, mxid, lookup_hash))
else:
associations[casefold_address] = [(address, mxid, lookup_hash)]
# Deltas to apply to the database, associated with the casefolded address they're for.
deltas: Dict[str, Delta] = {}
# Iterate through the results, to build the deltas.
for casefold_address, assoc_tuples in associations.items():
# If the row is already in the right state and there's no duplicate, don't compute
# a delta for it.
if len(assoc_tuples) == 1 and assoc_tuples[0][0] == casefold_address:
continue
deltas[casefold_address] = Delta(
to_update=UpdateDelta(
address=assoc_tuples[0][0],
mxid=assoc_tuples[0][1],
lookup_hash=assoc_tuples[0][2],
)
)
if len(assoc_tuples) > 1:
# Iterate over all associations except for the first one, since we've already
# processed it.
deltas[casefold_address].to_delete = []
for address, mxid, _ in assoc_tuples[1:]:
deltas[casefold_address].to_delete.append(
DeleteDelta(
address=address,
mxid=mxid,
)
)
logger.info(f"{len(deltas)} rows to update in local_threepid_associations")
# Apply the deltas
for casefolded_address, delta in deltas.items():
if not test:
log_msg = f"Updating {casefolded_address}"
if delta.to_delete is not None:
log_msg += (
f" and deleting {len(delta.to_delete)} rows associated with it"
)
logger.info(log_msg)
try:
# Delete each association, and send an email mentioning the affected MXID.
if delta.to_delete is not None:
for to_delete in delta.to_delete:
if send_email and not dry_run:
# If the MXID is one that will still be associated with this
# email address after this run, don't send an email for it.
if to_delete.mxid == delta.to_update.mxid:
continue
sendEmailWithBackoff(
sydent,
to_delete.address,
to_delete.mxid,
test=test,
)
if not dry_run:
cur = db.cursor()
cur.execute(
"DELETE FROM local_threepid_associations WHERE medium = 'email' AND address = ?",
(to_delete.address,),
)
db.commit()
logger.info(
f"Deleting {to_delete.address} from table local_threepid_associations"
)
# Update the row now that there's no duplicate.
if not dry_run:
cur = db.cursor()
cur.execute(
"UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE medium = 'email' AND address = ? AND mxid = ?",
(
casefolded_address,
delta.to_update.lookup_hash,
delta.to_update.address,
delta.to_update.mxid,
),
)
logger.info(
f"Updating table local threepid associations setting address to {casefolded_address},"
f"lookup_hash to {delta.to_update.lookup_hash}, where medium = email and address = {delta.to_update.address} and mxid = {delta.to_update.mxid}"
)
db.commit()
except CantSendEmailException:
# If we failed because we couldn't send an email move on to the next address
# to de-duplicate.
# We catch this error here rather than when sending the email because we want
# to avoid deleting rows we can't warn users about, and we don't want to
# proceed with the subsequent update because there might still be duplicates
# in the database (since we haven't deleted everything we wanted to delete).
logger.info(f"Failed to send email to {to_delete.address}")
continue
|
def update_local_associations(
sydent: Sydent,
db: sqlite3.Connection,
send_email: bool,
dry_run: bool,
test: bool = False,
) -> None:
"""Update the DB table local_threepid_associations so that all stored
    emails are casefolded, and any duplicate mxids associated with the
given email are deleted.
Setting dry_run to True means that the script is being run in dry-run mode
by the user, i.e. it will run but will not send any email nor update the database.
Setting test to True means that the function is being called as part of an automated
test, and therefore we shouldn't backoff when sending emails.
:return: None
"""
logger.info("Processing rows in local_threepid_associations")
res = db.execute(
"SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"
"ORDER BY ts DESC"
)
    # a dict that associates an email address with corresponding mxids and lookup hashes
associations: Dict[str, List[Tuple[str, str, str]]] = {}
logger.info("Computing new hashes and signatures for local_threepid_associations")
# iterate through selected associations, casefold email, rehash it, and add to
# associations dict
for address, mxid in res.fetchall():
casefold_address = address.casefold()
# rehash email since hashes are case-sensitive
lookup_hash = calculate_lookup_hash(sydent, casefold_address)
if casefold_address in associations:
associations[casefold_address].append((address, mxid, lookup_hash))
else:
associations[casefold_address] = [(address, mxid, lookup_hash)]
# Deltas to apply to the database, associated with the casefolded address they're for.
deltas: Dict[str, Delta] = {}
# Iterate through the results, to build the deltas.
for casefold_address, assoc_tuples in associations.items():
# If the row is already in the right state and there's no duplicate, don't compute
# a delta for it.
if len(assoc_tuples) == 1 and assoc_tuples[0][0] == casefold_address:
continue
deltas[casefold_address] = Delta(
to_update=UpdateDelta(
address=assoc_tuples[0][0],
mxid=assoc_tuples[0][1],
lookup_hash=assoc_tuples[0][2],
)
)
if len(assoc_tuples) > 1:
# Iterate over all associations except for the first one, since we've already
# processed it.
deltas[casefold_address].to_delete = []
for address, mxid, _ in assoc_tuples[1:]:
deltas[casefold_address].to_delete.append(
DeleteDelta(
address=address,
mxid=mxid,
)
)
logger.info(f"{len(deltas)} rows to update in local_threepid_associations")
# Apply the deltas
for casefolded_address, delta in deltas.items():
if not test:
log_msg = f"Updating {casefolded_address}"
if delta.to_delete is not None:
log_msg += (
f" and deleting {len(delta.to_delete)} rows associated with it"
)
logger.info(log_msg)
try:
# Delete each association, and send an email mentioning the affected MXID.
if delta.to_delete is not None:
for to_delete in delta.to_delete:
if send_email and not dry_run:
# If the MXID is one that will still be associated with this
# email address after this run, don't send an email for it.
if to_delete.mxid == delta.to_update.mxid:
continue
sendEmailWithBackoff(
sydent,
to_delete.address,
to_delete.mxid,
test=test,
)
if not dry_run:
cur = db.cursor()
cur.execute(
"DELETE FROM local_threepid_associations WHERE medium = 'email' AND address = ?",
(to_delete.address,),
)
db.commit()
logger.info(
f"Deleting {to_delete.address} from table local_threepid_associations"
)
# Update the row now that there's no duplicate.
if not dry_run:
cur = db.cursor()
cur.execute(
"UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE medium = 'email' AND address = ? AND mxid = ?",
(
casefolded_address,
delta.to_update.lookup_hash,
delta.to_update.address,
delta.to_update.mxid,
),
)
logger.info(
f"Updating table local threepid associations setting address to {casefolded_address},"
f"lookup_hash to {delta.to_update.lookup_hash}, where medium = email and address = {delta.to_update.address} and mxid = {delta.to_update.mxid}"
)
db.commit()
except CantSendEmailException:
# If we failed because we couldn't send an email move on to the next address
# to de-duplicate.
# We catch this error here rather than when sending the email because we want
# to avoid deleting rows we can't warn users about, and we don't want to
# proceed with the subsequent update because there might still be duplicates
# in the database (since we haven't deleted everything we wanted to delete).
logger.warn(f"Failed to send email to %s; skipping!", to_delete.address)
continue
|
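The core of the migration above is grouping rows by their casefolded address so that the first association (the most recent, given the ORDER BY ts DESC) is kept and the rest become deletions. A standalone sketch of just that grouping step, using hypothetical sample rows:

from typing import Dict, List, Tuple

def group_by_casefolded_address(rows: List[Tuple[str, str]]) -> Dict[str, List[Tuple[str, str]]]:
    # Bucket (address, mxid) pairs under their casefolded address; any bucket with
    # more than one entry contains duplicates to delete (all but the first entry).
    buckets: Dict[str, List[Tuple[str, str]]] = {}
    for address, mxid in rows:
        buckets.setdefault(address.casefold(), []).append((address, mxid))
    return buckets

rows = [("Alice@Example.com", "@alice:hs"), ("alice@example.com", "@alice2:hs")]
print(group_by_casefolded_address(rows))
# {'alice@example.com': [('Alice@Example.com', '@alice:hs'), ('alice@example.com', '@alice2:hs')]}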
54,722 |
def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data, expressed in seconds. A list of acceptable
        frequency strings is available here
(https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
|
def _flag_missing_timestamps(
df: pd.DataFrame,
frequency: str,
column_name: str,
first_time_stamp: pd.Timestamp,
last_time_stamp: pd.Timestamp,
) -> namedtuple:
"""
Utility function to test if input data frame is missing any timestamps relative to expected timestamps
generated based on the first_time_stamp, last_time_stamp and frequency.
:param pd.DataFrame df: data frame which needs to be tested for missing timestamps
:param str frequency: frequency i.e. sampling frequency of the data,
expressed in seconds.
        A list of acceptable frequency strings is available here:
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
:param str column_name: name of the column which has time series if not the index.
:param pd.Timestamp first_time_stamp: timestamp at which the time_series is expected to start from.
:param pd.Timestamp last_time_stamp: timestamp at which the time_series is expected to end with.
:return: namedtuple with 3 attributes namely flag, raw_data and new_index
1. flag - boolean set to True if there are missing timestamps, else set to False
2. raw_data - input data frame as is without any modifications
3. new_index - pd.DateTimeIndex that can be used to set the new index, defaults to None, assigned a value only
when flag is set to True
:rtype: namedtuple
"""
# Declare a named tuple to hold results
MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])
result = {
'flag': None,
'raw_data': df.copy(deep=True),
'new_index': None
}
# Generate expected timestamps
expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, frequency=frequency)
# Get actual timestamps
if column_name:
df.set_index(column_name, inplace=True)
df.sort_index(inplace=True)
actual_timestamps = df.index.values
# Check if they are the same
comparison_index = expected_timestamps.difference(actual_timestamps)
if comparison_index.empty:
result['flag'] = True
result['new_index'] = expected_timestamps
else:
result['flag'] = False
# Return the result as a Named Tuple
return MissingTimeStampFlag._make(result)
|
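A note on the helper above: as written in both columns it would not run, since pd.date_range takes freq= rather than frequency=, and namedtuple._make expects an iterable of field values, so passing the result dict would fill the tuple with the dict's keys. A hedged sketch of what the docstring describes (flag True when timestamps are missing, which is also the opposite of the branch above), written so it executes:

from collections import namedtuple
import pandas as pd

MissingTimeStampFlag = namedtuple('MissingTimeStampFlag', ['flag', 'raw_data', 'new_index'])

def flag_missing_timestamps(df: pd.DataFrame, freq: str, start, end) -> MissingTimeStampFlag:
    # Expected index for the stated range and frequency (note the keyword is freq).
    expected = pd.date_range(start=start, end=end, freq=freq)
    # Timestamps that should be present but are not.
    missing = expected.difference(df.index)
    if missing.empty:
        return MissingTimeStampFlag(flag=False, raw_data=df.copy(), new_index=None)
    return MissingTimeStampFlag(flag=True, raw_data=df.copy(), new_index=expected)

df = pd.DataFrame({'y': [1, 2]}, index=pd.to_datetime(['2021-01-01', '2021-01-03']))
print(flag_missing_timestamps(df, 'D', '2021-01-01', '2021-01-03').flag)  # True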
27,720 |
def _raw_skip_reason(rep):
assert rep.skipped
assert len(rep.longrepr) == 3
_, _, reason = rep.longrepr
if reason.startswith("Skipped: "):
reason = reason[9:]
return reason
|
def _raw_skip_reason(rep):
assert rep.skipped
assert len(rep.longrepr) == 3
_, _, reason = rep.longrepr
if reason.startswith("Skipped: "):
reason = reason[len("Skipped: "):]
return reason
|
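The change above replaces the magic number 9 with len("Skipped: "). On Python 3.9+ the same trimming can also be written with str.removeprefix, which is a no-op when the prefix is absent; this is noted only as an alternative, not as what pytest does:

reason = "Skipped: unsupported platform"
print(reason.removeprefix("Skipped: "))          # 'unsupported platform'
print("other reason".removeprefix("Skipped: "))  # 'other reason'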
2,109 |
def test_array_function_not_called():
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = _NotAnArray(X)
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
estimator = LogisticRegression()
estimator.fit(X, y)
rng = np.random.RandomState(42)
permutation_importance(estimator, X, y, n_repeats=5,
random_state=rng, n_jobs=1)
|
def test_array_function_not_called():
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = _NotAnArray(X)
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
estimator = LogisticRegression()
estimator.fit(X, y)
rng = np.random.RandomState(42)
permutation_importance(estimator, X, y, n_repeats=2,
random_state=rng, n_jobs=1)
|
42,429 |
def create_request_function(url: str, path: Optional[str]) -> Callable[[], float]:
if path == "none" or path is None:
return lambda: 0.0
else:
return functools.partial(_request_value, url + path)
|
def create_request_function(url: str, path: Optional[str]) -> Callable[[], float]:
if path == "none" or path is None:
return lambda: None
else:
return functools.partial(_request_value, url + path)
|
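The only behavioural change above is that the no-path callable now returns None instead of 0.0; the other branch is a plain functools.partial. A minimal sketch of that branch with a stand-in fetcher (fetch_value and make_getter are hypothetical, not the module's _request_value):

import functools
from typing import Callable, Optional

def fetch_value(full_url: str) -> float:
    # Stand-in for the HTTP request the real helper performs.
    return 42.0

def make_getter(url: str, path: Optional[str]) -> Callable[[], Optional[float]]:
    if path is None or path == "none":
        return lambda: None
    # partial binds the full URL now, so callers invoke the result with no arguments.
    return functools.partial(fetch_value, url + path)

print(make_getter("http://host", "/metric")())  # 42.0
print(make_getter("http://host", None)())       # None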
4,423 |
def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : np.ndarray of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : np.ndarray of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : np.ndarray of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
|
def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : array of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : np.ndarray of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : np.ndarray of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
|
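The procedure described in the docstring above is: orthonormalise both reference sets with a QR decomposition, take the SVD of their cross-correlation, and subtract the projection of the data onto the leading shared temporal directions. A small synthetic NumPy check of that recipe, independent of MNE:

import numpy as np

rng = np.random.default_rng(0)
n_times = 1000
shared = np.sin(np.linspace(0, 20 * np.pi, n_times))        # common temporal signal
ref_1 = rng.standard_normal((10, n_times)) + 5 * shared     # gradiometer-like data
ref_2 = rng.standard_normal((8, n_times)) + 5 * shared      # magnetometer-like data
data = rng.standard_normal((4, n_times)) + 5 * shared       # EEG-like data

ref_1_orth = np.linalg.qr(ref_1.T)[0]                        # orthonormal temporal bases
ref_2_orth = np.linalg.qr(ref_2.T)[0]
u, _, _ = np.linalg.svd(ref_1_orth.T @ ref_2_orth)           # shared-subspace weights
proj_vec = (ref_1_orth @ u)[:, :6]                           # n_proj = 6 temporal vectors
cleaned = data - (data @ proj_vec) @ proj_vec.T              # project the subspace out

# The shared component is strongly attenuated after the projection:
print(np.abs(data @ shared).sum(), np.abs(cleaned @ shared).sum())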
42,674 |
def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_ETH.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
    # For some reason this data can be reversed,
    # and we avoid failing with the help of this ugly check.
    # Dicts are not hashable, so it's not possible to use a better and simpler way
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
|
def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={'asset': A_ETH.serialize()},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
    # For some reason this data can be reversed,
    # and we avoid failing with the help of this ugly check.
    # Dicts are not hashable, so it's not possible to use a better and simpler way
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
|
36,689 |
def _init_module_attrs(spec, module, *, override=False):
    # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (override or getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if override or getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
# __package__
if override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
# XXX We should extend __path__ if it's already a list.
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
# __file__/__cached__
if spec.has_location:
if override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
if override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
# A backward compatibility hack.
if _bootstrap_external and isinstance(spec.loader, _bootstrap_external.NamespaceLoader):
# While the docs say that module.__file__ is not set for
# built-in modules, and the code below will avoid setting it if
# spec.has_location is false, this is incorrect for namespace
# packages. Namespace packages have no location, but their
# __spec__.origin is None, and thus their module.__file__
# should also be None for consistency. While a bit of a hack,
# this is the best place to ensure this consistency.
#
# See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module
# and bpo-32305
module.__file__ = None
return module
|
def _init_module_attrs(spec, module, *, override=False):
    # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (override or getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if override or getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
# __package__
if override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
# XXX We should extend __path__ if it's already a list.
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
# __file__/__cached__
if spec.has_location:
if override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
if override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
# A backward compatibility hack.
elif _bootstrap_external and isinstance(spec.loader, _bootstrap_external.NamespaceLoader):
# While the docs say that module.__file__ is not set for
# built-in modules, and the code below will avoid setting it if
# spec.has_location is false, this is incorrect for namespace
# packages. Namespace packages have no location, but their
# __spec__.origin is None, and thus their module.__file__
# should also be None for consistency. While a bit of a hack,
# this is the best place to ensure this consistency.
#
# See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module
# and bpo-32305
module.__file__ = None
return module
|
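The behavioural difference above is the elif: with it, the namespace-package hack no longer runs when the spec already has a location. The attribute initialisation itself is observable through the public API, since importlib.util.module_from_spec populates these attributes from the spec before the module body is executed:

import importlib.util

spec = importlib.util.find_spec("json")
module = importlib.util.module_from_spec(spec)   # module object is created but not executed

print(module.__name__)                   # 'json'
print(module.__spec__ is spec)           # True
print(module.__loader__ is spec.loader)  # True
print(module.__package__)                # 'json'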
33,121 |
def partial_Id_Obs(Nx, obs_inds):
"""Specify identity observations of a subset of obs. indices.
It is not a function of time.
Parameters
----------
Nx: int
Number of total length of state vector
obs_inds: ndarray
The observed indices.
Returns
-------
Obs: dict
Observation operator including size of the observation space,
observation operator/model and tangent linear observation operator
"""
Ny = len(obs_inds)
H = direct_obs_matrix(Nx, obs_inds)
@ens_compatible
def model(x, t): return x[obs_inds]
def linear(x, t): return H
Obs = {
'M': Ny,
'model': model,
'linear': linear,
}
return Obs
|
def partial_Id_Obs(Nx, obs_inds):
"""Specify identity observations of a subset of obs. indices.
It is not a function of time.
Parameters
----------
Nx: int
Length of state vector
obs_inds: ndarray
The observed indices.
Returns
-------
Obs: dict
Observation operator including size of the observation space,
observation operator/model and tangent linear observation operator
"""
Ny = len(obs_inds)
H = direct_obs_matrix(Nx, obs_inds)
@ens_compatible
def model(x, t): return x[obs_inds]
def linear(x, t): return H
Obs = {
'M': Ny,
'model': model,
'linear': linear,
}
return Obs
|
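The observation operator above is the identity matrix restricted to the observed rows, so H @ x equals x[obs_inds]. A hedged sketch with direct_obs_matrix written out explicitly (this stand-in may differ from DAPPER's own implementation):

import numpy as np

def direct_obs_matrix(Nx, obs_inds):
    # Identity restricted to the observed rows: shape (len(obs_inds), Nx).
    return np.eye(Nx)[obs_inds]

Nx, obs_inds = 5, np.array([0, 2, 4])
H = direct_obs_matrix(Nx, obs_inds)
x = np.arange(5.0)
print(np.allclose(H @ x, x[obs_inds]))  # True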
953 |
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b, c
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
* When undetermined coefficients are identified:
This happens when it is possible to form a linear set of
equations in the variables provided from the coefficients
of the expressions in symbols not provided. A single
dictionary with specified values will be returned:
>>> eq = (a + b)*x - b + 2
>>> solve(eq, a, b)
{a: -2, b: 2}
The coefficient system solved was:
>>> list(eq.expand().as_coefficients_dict(x).values())
[a + b, -b + 2]
To obtain an algebraic solution in terms of ``a`` or ``b``
pass the equation in a list:
>>> solve([eq], a, b)
{a: b*(1 - x)/x - c/x}
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
    eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
    known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = 3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
        like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
|
def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
* When undetermined coefficients are identified:
This happens when it is possible to form a linear set of
equations in the variables provided from the coefficients
of the expressions in symbols not provided. A single
dictionary with specified values will be returned:
>>> eq = (a + b)*x - b + 2
>>> solve(eq, a, b)
{a: -2, b: 2}
The coefficient system solved was:
>>> list(eq.expand().as_coefficients_dict(x).values())
[a + b, -b + 2]
To obtain an algebraic solution in terms of ``a`` or ``b``
pass the equation in a list:
>>> solve([eq], a, b)
            {a: b*(1 - x)/x - 2/x}
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
    eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
    known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
    We will construct a known value for this expression at x = -3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
            like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# remove symbols the user is not interested in
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this is needed in the next two events
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
else:
solution = _solve_system(f, symbols, **flags)
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
for i in range(1):
if isinstance(solution, dict):
solution = _do_dict(solution)
break
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
break
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
break
else:
solution = [v.subs(non_inverts) for v in solution]
break
elif not solution:
break
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
|
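Editorial aside: a minimal, hedged sketch of how the dict and set flags documented in the record above shape the return value of solve; it only reuses output shapes already shown in the docstring and assumes a stock SymPy installation.

from sympy import solve, symbols

x, y = symbols('x y')

# default form: a plain list of roots for a univariate equation
print(solve(x**2 - 1, x))                      # [-1, 1]

# dict=True: always a list of solution mappings
print(solve(x**2 - 1, x, dict=True))           # [{x: -1}, {x: 1}]

# set=True: (ordered symbols, set of solution tuples)
syms, sols = solve([x - 3, y - 1], [x, y], set=True)
print(syms, sols)                              # [x, y] {(3, 1)}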
3,855 |
def descendants(G, source):
"""Returns all nodes reachable from `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
A directed graph
source : node in `G`
Returns
-------
set()
The descendants of `source` in `G`
"""
if not G.has_node(source):
raise nx.NetworkXError(f"The node {source} is not in the graph.")
des = {n for n, d in nx.shortest_path_length(G, source=source).items()}
return des - {source}
|
def descendants(G, source):
"""Returns all nodes reachable from `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
A directed graph
source : node in `G`
Returns
-------
set()
The descendants of `source` in `G`
"""
if not G.has_node(source):
raise nx.NetworkXError(f"The node {source} is not in the graph.")
des = {n for n, d in nx.shortest_path_length(G, source=source).items()}
return des - {source}
|
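Editorial aside: a small usage sketch for the descendants helper in the record above; it assumes NetworkX is installed and that the function defined above is in scope, and compares it with the built-in nx.descendants.

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (4, 5)])
print(descendants(G, 1))        # {2, 3}: nodes reachable from 1, excluding 1 itself
print(nx.descendants(G, 1))     # the built-in equivalent returns the same set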
22,597 |
def parse_txt(mnemo, attrib, txt, loc_db):
"""Parse an assembly listing. Returns an AsmCfg instance
@mnemo: architecture used
@attrib: architecture attribute
@txt: assembly listing
@loc_db: the LocationDB instance used to handle labels of the listing
"""
C_NEXT = asmblock.AsmConstraint.c_next
C_TO = asmblock.AsmConstraint.c_to
lines = []
# parse each line
for line in txt.split('\n'):
# empty
if EMPTY_RE.match(line):
continue
# comment
if COMMENT_RE.match(line):
continue
# labels to forget
if FORGET_LABEL_RE.match(line):
continue
# label beginning with .L
match_re = LABEL_RE.match(line)
if match_re:
label_name = match_re.group(1)
label = loc_db.get_or_create_name_location(label_name)
lines.append(label)
continue
# directive
if DIRECTIVE_START_RE.match(line):
match_re = DIRECTIVE_RE.match(line)
directive = match_re.group(1)
if directive in ['text', 'data', 'bss']:
continue
if directive in ['string', 'ascii']:
# XXX HACK
line = line.replace(r'\n', '\n').replace(r'\r', '\r')
raw = line[line.find(r'"') + 1:line.rfind(r'"')]
raw = codecs.escape_decode(raw)[0]
if directive == 'string':
raw += b"\x00"
lines.append(asmblock.AsmRaw(raw))
continue
if directive == 'ustring':
# XXX HACK
line = line.replace(r'\n', '\n').replace(r'\r', '\r')
raw = line[line.find(r'"') + 1:line.rfind(r'"')] + "\x00"
raw = codecs.escape_decode(raw)[0]
out = b''
for i in range(len(raw)):
out += raw[i:i+1] + b'\x00'
lines.append(asmblock.AsmRaw(out))
continue
if directive in declarator:
data_raw = line[match_re.end():].split(' ', 1)[1]
data_raw = data_raw.split(',')
size = declarator[directive]
expr_list = []
# parser
for element in data_raw:
element = element.strip()
element_parsed = base_expr.parseString(element)[0]
element_expr = asm_ast_to_expr_with_size(element_parsed, loc_db, size)
expr_list.append(element_expr)
raw_data = asmblock.AsmRaw(expr_list)
raw_data.element_size = size
lines.append(raw_data)
continue
if directive == 'comm':
# TODO
continue
if directive == 'split': # custom command
lines.append(DirectiveSplit())
continue
if directive == 'dontsplit': # custom command
lines.append(DirectiveDontSplit())
continue
if directive == "align":
align_value = int(line[match_re.end():], 0)
lines.append(DirectiveAlign(align_value))
continue
if directive in ['file', 'intel_syntax', 'globl', 'local',
'type', 'size', 'align', 'ident', 'section']:
continue
if directive[0:4] == 'cfi_':
continue
raise ValueError("unknown directive %s" % directive)
# label
match_re = LABEL_RE.match(line)
if match_re:
label_name = force_str(match_re.group(1))
label = loc_db.get_or_create_name_location(label_name)
lines.append(label)
continue
# code
if ';' in line:
line = line[:line.find(';')]
line = line.strip(' ').strip('\t')
instr = mnemo.fromstring(line, loc_db, attrib)
lines.append(instr)
asmblock.log_asmblock.info("___pre asm oki___")
# make asmcfg
cur_block = None
state = STATE_NO_BLOC
i = 0
asmcfg = asmblock.AsmCFG(loc_db)
block_to_nlink = None
delayslot = 0
while i < len(lines):
if delayslot:
delayslot -= 1
if delayslot == 0:
state = STATE_NO_BLOC
line = lines[i]
# no current block
if state == STATE_NO_BLOC:
if isinstance(line, DirectiveDontSplit):
block_to_nlink = cur_block
i += 1
continue
elif isinstance(line, DirectiveSplit):
block_to_nlink = None
i += 1
continue
elif not isinstance(line, LocKey):
# First line must be a label. If it's not the case, generate
# it.
loc = loc_db.add_location()
cur_block = asmblock.AsmBlock(loc_db, loc, alignment=mnemo.alignment)
else:
cur_block = asmblock.AsmBlock(loc_db, line, alignment=mnemo.alignment)
i += 1
# Generate the current block
asmcfg.add_block(cur_block)
state = STATE_IN_BLOC
if block_to_nlink:
block_to_nlink.addto(
asmblock.AsmConstraint(
cur_block.loc_key,
C_NEXT
)
)
block_to_nlink = None
continue
# in block
elif state == STATE_IN_BLOC:
if isinstance(line, DirectiveSplit):
state = STATE_NO_BLOC
block_to_nlink = None
elif isinstance(line, DirectiveDontSplit):
state = STATE_NO_BLOC
block_to_nlink = cur_block
elif isinstance(line, DirectiveAlign):
cur_block.alignment = line.alignment
elif isinstance(line, asmblock.AsmRaw):
cur_block.addline(line)
block_to_nlink = cur_block
elif isinstance(line, LocKey):
if block_to_nlink:
cur_block.addto(
asmblock.AsmConstraint(line, C_NEXT)
)
block_to_nlink = None
state = STATE_NO_BLOC
continue
# instruction
elif isinstance(line, instruction):
cur_block.addline(line)
block_to_nlink = cur_block
if not line.breakflow():
i += 1
continue
if delayslot:
raise RuntimeError("Cannot have breakflow in delayslot")
if line.dstflow():
for dst in line.getdstflow(loc_db):
if not isinstance(dst, ExprId):
continue
if dst in mnemo.regs.all_regs_ids:
continue
cur_block.addto(asmblock.AsmConstraint(dst.name, C_TO))
if not line.splitflow():
block_to_nlink = None
delayslot = line.delayslot + 1
else:
raise RuntimeError("unknown class %s" % line.__class__)
i += 1
for block in asmcfg.blocks:
# Fix multiple constraints
block.fix_constraints()
# Log block
asmblock.log_asmblock.info(block)
return asmcfg
|
def parse_txt(mnemo, attrib, txt, loc_db):
"""Parse an assembly listing. Returns an AsmCfg instance
@mnemo: architecture used
@attrib: architecture attribute
@txt: assembly listing
@loc_db: the LocationDB instance used to handle labels of the listing
"""
C_NEXT = asmblock.AsmConstraint.c_next
C_TO = asmblock.AsmConstraint.c_to
lines = []
# parse each line
for line in txt.split('\n'):
# empty
if EMPTY_RE.match(line):
continue
# comment
if COMMENT_RE.match(line):
continue
# labels to forget
if FORGET_LABEL_RE.match(line):
continue
# label beginning with .L
match_re = LABEL_RE.match(line)
if match_re:
label_name = match_re.group(1)
label = loc_db.get_or_create_name_location(label_name)
lines.append(label)
continue
# directive
if DIRECTIVE_START_RE.match(line):
match_re = DIRECTIVE_RE.match(line)
directive = match_re.group(1)
if directive in ['text', 'data', 'bss']:
continue
if directive in ['string', 'ascii']:
# XXX HACK
line = line.replace(r'\n', '\n').replace(r'\r', '\r')
raw = line[line.find(r'"') + 1:line.rfind(r'"')]
raw = codecs.escape_decode(raw)[0]
if directive == 'string':
raw += b"\x00"
lines.append(asmblock.AsmRaw(raw))
continue
if directive == 'ustring':
# XXX HACK
line = line.replace(r'\n', '\n').replace(r'\r', '\r')
raw = line[line.find(r'"') + 1:line.rfind(r'"')] + "\x00"
raw = codecs.escape_decode(raw)[0]
out = b''
for i in range(len(raw)):
out += raw[i:i+1] + b'\x00'
lines.append(asmblock.AsmRaw(out))
continue
if directive in declarator:
data_raw = line[match_re.end():].split(' ', 1)[1]
data_raw = data_raw.split(',')
size = declarator[directive]
expr_list = []
# parser
for element in data_raw:
element = element.strip()
element_parsed = base_expr.parseString(element)[0]
element_expr = asm_ast_to_expr_with_size(element_parsed, loc_db, size)
expr_list.append(element_expr)
raw_data = asmblock.AsmRaw(expr_list)
raw_data.element_size = size
lines.append(raw_data)
continue
if directive == 'comm':
# TODO
continue
if directive == 'split': # custom command
lines.append(DirectiveSplit())
continue
if directive == 'dontsplit': # custom command
lines.append(DirectiveDontSplit())
continue
if directive == "align":
align_value = int(line[match_re.end():], 0)
lines.append(DirectiveAlign(align_value))
continue
if directive in ['file', 'intel_syntax', 'globl', 'local',
'type', 'size', 'align', 'ident', 'section']:
continue
if directive[0:4] == 'cfi_':
continue
raise ValueError("unknown directive %s" % directive)
# label
match_re = LABEL_RE.match(line)
if match_re:
label_name = match_re.group(1)
label = loc_db.get_or_create_name_location(label_name)
lines.append(label)
continue
# code
if ';' in line:
line = line[:line.find(';')]
line = line.strip(' ').strip('\t')
instr = mnemo.fromstring(line, loc_db, attrib)
lines.append(instr)
asmblock.log_asmblock.info("___pre asm oki___")
# make asmcfg
cur_block = None
state = STATE_NO_BLOC
i = 0
asmcfg = asmblock.AsmCFG(loc_db)
block_to_nlink = None
delayslot = 0
while i < len(lines):
if delayslot:
delayslot -= 1
if delayslot == 0:
state = STATE_NO_BLOC
line = lines[i]
# no current block
if state == STATE_NO_BLOC:
if isinstance(line, DirectiveDontSplit):
block_to_nlink = cur_block
i += 1
continue
elif isinstance(line, DirectiveSplit):
block_to_nlink = None
i += 1
continue
elif not isinstance(line, LocKey):
# First line must be a label. If it's not the case, generate
# it.
loc = loc_db.add_location()
cur_block = asmblock.AsmBlock(loc_db, loc, alignment=mnemo.alignment)
else:
cur_block = asmblock.AsmBlock(loc_db, line, alignment=mnemo.alignment)
i += 1
# Generate the current block
asmcfg.add_block(cur_block)
state = STATE_IN_BLOC
if block_to_nlink:
block_to_nlink.addto(
asmblock.AsmConstraint(
cur_block.loc_key,
C_NEXT
)
)
block_to_nlink = None
continue
# in block
elif state == STATE_IN_BLOC:
if isinstance(line, DirectiveSplit):
state = STATE_NO_BLOC
block_to_nlink = None
elif isinstance(line, DirectiveDontSplit):
state = STATE_NO_BLOC
block_to_nlink = cur_block
elif isinstance(line, DirectiveAlign):
cur_block.alignment = line.alignment
elif isinstance(line, asmblock.AsmRaw):
cur_block.addline(line)
block_to_nlink = cur_block
elif isinstance(line, LocKey):
if block_to_nlink:
cur_block.addto(
asmblock.AsmConstraint(line, C_NEXT)
)
block_to_nlink = None
state = STATE_NO_BLOC
continue
# instruction
elif isinstance(line, instruction):
cur_block.addline(line)
block_to_nlink = cur_block
if not line.breakflow():
i += 1
continue
if delayslot:
raise RuntimeError("Cannot have breakflow in delayslot")
if line.dstflow():
for dst in line.getdstflow(loc_db):
if not isinstance(dst, ExprId):
continue
if dst in mnemo.regs.all_regs_ids:
continue
cur_block.addto(asmblock.AsmConstraint(dst.name, C_TO))
if not line.splitflow():
block_to_nlink = None
delayslot = line.delayslot + 1
else:
raise RuntimeError("unknown class %s" % line.__class__)
i += 1
for block in asmcfg.blocks:
# Fix multiple constraints
block.fix_constraints()
# Log block
asmblock.log_asmblock.info(block)
return asmcfg
|
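Editorial aside: the block-building loop above alternates between STATE_NO_BLOC and STATE_IN_BLOC; below is a hedged, self-contained sketch of the same two-state idea reduced to grouping a flat listing into labelled blocks. All names here are illustrative and are not part of the miasm API.

def group_blocks(listing):
    """Group assembly-like lines into blocks that start at labels."""
    blocks, current = [], None
    for line in listing:
        if line.endswith(':'):            # a label opens a new block
            current = {'label': line[:-1], 'lines': []}
            blocks.append(current)
        else:
            if current is None:           # code before any label: synthesize an anonymous block
                current = {'label': None, 'lines': []}
                blocks.append(current)
            current['lines'].append(line)
    return blocks

print(group_blocks(['start:', 'mov eax, 1', 'loop:', 'dec eax', 'jnz loop']))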
31,910 |
def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url_reputation(url=url)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
url_data = response.get('urlData')
snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data)
ec = {
'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],
'DBotScore': dbot_score_cont,
'URL': url_cont
}
title = 'SlashNext Phishing Incident Response - URL Reputation\n'\
'##### url = {}'.format(url_data.get('url'))
if response.get('normalizeData').get('normalizeStatus') == 1:
title += ' *\n*' + response.get('normalizeData').get('normalizeMessage')
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
|
def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url_reputation(url=url)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
url_data = response.get('urlData')
snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data)
ec = {
'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],
'DBotScore': dbot_score_cont,
'URL': url_cont
}
title = 'SlashNext Phishing Incident Response - URL Lookup\n'\
'##### url = {}'.format(url_data.get('url'))
if response.get('normalizeData').get('normalizeStatus') == 1:
title += ' *\n*' + response.get('normalizeData').get('normalizeMessage')
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
|
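Editorial aside: a hedged sketch of the entry-context shape assembled above, filled with made-up values; the key 'SlashNext.URL(val.Value === obj.Value)' is a Demisto DT expression that tells the platform to de-duplicate context entries on their Value field. No platform APIs are called here.

# All literal values below are invented for illustration only.
snx_ioc_cont = [{'Value': 'https://evil.example', 'Type': 'Scanned URL', 'Verdict': 'Malicious'}]
dbot_score_cont = {'Indicator': 'https://evil.example', 'Type': 'url', 'Score': 3}
url_cont = {'Data': 'https://evil.example'}

ec = {
    'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],
    'DBotScore': dbot_score_cont,
    'URL': url_cont,
}
print(ec)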
22,701 |
def main():
args = sys.argv[1:]
# Default config is pebble
directory_url = os.environ.get('SERVER', 'https://localhost:14000/dir')
http_01_port = int(os.environ.get('HTTP_01_PORT', '5002'))
tls_alpn_01_port = int(os.environ.get('TLS_ALPN_01_PORT', '5001'))
# Execution of certbot in a self-contained workspace
workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace'))
if not os.path.exists(workspace):
print('--> Create a workspace for certbot_test: {0}'.format(workspace))
os.mkdir(workspace)
else:
print('--> Use an existing workspace for certbot_test: {0}'.format(workspace))
config_dir = os.path.join(workspace, 'conf')
certbot_test(args, directory_url, http_01_port, tls_alpn_01_port,
config_dir, workspace, True, False)
|
def main():
args = sys.argv[1:]
# Default config is pebble
directory_url = os.environ.get('SERVER', 'https://localhost:14000/dir')
http_01_port = int(os.environ.get('HTTP_01_PORT', '5002'))
tls_alpn_01_port = int(os.environ.get('TLS_ALPN_01_PORT', '5001'))
# Execution of certbot in a self-contained workspace
workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace'))
if not os.path.exists(workspace):
print('--> Create a workspace for certbot_test: {0}'.format(workspace))
os.mkdir(workspace)
else:
print('--> Using an existing workspace for certbot_test: {0}'.format(workspace))
config_dir = os.path.join(workspace, 'conf')
certbot_test(args, directory_url, http_01_port, tls_alpn_01_port,
config_dir, workspace, True, False)
|
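Editorial aside: the create-or-reuse workspace logic above can also be written with os.makedirs(exist_ok=True); a minimal sketch under the same environment-variable convention.

import os

workspace = os.environ.get('WORKSPACE', os.path.join(os.getcwd(), '.certbot_test_workspace'))
os.makedirs(workspace, exist_ok=True)   # creates the directory only if it does not already exist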
48,962 |
def image_url(file_reference, alt_text, width, emoji=False, question=None, external=False, status=None):
if question and file_reference in question.interview.images:
if status and question.interview.images[file_reference].attribution is not None:
status.attributions.add(question.interview.images[file_reference].attribution)
file_reference = question.interview.images[file_reference].get_reference()
file_info = server.file_finder(file_reference, question=question)
if 'mimetype' in file_info and file_info['mimetype']:
if re.search(r'^audio', file_info['mimetype']):
urls = get_audio_urls([{'text': "[FILE " + file_reference + "]", 'package': None, 'type': 'audio'}], question=question)
if len(urls) > 0:
return audio_control(urls)
return ''
if re.search(r'^video', file_info['mimetype']):
urls = get_video_urls([{'text': "[FILE " + file_reference + "]", 'package': None, 'type': 'video'}], question=question)
if len(urls) > 0:
return video_control(urls)
return ''
if 'extension' in file_info and file_info['extension'] is not None:
if re.match(r'.*%$', width):
width_string = "width:" + width
else:
width_string = "max-width:" + width
if emoji:
width_string += ';vertical-align: middle'
alt_text = 'alt="" '
the_url = server.url_finder(file_reference, _question=question, display_filename=file_info['filename'], _external=external)
if the_url is None:
return '[ERROR: File reference ' + str(file_reference) + ' cannot be displayed]'
if width_string == 'width:100%':
extra_class = ' dawideimage'
else:
extra_class = ''
if file_info.get('extension', '') in ('png', 'jpg', 'gif', 'svg', 'jpe', 'jpeg'):
try:
if file_info.get('extension', '') == 'svg':
attributes = ET.parse(file_info['fullpath']).getroot().attrib
layout_width = attributes['width']
layout_height = attributes['height']
else:
im = PIL.Image.open(file_info['fullpath'])
layout_width, layout_height = im.size
return '<img ' + alt_text + 'class="daicon daimageref' + extra_class + '" width=' + str(layout_width) + ' height=' + str(layout_height) + ' style="' + width_string + '; height: auto;" src="' + the_url + '"/>'
except:
return '<img ' + alt_text + 'class="daicon daimageref' + extra_class + '" style="' + width_string + '; height: auto;" src="' + the_url + '"/>'
if file_info['extension'] in ('pdf', 'docx', 'rtf', 'doc', 'odt'):
if file_info['extension'] in ('docx', 'rtf', 'doc', 'odt') and not os.path.isfile(file_info['path'] + '.pdf'):
server.fg_make_pdf_for_word_path(file_info['path'], file_info['extension'])
server.fg_make_png_for_pdf_path(file_info['path'] + ".pdf", 'screen', page=1)
if re.match(r'[0-9]+', str(file_reference)):
sf = server.SavedFile(int(file_reference), fix=True)
sf.finalize()
if 'pages' not in file_info:
try:
reader = safe_pypdf_reader(file_info['path'] + '.pdf')
file_info['pages'] = reader.getNumPages()
except:
file_info['pages'] = 1
the_image_url = server.url_finder(file_reference, size="screen", page=1, _question=question, _external=external)
if the_image_url is None:
return '[ERROR: File reference ' + str(file_reference) + ' cannot be displayed]'
if 'filename' in file_info:
title = ' title="' + file_info['filename'] + '"'
else:
title = ''
if alt_text == '':
the_alt_text = 'alt=' + json.dumps(word("Thumbnail image of document")) + ' '
else:
the_alt_text = alt_text
try:
safe_pdf_reader = safe_pypdf_reader(file_info['path'] + '.pdf')
layout_width = str(safe_pdf_reader.getPage(0).mediaBox.getWidth())
layout_height = str(safe_pdf_reader.getPage(0).mediaBox.getHeight())
output = '<a target="_blank"' + title + ' class="daimageref" href="' + the_url + '"><img ' + the_alt_text + 'class="daicon dapdfscreen' + extra_class + '" width=' + layout_width + ' height=' + layout_height + ' style="' + width_string + '; height: auto;" src="' + the_image_url + '"/></a>'
except:
output = '<a target="_blank"' + title + ' class="daimageref" href="' + the_url + '"><img ' + the_alt_text + 'class="daicon dapdfscreen' + extra_class + '" style="' + width_string + '; height: auto;" src="' + the_image_url + '"/></a>'
if 'pages' in file_info and file_info['pages'] > 1:
output += " (" + str(file_info['pages']) + " " + word('pages') + ")"
return output
return '<a target="_blank" class="daimageref" href="' + the_url + '">' + file_info['filename'] + '</a>'
return '[Invalid image reference; reference=' + str(file_reference) + ', width=' + str(width) + ', filename=' + file_info.get('filename', 'unknown') + ']'
|
def image_url(file_reference, alt_text, width, emoji=False, question=None, external=False, status=None):
if question and file_reference in question.interview.images:
if status and question.interview.images[file_reference].attribution is not None:
status.attributions.add(question.interview.images[file_reference].attribution)
file_reference = question.interview.images[file_reference].get_reference()
file_info = server.file_finder(file_reference, question=question)
if 'mimetype' in file_info and file_info['mimetype']:
if re.search(r'^audio', file_info['mimetype']):
urls = get_audio_urls([{'text': "[FILE " + file_reference + "]", 'package': None, 'type': 'audio'}], question=question)
if len(urls) > 0:
return audio_control(urls)
return ''
if re.search(r'^video', file_info['mimetype']):
urls = get_video_urls([{'text': "[FILE " + file_reference + "]", 'package': None, 'type': 'video'}], question=question)
if len(urls) > 0:
return video_control(urls)
return ''
if 'extension' in file_info and file_info['extension'] is not None:
if re.match(r'.*%$', width):
width_string = "width:" + width
else:
width_string = "max-width:" + width
if emoji:
width_string += ';vertical-align: middle'
alt_text = 'alt="" '
the_url = server.url_finder(file_reference, _question=question, display_filename=file_info['filename'], _external=external)
if the_url is None:
return '[ERROR: File reference ' + str(file_reference) + ' cannot be displayed]'
if width_string == 'width:100%':
extra_class = ' dawideimage'
else:
extra_class = ''
if file_info.get('extension', '') in ('png', 'jpg', 'gif', 'svg', 'jpe', 'jpeg'):
try:
if file_info.get('extension', '') == 'svg':
attributes = ET.parse(file_info['fullpath']).getroot().attrib
layout_width = attributes['width']
layout_height = attributes['height']
else:
with PIL.Image.open(file_info['fullpath']) as im:
layout_width, layout_height = im.size
return '<img ' + alt_text + 'class="daicon daimageref' + extra_class + '" width=' + str(layout_width) + ' height=' + str(layout_height) + ' style="' + width_string + '; height: auto;" src="' + the_url + '"/>'
except:
return '<img ' + alt_text + 'class="daicon daimageref' + extra_class + '" style="' + width_string + '; height: auto;" src="' + the_url + '"/>'
if file_info['extension'] in ('pdf', 'docx', 'rtf', 'doc', 'odt'):
if file_info['extension'] in ('docx', 'rtf', 'doc', 'odt') and not os.path.isfile(file_info['path'] + '.pdf'):
server.fg_make_pdf_for_word_path(file_info['path'], file_info['extension'])
server.fg_make_png_for_pdf_path(file_info['path'] + ".pdf", 'screen', page=1)
if re.match(r'[0-9]+', str(file_reference)):
sf = server.SavedFile(int(file_reference), fix=True)
sf.finalize()
if 'pages' not in file_info:
try:
reader = safe_pypdf_reader(file_info['path'] + '.pdf')
file_info['pages'] = reader.getNumPages()
except:
file_info['pages'] = 1
the_image_url = server.url_finder(file_reference, size="screen", page=1, _question=question, _external=external)
if the_image_url is None:
return '[ERROR: File reference ' + str(file_reference) + ' cannot be displayed]'
if 'filename' in file_info:
title = ' title="' + file_info['filename'] + '"'
else:
title = ''
if alt_text == '':
the_alt_text = 'alt=' + json.dumps(word("Thumbnail image of document")) + ' '
else:
the_alt_text = alt_text
try:
safe_pdf_reader = safe_pypdf_reader(file_info['path'] + '.pdf')
layout_width = str(safe_pdf_reader.getPage(0).mediaBox.getWidth())
layout_height = str(safe_pdf_reader.getPage(0).mediaBox.getHeight())
output = '<a target="_blank"' + title + ' class="daimageref" href="' + the_url + '"><img ' + the_alt_text + 'class="daicon dapdfscreen' + extra_class + '" width=' + layout_width + ' height=' + layout_height + ' style="' + width_string + '; height: auto;" src="' + the_image_url + '"/></a>'
except:
output = '<a target="_blank"' + title + ' class="daimageref" href="' + the_url + '"><img ' + the_alt_text + 'class="daicon dapdfscreen' + extra_class + '" style="' + width_string + '; height: auto;" src="' + the_image_url + '"/></a>'
if 'pages' in file_info and file_info['pages'] > 1:
output += " (" + str(file_info['pages']) + " " + word('pages') + ")"
return output
return '<a target="_blank" class="daimageref" href="' + the_url + '">' + file_info['filename'] + '</a>'
return '[Invalid image reference; reference=' + str(file_reference) + ', width=' + str(width) + ', filename=' + file_info.get('filename', 'unknown') + ']'
|
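Editorial aside: the behavioural change in the record above is switching PIL.Image.open to its context-manager form, which guarantees the underlying file handle is closed; a minimal sketch, with 'example.png' as a placeholder path that is assumed to exist.

import PIL.Image

with PIL.Image.open('example.png') as im:    # placeholder file name
    layout_width, layout_height = im.size
# the image file handle is closed here, even if reading .size had raised
print(layout_width, layout_height)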
26,453 |
def get_new_command(command):
cmdList = command.script.split(' ')
packageName = ""
# Find the argument that is the package name
for i in cmdList:
print(i)
if "choco" in i:
continue
if "cinst" in i:
continue
if "install" in i:
continue
if i.startswith('-'): # Some parameters start with hyphens; some packages contain them though
continue
        if '=' in i: # Some parameters contain '='
continue
if '/' in i: # Some parameters contain slashes
continue
else:
packageName = i
# Find the name of the broken package, and append metapackage names
if not packageName:
return False
return(command.script.replace(packageName, packageName + ".install"))
|
def get_new_command(command):
cmdList = command.script.split(' ')
packageName = ""
# Find the argument that is the package name
for script_part in command.script_parts:
        print(script_part)
        if "choco" in script_part:
            continue
        if "cinst" in script_part:
            continue
        if "install" in script_part:
            continue
        if script_part.startswith('-'): # Some parameters start with hyphens; some packages contain them though
            continue
        if '=' in script_part: # Some parameters contain '='
            continue
        if '/' in script_part: # Some parameters contain slashes
            continue
        else:
            packageName = script_part
# Find the name of the broken package, and append metapackage names
if not packageName:
return False
return(command.script.replace(packageName, packageName + ".install"))
|
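Editorial aside: a self-contained sketch of the package-name filtering used above, written against a plain argument list so it can be exercised without thefuck's Command object; the helper name is illustrative.

def find_package_name(parts):
    for part in parts:
        if part in ('choco', 'cinst', 'install'):
            continue
        if part.startswith('-') or '=' in part or '/' in part:
            continue
        return part
    return None

print(find_package_name(['choco', 'install', '-y', 'vim']))   # vim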
56,257 |
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR',
help='directory with demo binaries')
parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR',
help='directory with test data')
parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR',
help='directory to use as the cache for the model downloader')
parser.add_argument('--demos', metavar='DEMO[,DEMO...]',
help='list of demos to run tests for (by default, every demo is tested)')
parser.add_argument('--mo', type=Path, metavar='MO.PY',
help='Model Optimizer entry point script')
parser.add_argument('--devices', default="CPU GPU",
help='list of devices to test')
parser.add_argument('--report-file', type=Path,
help='path to report file')
parser.add_argument('--suppressed-devices', type=Path, required=False,
help='path to file with suppressed devices for each model')
parser.add_argument('--precisions', type=str, nargs='+', default=['FP16', 'FP32'],
help='IR precisions for all models. By default, models are tested in FP32 precision')
return parser.parse_args()
|
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)
parser.add_argument('--demo-build-dir', type=Path, required=True, metavar='DIR',
help='directory with demo binaries')
parser.add_argument('--test-data-dir', type=Path, required=True, metavar='DIR',
help='directory with test data')
parser.add_argument('--downloader-cache-dir', type=Path, required=True, metavar='DIR',
help='directory to use as the cache for the model downloader')
parser.add_argument('--demos', metavar='DEMO[,DEMO...]',
help='list of demos to run tests for (by default, every demo is tested)')
parser.add_argument('--mo', type=Path, metavar='MO.PY',
help='Model Optimizer entry point script')
parser.add_argument('--devices', default="CPU GPU",
help='list of devices to test')
parser.add_argument('--report-file', type=Path,
help='path to report file')
parser.add_argument('--suppressed-devices', type=Path, required=False,
help='path to file with suppressed devices for each model')
parser.add_argument('--precisions', type=str, nargs='+', default=['FP16'],
help='IR precisions for all models. By default, models are tested in FP16 precision')
return parser.parse_args()
|
34,201 |
def get_model_subdirectories(
unpacked_model_path: Text
) -> Tuple[Optional[Text], Optional[Text]]:
"""Returns paths for core and nlu model directories, if they exist.
If neither directories exist, a `NoModelData` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, "core")
nlu_path = os.path.join(unpacked_model_path, "nlu")
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise NoModelData(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
|
def get_model_subdirectories(
unpacked_model_path: Text
) -> Tuple[Optional[Text], Optional[Text]]:
"""Returns paths for Core and NLU model directories, if they exist.
If neither directories exist, a `NoModelData` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, "core")
nlu_path = os.path.join(unpacked_model_path, "nlu")
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise NoModelData(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
|
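Editorial aside: a throwaway-directory sketch of the probing logic above; NoModelData is replaced by a plain ValueError here because the project-specific exception is not defined in this snippet.

import os
import tempfile

with tempfile.TemporaryDirectory() as model_dir:
    os.mkdir(os.path.join(model_dir, 'core'))          # only the Core subdirectory exists
    core_path = os.path.join(model_dir, 'core')
    nlu_path = os.path.join(model_dir, 'nlu')
    core_path = core_path if os.path.isdir(core_path) else None
    nlu_path = nlu_path if os.path.isdir(nlu_path) else None
    if not core_path and not nlu_path:
        raise ValueError('no Core or NLU data')
    print(core_path, nlu_path)                          # <tmpdir>/core None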
31,692 |
def unite_all_tweets_into_dict(twitter_response: Dict[str, Any]) -> None:
# The data for this response is a list of "responses", for each tweet of the user
"""
Unites all data from every tweet to a single dictionary
:type twitter_response: ``Dict[str, List]``
:param twitter_response: The data key from the API's response
"""
try:
response_data = twitter_response.get('data')
except Exception:
raise Exception('The response from the API is empty')
united_data = {}
for tweet in response_data:
for ioc_type, iocs in tweet.get('data').items():
for ioc in iocs:
if not united_data.get(ioc_type):
united_data[ioc_type] = []
united_data.get(ioc_type).append(ioc)
twitter_response['data'] = united_data
|
def unite_all_tweets_into_dict(twitter_response: Dict[str, List]) -> None:
# The data for this response is a list of "responses", for each tweet of the user
"""
    Unites all data from every tweet into a single dictionary
:type twitter_response: ``Dict[str, List]``
:param twitter_response: The data key from the API's response
"""
    response_data = twitter_response.get('data')
    if not response_data:
        raise Exception('The response from the API is empty')
united_data = {}
for tweet in response_data:
for ioc_type, iocs in tweet.get('data').items():
for ioc in iocs:
if not united_data.get(ioc_type):
united_data[ioc_type] = []
united_data.get(ioc_type).append(ioc)
twitter_response['data'] = united_data
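# Hedged usage sketch: the payload below is illustrative only and simply assumes the
# API nests per-tweet IOC lists under a 'data' key, as the helper above expects.
def _example_unite_all_tweets_into_dict():
    example_response = {
        'data': [
            {'data': {'ip': ['1.1.1.1'], 'domain': ['example.com']}},
            {'data': {'ip': ['8.8.8.8']}},
        ]
    }
    unite_all_tweets_into_dict(example_response)
    # example_response['data'] is now {'ip': ['1.1.1.1', '8.8.8.8'], 'domain': ['example.com']}
    return example_response['data']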
|
54,669 |
def uniswap_v3_lp_token_balances(
userdb: 'DBHandler',
address: ChecksumEthAddress,
ethereum: 'EthereumManager',
known_assets: Set[EthereumToken],
unknown_assets: Set[EthereumToken],
) -> List[NFTLiquidityPool]:
nft_manager_contract = EthereumContract(
address=UNISWAP_NFT_MANAGER_ADDRESS,
abi=UNISWAP_V3_NFT_MANAGER_ABI,
deployed_block=UNISWAP_V3_DEPLOYED_BLOCK,
)
address = to_checksum_address(address)
my_positions = nft_manager_contract.call(
ethereum=ethereum,
method_name="balanceOf",
arguments=[address],
)
balances = []
if my_positions > 0:
chunks = list(get_chunks(list(range(my_positions)), n=10))
for chunk in chunks:
tokens_ids_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(
UNISWAP_NFT_MANAGER_ADDRESS,
nft_manager_contract.encode('tokenOfOwnerByIndex', [address, index]),
)
for index in chunk
],
)
tokens_ids = [
nft_manager_contract.decode( # pylint: disable=unsubscriptable-object
result=data[1],
method_name='tokenOfOwnerByIndex',
arguments=[address, index],
)[0]
for index, data in enumerate(tokens_ids_multicall)
]
positions_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(
UNISWAP_NFT_MANAGER_ADDRESS,
nft_manager_contract.encode('positions', [token_id]),
)
for token_id in tokens_ids
],
)
positions = [
nft_manager_contract.decode(
result=data[1],
method_name='positions',
arguments=[tokens_ids[index]],
)
for index, data in enumerate(positions_multicall)
]
pool_addresses = [
compute_pool_address(
factory_address=UNISWAP_V3_FACTORY_ADDRESS,
token0_address=position[2],
token1_address=position[3],
fee=position[4],
)
for position in positions
]
pool_contracts = [
EthereumContract(
address=pool_address,
abi=UNISWAP_V3_POOL_ABI,
deployed_block=UNISWAP_V3_DEPLOYED_BLOCK,
)
for pool_address in pool_addresses
]
slots_0_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(entry[0], entry[1].encode('slot0'))
for entry in zip(pool_addresses, pool_contracts)
],
)
slots_0 = [
entry[0].decode(entry[1][1], 'slot0')
for entry in zip(pool_contracts, slots_0_multicall)
]
tokens_a = [
ethereum.get_basic_contract_info(to_checksum_address(position[2]))
for position in positions
]
tokens_b = [
ethereum.get_basic_contract_info(to_checksum_address(position[3]))
for position in positions
]
price_ranges = [
calculate_price_range(
tick_lower=entry[2][5],
tick_upper=entry[2][6],
decimal_a=entry[0]['decimals'],
decimal_b=entry[1]['decimals'],
)
for entry in zip(tokens_a, tokens_b, positions)
]
amounts_a = [
calculate_amount_a(
tick=entry[0][1],
tick_bottom=entry[1][5],
tick_top=entry[1][6],
liquidity=entry[1][7],
decimals=entry[2]['decimals'],
)
for entry in zip(slots_0, positions, tokens_a)
]
amounts_b = [
calculate_amount_b(
tick=entry[0][1],
tick_bottom=entry[1][5],
tick_top=entry[1][6],
liquidity=entry[1][7],
decimals=entry[2]['decimals'],
)
for entry in zip(slots_0, positions, tokens_b)
]
for entry in zip(tokens_ids, pool_addresses, positions, price_ranges, tokens_a, tokens_b, amounts_a, amounts_b): # noqa: 501
if entry[6] > ZERO or entry[7] > ZERO:
entry[4].update({'amount': entry[6], 'address': entry[2][2]})
entry[5].update({'amount': entry[7], 'address': entry[2][3]})
balances.append(_decode_v3_result(userdb, entry, known_assets, unknown_assets))
return balances
|
def uniswap_v3_lp_token_balances(
userdb: 'DBHandler',
address: ChecksumEthAddress,
ethereum: 'EthereumManager',
known_assets: Set[EthereumToken],
unknown_assets: Set[EthereumToken],
) -> List[NFTLiquidityPool]:
nft_manager_contract = EthereumContract(
address=UNISWAP_NFT_MANAGER_ADDRESS,
abi=UNISWAP_V3_NFT_MANAGER_ABI,
deployed_block=UNISWAP_V3_DEPLOYED_BLOCK,
)
address = to_checksum_address(address)
my_positions = nft_manager_contract.call(
ethereum=ethereum,
method_name="balanceOf",
arguments=[address],
)
balances = []
if my_positions <= 0:
return balances
chunks = list(get_chunks(list(range(my_positions)), n=10))
for chunk in chunks:
tokens_ids_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(
UNISWAP_NFT_MANAGER_ADDRESS,
nft_manager_contract.encode('tokenOfOwnerByIndex', [address, index]),
)
for index in chunk
],
)
tokens_ids = [
nft_manager_contract.decode( # pylint: disable=unsubscriptable-object
result=data[1],
method_name='tokenOfOwnerByIndex',
arguments=[address, index],
)[0]
for index, data in enumerate(tokens_ids_multicall)
]
positions_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(
UNISWAP_NFT_MANAGER_ADDRESS,
nft_manager_contract.encode('positions', [token_id]),
)
for token_id in tokens_ids
],
)
positions = [
nft_manager_contract.decode(
result=data[1],
method_name='positions',
arguments=[tokens_ids[index]],
)
for index, data in enumerate(positions_multicall)
]
pool_addresses = [
compute_pool_address(
factory_address=UNISWAP_V3_FACTORY_ADDRESS,
token0_address=position[2],
token1_address=position[3],
fee=position[4],
)
for position in positions
]
pool_contracts = [
EthereumContract(
address=pool_address,
abi=UNISWAP_V3_POOL_ABI,
deployed_block=UNISWAP_V3_DEPLOYED_BLOCK,
)
for pool_address in pool_addresses
]
slots_0_multicall = multicall_2(
ethereum=ethereum,
require_success=True,
calls=[
(entry[0], entry[1].encode('slot0'))
for entry in zip(pool_addresses, pool_contracts)
],
)
slots_0 = [
entry[0].decode(entry[1][1], 'slot0')
for entry in zip(pool_contracts, slots_0_multicall)
]
tokens_a = [
ethereum.get_basic_contract_info(to_checksum_address(position[2]))
for position in positions
]
tokens_b = [
ethereum.get_basic_contract_info(to_checksum_address(position[3]))
for position in positions
]
price_ranges = [
calculate_price_range(
tick_lower=entry[2][5],
tick_upper=entry[2][6],
decimal_a=entry[0]['decimals'],
decimal_b=entry[1]['decimals'],
)
for entry in zip(tokens_a, tokens_b, positions)
]
amounts_a = [
calculate_amount_a(
tick=entry[0][1],
tick_bottom=entry[1][5],
tick_top=entry[1][6],
liquidity=entry[1][7],
decimals=entry[2]['decimals'],
)
for entry in zip(slots_0, positions, tokens_a)
]
amounts_b = [
calculate_amount_b(
tick=entry[0][1],
tick_bottom=entry[1][5],
tick_top=entry[1][6],
liquidity=entry[1][7],
decimals=entry[2]['decimals'],
)
for entry in zip(slots_0, positions, tokens_b)
]
for entry in zip(tokens_ids, pool_addresses, positions, price_ranges, tokens_a, tokens_b, amounts_a, amounts_b): # noqa: 501
if entry[6] > ZERO or entry[7] > ZERO:
entry[4].update({'amount': entry[6], 'address': entry[2][2]})
entry[5].update({'amount': entry[7], 'address': entry[2][3]})
balances.append(_decode_v3_result(userdb, entry, known_assets, unknown_assets))
return balances
|
10,038 |
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str', required=True),
show_network_policy=dict(type='bool', default=True),
show_teaming_policy=dict(type='bool', default=True),
show_port_policy=dict(type='bool', default=True),
dvswitch=dict(type='str', default='all'),
show_vlan_info=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
dvs_pg_mgr = DVSPortgroupFactsManager(module)
module.exit_json(changed=False,
dvs_portgroup_facts=dvs_pg_mgr.gather_dvs_portgroup_facts())
|
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter=dict(type='str', required=True),
show_network_policy=dict(type='bool', default=True),
show_teaming_policy=dict(type='bool', default=True),
show_port_policy=dict(type='bool', default=True),
dvswitch=dict(),
show_vlan_info=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
dvs_pg_mgr = DVSPortgroupFactsManager(module)
module.exit_json(changed=False,
dvs_portgroup_facts=dvs_pg_mgr.gather_dvs_portgroup_facts())
|
57,784 |
def main():
params = demisto.params()
args = demisto.args()
api_key = params.get('apikey')
base_url = params.get('base_url')
if base_url.endswith('/'):
base_url = base_url[:-1]
indicator_types = params.get('indicator_types')
max_fetch = params.get('max_indicator_to_fetch')
tlp_color = params.get('tlp_color')
if max_fetch:
max_fetch = int(max_fetch)
else:
max_fetch = 500
try:
client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
command = demisto.command()
demisto.info(f"Command being called is {command}")
# Switch case
if command == "fetch-indicators":
indicators = fetch_indicators_command(client, indicator_types, max_fetch, tlp_color=tlp_color)
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == "test-module":
'''When setting up an OpenCTI Client it is checked that it is valid and allows requests to be sent.
and if not he immediately sends an error'''
fetch_indicators_command(client, indicator_types, max_fetch, is_test=True)
return_outputs('ok')
elif command == "opencti-get-indicators":
return_results(get_indicators_command(client, args))
elif command == "opencti-indicator-delete":
return_results(indicator_delete_command(client, args))
elif command == "opencti-indicator-field-update":
return_results(indicator_field_update_command(client, args))
elif command == "opencti-indicator-create":
return_results(indicator_create_command(client, args))
elif command == "opencti-indicator-field-add":
return_results(indicator_field_add_command(client, args))
elif command == "opencti-indicator-field-remove":
return_results(indicator_field_remove_command(client, args))
elif command == "opencti-organization-list":
return_results(organization_list_command(client, args))
elif command == "opencti-organization-create":
return_results(organization_create_command(client, args))
except Exception as e:
return_error(f"Error [{e}]")
|
def main():
params = demisto.params()
args = demisto.args()
api_key = params.get('apikey')
base_url = params.get('base_url')
if base_url.endswith('/'):
base_url = base_url[:-1]
indicator_types = params.get('indicator_types')
tlp_color = params.get('tlp_color')
    max_fetch = arg_to_number(params.get('max_indicator_to_fetch', 500))
try:
client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
command = demisto.command()
demisto.info(f"Command being called is {command}")
# Switch case
if command == "fetch-indicators":
indicators = fetch_indicators_command(client, indicator_types, max_fetch, tlp_color=tlp_color)
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == "test-module":
            '''When setting up an OpenCTI client, it is checked that it is valid and allows requests to be sent;
            if not, an error is returned immediately.'''
fetch_indicators_command(client, indicator_types, max_fetch, is_test=True)
return_outputs('ok')
elif command == "opencti-get-indicators":
return_results(get_indicators_command(client, args))
elif command == "opencti-indicator-delete":
return_results(indicator_delete_command(client, args))
elif command == "opencti-indicator-field-update":
return_results(indicator_field_update_command(client, args))
elif command == "opencti-indicator-create":
return_results(indicator_create_command(client, args))
elif command == "opencti-indicator-field-add":
return_results(indicator_field_add_command(client, args))
elif command == "opencti-indicator-field-remove":
return_results(indicator_field_remove_command(client, args))
elif command == "opencti-organization-list":
return_results(organization_list_command(client, args))
elif command == "opencti-organization-create":
return_results(organization_create_command(client, args))
except Exception as e:
return_error(f"Error [{e}]")
|
55,490 |
def _numeric_only_reduce_fn(applier: Type[Function], *funcs) -> Callable:
"""
Build reduce function for statistic operations with `numeric_only` parameter.
Parameters
----------
applier : Function object to register `funcs`
*funcs : list of functions to register in `applier`
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(self, *args, **kwargs):
# If `numeric_only` is None then we don't know what columns/indices will
# be dropped at the result of reduction function, and so can't preserve labels
preserve_index = kwargs.get("numeric_only", None) is not None
return applier.register(*funcs, preserve_index=preserve_index)(
self, *args, **kwargs
)
return caller
|
def _numeric_only_reduce_fn(applier: Type[Function], *funcs) -> Callable:
"""
Build reduce function for statistic operations with `numeric_only` parameter.
Parameters
----------
applier: Callable
Function object to register `funcs`
*funcs: list
List of functions to register in `applier`
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(self, *args, **kwargs):
# If `numeric_only` is None then we don't know what columns/indices will
# be dropped at the result of reduction function, and so can't preserve labels
preserve_index = kwargs.get("numeric_only", None) is not None
return applier.register(*funcs, preserve_index=preserve_index)(
self, *args, **kwargs
)
return caller
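# Hedged illustration (not Modin code): `DummyApplier` is a stand-in for a Modin
# Function class, defined only to show how `preserve_index` is derived from the
# presence of `numeric_only` before `register` is invoked.
class DummyApplier:
    @classmethod
    def register(cls, *funcs, preserve_index=True):
        def applied(self, *args, **kwargs):
            return {"funcs": [f.__name__ for f in funcs], "preserve_index": preserve_index}
        return applied
class DummyQueryCompiler:
    sum = _numeric_only_reduce_fn(DummyApplier, sum)
# DummyQueryCompiler().sum(numeric_only=True) -> {'funcs': ['sum'], 'preserve_index': True}
# DummyQueryCompiler().sum()                  -> {'funcs': ['sum'], 'preserve_index': False}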
|
705 |
def install_reactor(reactor_path, event_loop_path):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path.Also installs asyncio event loop as specified in the import
path if asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
|
def install_reactor(reactor_path, event_loop_path):
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path. Also installs the asyncio event loop with the specified import
path if asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
with suppress(error.ReactorAlreadyInstalledError):
if event_loop_path is not None:
x = __import__(event_loop_path)
if x is not None:
loop = x.new_event_loop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
asyncioreactor.install(loop)
else:
*module, _ = reactor_path.split(".")
installer_path = module + ["install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
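# Hedged usage sketch: the reactor path below is Twisted's real asyncio reactor, while
# "uvloop" is an optional third-party event loop named purely for illustration. Wrapped
# in a helper so nothing is installed at import time.
def _example_install_asyncio_reactor():
    install_reactor(
        "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "uvloop",
    )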
|
11,756 |
def is_screen_black(frame: Optional[Frame] = None,
mask: MaskTypes = Region.ALL,
threshold: int = None,
region: Region = Region.ALL) -> "_IsScreenBlackResult":
"""Check for the presence of a black screen in a video frame.
:param Frame frame:
If this is specified it is used as the video frame to check; otherwise a
new frame is grabbed from the device-under-test. This is an image in
OpenCV format (for example as returned by `frames` and `get_frame`).
:param str|numpy.ndarray|Mask|Region mask:
A `Region` or a mask that specifies which parts of the image to
analyse. This accepts anything that can be converted to a Mask using
`stbt.load_mask`. See :ref:`Masks`.
:param int threshold:
Even when a video frame appears to be black, the intensity of its pixels
is not always 0. To differentiate almost-black from non-black pixels, a
binary threshold is applied to the frame. The ``threshold`` value is in
the range 0 (black) to 255 (white). The global default (20) can be
changed by setting ``threshold`` in the ``[is_screen_black]`` section of
:ref:`.stbt.conf`.
:param Region region:
Deprecated synonym for ``mask``. Use ``mask`` instead.
:returns:
An object that will evaluate to true if the frame was black, or false
if not black. The object has the following attributes:
* **black** (*bool*) – True if the frame was black.
* **frame** (`stbt.Frame`) – The video frame that was analysed.
Changed in v33: ``mask`` accepts anything that can be converted to a Mask
using `load_mask`. The ``region`` parameter is deprecated; pass your
`Region` to ``mask`` instead. You can't specify ``mask`` and ``region``
at the same time.
"""
if threshold is None:
threshold = get_config('is_screen_black', 'threshold', type_=int)
if frame is None:
from stbt_core import get_frame
frame = get_frame()
if region is not Region.ALL:
if mask is not Region.ALL:
raise ValueError("Cannot specify mask and region at the same time")
mask = region
mask_, region = load_mask(mask).to_array(frame.region)
imglog = ImageLogger("is_screen_black", mask=mask, threshold=threshold)
imglog.imwrite("source", frame)
greyframe = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
if mask_ is not None:
imglog.imwrite("mask", mask_)
cv2.bitwise_and(greyframe, crop(mask_, region), dst=greyframe)
maxVal = greyframe.max()
result = _IsScreenBlackResult(bool(maxVal <= threshold), frame)
debug("is_screen_black: {found} black screen using mask={mask}, "
"threshold={threshold}: {result}, maximum_intensity={maxVal}".format(
found="Found" if result.black else "Didn't find",
mask=mask,
threshold=threshold,
result=result,
maxVal=maxVal))
if imglog.enabled:
imglog.imwrite("grey", greyframe)
_, thresholded = cv2.threshold(greyframe, threshold, 255,
cv2.THRESH_BINARY)
imglog.imwrite("non_black", thresholded)
imglog.set(maxVal=maxVal,
non_black_region=pixel_bounding_box(thresholded))
_log_image_debug(imglog, result)
return result
|
def is_screen_black(frame: Optional[Frame] = None,
mask: MaskTypes = Region.ALL,
threshold: int = None,
region: Region = Region.ALL) -> "_IsScreenBlackResult":
"""Check for the presence of a black screen in a video frame.
:param Frame frame:
If this is specified it is used as the video frame to check; otherwise a
new frame is grabbed from the device-under-test. This is an image in
OpenCV format (for example as returned by `frames` and `get_frame`).
:param str|numpy.ndarray|Mask|Region mask:
A `Region` or a mask that specifies which parts of the image to
analyse. This accepts anything that can be converted to a Mask using
`stbt.load_mask`. See :ref:`Masks`.
:param int threshold:
Even when a video frame appears to be black, the intensity of its pixels
is not always 0. To differentiate almost-black from non-black pixels, a
binary threshold is applied to the frame. The ``threshold`` value is in
the range 0 (black) to 255 (white). The global default (20) can be
changed by setting ``threshold`` in the ``[is_screen_black]`` section of
:ref:`.stbt.conf`.
:param Region region:
Deprecated synonym for ``mask``. Use ``mask`` instead.
:returns:
An object that will evaluate to true if the frame was black, or false
if not black. The object has the following attributes:
* **black** (*bool*) – True if the frame was black.
* **frame** (`stbt.Frame`) – The video frame that was analysed.
Changed in v33: ``mask`` accepts anything that can be converted to a Mask
using `load_mask`. The ``region`` parameter is deprecated; pass your
`Region` to ``mask`` instead. You can't specify ``mask`` and ``region``
at the same time.
"""
if threshold is None:
threshold = get_config('is_screen_black', 'threshold', type_=int)
if frame is None:
from stbt_core import get_frame
frame = get_frame()
if region is not Region.ALL:
if mask is not Region.ALL:
raise ValueError("Cannot specify mask and region at the same time")
mask = region
mask_, region = load_mask(mask).to_array(_image_region(frame))
imglog = ImageLogger("is_screen_black", mask=mask, threshold=threshold)
imglog.imwrite("source", frame)
greyframe = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
if mask_ is not None:
imglog.imwrite("mask", mask_)
cv2.bitwise_and(greyframe, crop(mask_, region), dst=greyframe)
maxVal = greyframe.max()
result = _IsScreenBlackResult(bool(maxVal <= threshold), frame)
debug("is_screen_black: {found} black screen using mask={mask}, "
"threshold={threshold}: {result}, maximum_intensity={maxVal}".format(
found="Found" if result.black else "Didn't find",
mask=mask,
threshold=threshold,
result=result,
maxVal=maxVal))
if imglog.enabled:
imglog.imwrite("grey", greyframe)
_, thresholded = cv2.threshold(greyframe, threshold, 255,
cv2.THRESH_BINARY)
imglog.imwrite("non_black", thresholded)
imglog.set(maxVal=maxVal,
non_black_region=pixel_bounding_box(thresholded))
_log_image_debug(imglog, result)
return result
|
58,949 |
def main():
def get_influxdb_line(measurement: str, value: float, timestamp: datetime, tags: dict):
res = f"{measurement}"
for k, v in tags.items():
res += f",{k}={str(v)}"
res += f" value={value} {int(timestamp.timestamp() * 1e9)}\n"
return res
# open statistics socket
ctx = zmq.Context().instance()
sock = ctx.socket(zmq.PULL)
sock.bind(STATS_SOCKET)
# initialize stats directory
Path(STATS_DIR).mkdir(parents=True, exist_ok=True)
# initialize tags
tags = {
'dongleId': Params().get("DongleId", encoding='utf-8'),
'started': False,
'version': get_short_version(),
'branch': get_short_branch(),
'dirty': is_dirty(),
'origin': get_normalized_origin(),
'deviceType': HARDWARE.get_device_type(),
}
# subscribe to deviceState for started state
sm = SubMaster(['deviceState'])
last_flush_time = time.monotonic()
gauges = {}
samples = {}
while True:
started_prev = sm['deviceState'].started
sm.update()
# Update metrics
while True:
try:
metric = sock.recv_string(zmq.NOBLOCK)
try:
metric_type = metric.split('|')[1]
metric_name = metric.split(':')[0]
metric_value = float(metric.split('|')[0].split(':')[1])
if metric_type == METRIC_TYPE.GAUGE:
gauges[metric_name] = metric_value
if metric_type == METRIC_TYPE.SAMPLE:
if metric_name not in samples.keys():
samples[metric_name] = []
samples[metric_name].append(metric_value)
else:
cloudlog.event("unknown metric type", metric_type=metric_type)
except Exception:
cloudlog.event("malformed metric", metric=metric)
except zmq.error.Again:
break
# flush when started state changes or after FLUSH_TIME_S
if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
result = ""
current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
tags['started'] = sm['deviceState'].started
for gauge_key in gauges:
result += get_influxdb_line(f"gauge.{gauge_key}", gauges[gauge_key], current_time, tags)
for sample_key in samples:
samples[sample_key].sort()
sample_count = len(samples[sample_key])
sample_sum = sum(samples[sample_key])
result += get_influxdb_line(f"sample.{sample_key}.count", sample_count, current_time, tags)
result += get_influxdb_line(f"sample.{sample_key}.min", samples[sample_key][0], current_time, tags)
result += get_influxdb_line(f"sample.{sample_key}.max", samples[sample_key][-1], current_time, tags)
result += get_influxdb_line(f"sample.{sample_key}.mean", sample_sum / sample_count, current_time, tags)
for percentile in [0.05, 0.5, 0.95]:
value = samples[sample_key][int(round(percentile * (sample_count - 1)))]
result += get_influxdb_line(f"sample.{sample_key}.p{int(percentile * 100)}", value, current_time, tags)
# clear intermediate data
gauges = {}
samples = {}
last_flush_time = time.monotonic()
# check that we aren't filling up the drive
if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
if len(result) > 0:
stats_path = os.path.join(STATS_DIR, str(int(current_time.timestamp())))
with atomic_write_in_dir(stats_path) as f:
f.write(result)
else:
cloudlog.error("stats dir full")
|
def main():
def get_influxdb_line(measurement: str, value: float, timestamp: datetime, tags: dict):
res = f"{measurement}"
for k, v in tags.items():
res += f",{k}={str(v)}"
res += f" value={value} {int(timestamp.timestamp() * 1e9)}\n"
return res
# open statistics socket
ctx = zmq.Context().instance()
sock = ctx.socket(zmq.PULL)
sock.bind(STATS_SOCKET)
# initialize stats directory
Path(STATS_DIR).mkdir(parents=True, exist_ok=True)
# initialize tags
tags = {
'dongleId': Params().get("DongleId", encoding='utf-8'),
'started': False,
'version': get_short_version(),
'branch': get_short_branch(),
'dirty': is_dirty(),
'origin': get_normalized_origin(),
'deviceType': HARDWARE.get_device_type(),
}
# subscribe to deviceState for started state
sm = SubMaster(['deviceState'])
last_flush_time = time.monotonic()
gauges = {}
samples = {}
while True:
started_prev = sm['deviceState'].started
sm.update()
# Update metrics
while True:
try:
metric = sock.recv_string(zmq.NOBLOCK)
try:
metric_type = metric.split('|')[1]
metric_name = metric.split(':')[0]
metric_value = float(metric.split('|')[0].split(':')[1])
if metric_type == METRIC_TYPE.GAUGE:
gauges[metric_name] = metric_value
if metric_type == METRIC_TYPE.SAMPLE:
if metric_name not in samples.keys():
samples[metric_name] = []
samples[metric_name].append(metric_value)
else:
cloudlog.event("unknown metric type", metric_type=metric_type)
except Exception:
cloudlog.event("malformed metric", metric=metric)
except zmq.error.Again:
break
# flush when started state changes or after FLUSH_TIME_S
if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
result = ""
current_time = datetime.utcnow().replace(tzinfo=timezone.utc)
tags['started'] = sm['deviceState'].started
for gauge_key in gauges:
result += get_influxdb_line(f"gauge.{gauge_key}", gauges[gauge_key], current_time, tags)
for key, values in samples.items():
values.sort()
sample_count = len(values)
sample_sum = sum(values)
result += get_influxdb_line(f"sample.{key}.count", sample_count, current_time, tags)
result += get_influxdb_line(f"sample.{key}.min", values[0], current_time, tags)
result += get_influxdb_line(f"sample.{key}.max", values[-1], current_time, tags)
result += get_influxdb_line(f"sample.{key}.mean", sample_sum / sample_count, current_time, tags)
for percentile in (0.05, 0.5, 0.95):
value = values[int(round(percentile * (sample_count - 1)))]
result += get_influxdb_line(f"sample.{sample_key}.p{int(percentile * 100)}", value, current_time, tags)
# clear intermediate data
gauges = {}
samples = {}
last_flush_time = time.monotonic()
# check that we aren't filling up the drive
if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
if len(result) > 0:
stats_path = os.path.join(STATS_DIR, str(int(current_time.timestamp())))
with atomic_write_in_dir(stats_path) as f:
f.write(result)
else:
cloudlog.error("stats dir full")
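# Hedged, self-contained sketch of the InfluxDB line protocol emitted above
# ("measurement,tag=value ... value=<float> <ns timestamp>"); it reuses no openpilot
# code and the measurement/tag names are illustrative only.
def _example_influxdb_line():
    from datetime import datetime, timezone
    ts = datetime(2022, 1, 1, tzinfo=timezone.utc)
    line = "gauge.cpu_temp"
    for k, v in {"dongleId": "abc123", "started": True}.items():
        line += f",{k}={v}"
    line += f" value={42.5} {int(ts.timestamp() * 1e9)}\n"
    # -> "gauge.cpu_temp,dongleId=abc123,started=True value=42.5 1640995200000000000\n"
    return line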
|
42,045 |
def test_plot_param_importances() -> None:
# Test with no trial.
study = create_study()
figure = plot_param_importances(study)
assert len(figure.get_lines()) == 0
study = prepare_study_with_trials(with_c_d=True)
# Test with a trial.
figure = plot_param_importances(study)
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
# get_yticklabels return a data structure of Text(0, 0, 'param_d')
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with an evaluator.
plot_param_importances(study, evaluator=MeanDecreaseImpurityImportanceEvaluator())
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with a trial to select parameter.
figure = plot_param_importances(study, params=["param_b"])
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 1
assert set(labels) == set(("param_b",)) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_param_importances(
study, target=lambda t: t.params["param_b"] + t.params["param_d"]
)
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert len(figure.get_lines()) == 0
# Test with a customized target name.
figure = plot_param_importances(study, target_name="Target Name")
assert len(figure.get_lines()) == 0
assert figure.xaxis.label.get_text() == "Importance for Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_param_importances(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_param_importances(study)
assert len(figure.get_lines()) == 0
|
def test_plot_param_importances() -> None:
# Test with no trial.
study = create_study()
figure = plot_param_importances(study)
assert len(figure.get_lines()) == 0
study = prepare_study_with_trials(with_c_d=True)
# Test with a trial.
figure = plot_param_importances(study)
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
# get_yticklabels return a data structure of Text(0, 0, 'param_d')
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with an evaluator.
plot_param_importances(study, evaluator=MeanDecreaseImpurityImportanceEvaluator())
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with a trial to select parameter.
figure = plot_param_importances(study, params=["param_b"])
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(figure.get_lines()) == 0
assert len(bars) == 1
assert set(labels) == set(("param_b",))
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert figure.xaxis.label.get_text() == "Importance for Objective Value"
# Test with a customized target value.
with pytest.warns(UserWarning):
figure = plot_param_importances(
study, target=lambda t: t.params["param_b"] + t.params["param_d"]
)
bars = figure.findobj(Rectangle)[:-1] # the last Rectangle is the plot itself
plotted_data = [bar.get_width() for bar in bars]
labels = [label.get_text() for label in figure.get_yticklabels()]
assert len(bars) == 2
assert set(labels) == set(("param_b", "param_d")) # "param_a", "param_c" are conditional.
assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
assert len(figure.get_lines()) == 0
# Test with a customized target name.
figure = plot_param_importances(study, target_name="Target Name")
assert len(figure.get_lines()) == 0
assert figure.xaxis.label.get_text() == "Importance for Target Name"
# Test with wrong parameters.
with pytest.raises(ValueError):
plot_param_importances(study, params=["optuna"])
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study()
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_param_importances(study)
assert len(figure.get_lines()) == 0
|
47,291 |
def load_tf_weights_in_rembert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
# Checkpoint is 12Gb, save memory by not loading useless variables
# Output embedding and cls are reset at classification time
if any(deny in name for deny in ("adam_v", "adam_m", "output_embedding", "cls")):
# logger.info("Skipping loading of %s", name)
continue
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
# Replace prefix with right one
name = name.replace("bert/", "rembert/")
# The pooler is a linear layer
# name = name.replace("pooler/dense", "pooler")
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if not pointer.shape == array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
|
def load_tf_weights_in_rembert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
# Checkpoint is 12Gb, save memory by not loading useless variables
# Output embedding and cls are reset at classification time
if any(deny in name for deny in ("adam_v", "adam_m", "output_embedding", "cls")):
# logger.info("Skipping loading of %s", name)
continue
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
# Replace prefix with right one
name = name.replace("bert/", "rembert/")
# The pooler is a linear layer
# name = name.replace("pooler/dense", "pooler")
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
|
8,241 |
def _bresenham(x1, y1, x2, y2):
"""
Returns an array of all pixel coordinates which the line defined by `x1, y1` and
`x2, y2` crosses. Uses Bresenham's line algorithm to enumerate the pixels along
a line. This was adapted from ginga.
Parameters
----------
x1, y1, x2, y2 :`int`
References
----------
* https://github.com/ejeschke/ginga/blob/c8ceaf8e559acc547bf25661842a53ed44a7b36f/ginga/BaseImage.py#L503
* http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
"""
for x in [x1, y1, x2, y2]:
if type(x) not in (int, np.int64):
raise TypeError('All pixel coordinates must be of type int')
dx = abs(x2 - x1)
dy = abs(y2 - y1)
sx = 1 if x1 < x2 else -1
sy = 1 if y1 < y2 else -1
err = dx - dy
res = []
x, y = x1, y1
while True:
res.append((x, y))
if (x == x2) and (y == y2):
break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x += sx
if e2 < dx:
err = err + dx
y += sy
return np.array(res)
|
def _bresenham(x1, y1, x2, y2):
"""
Returns an array of all pixel coordinates which the line defined by `x1, y1` and
`x2, y2` crosses. Uses Bresenham's line algorithm to enumerate the pixels along
a line. This was adapted from ginga.
Parameters
----------
x1, y1, x2, y2 :`int`
References
----------
* https://github.com/ejeschke/ginga/blob/c8ceaf8e559acc547bf25661842a53ed44a7b36f/ginga/BaseImage.py#L503
* http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
"""
for x in [x1, y1, x2, y2]:
if not isinstance(x, (int, np.int_)):
raise TypeError('All pixel coordinates must be of type int')
dx = abs(x2 - x1)
dy = abs(y2 - y1)
sx = 1 if x1 < x2 else -1
sy = 1 if y1 < y2 else -1
err = dx - dy
res = []
x, y = x1, y1
while True:
res.append((x, y))
if (x == x2) and (y == y2):
break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x += sx
if e2 < dx:
err = err + dx
y += sy
return np.array(res)
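# Worked example (illustrative only): tracing the line from (0, 0) to (5, 2) with the
# function above. Assumes numpy is imported as np, as _bresenham itself requires.
def _example_bresenham():
    path = _bresenham(0, 0, 5, 2)
    expected = np.array([(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2)])
    assert np.array_equal(path, expected)
    return path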
|
20,050 |
def _get_dataset_features_file(did_cache_dir: str, dataset_id: int) -> str:
"""API call to load dataset features. Loads the from cache or downloads them.
Features are feature descriptions for each column.
(name, index, categorical, ...)
This function is NOT thread/multiprocessing safe.
Parameters
----------
did_cache_dir : str
Cache subdirectory for this dataset
dataset_id : int
Dataset ID
Returns
-------
str
Path of the cached dataset feature file
"""
features_file = os.path.join(did_cache_dir, "features.xml")
# Dataset features aren't subject to change...
if not os.path.isfile(features_file):
url_extension = "data/features/{}".format(dataset_id)
features_xml = openml._api_calls._perform_api_call(url_extension, "get")
with io.open(features_file, "w", encoding="utf8") as fh:
fh.write(features_xml)
return features_file
|
def _get_dataset_features_file(did_cache_dir: str, dataset_id: int) -> str:
"""API call to load dataset features. Loads from cache or downloads them.
Features are feature descriptions for each column.
(name, index, categorical, ...)
This function is NOT thread/multiprocessing safe.
Parameters
----------
did_cache_dir : str
Cache subdirectory for this dataset
dataset_id : int
Dataset ID
Returns
-------
str
Path of the cached dataset feature file
"""
features_file = os.path.join(did_cache_dir, "features.xml")
# Dataset features aren't subject to change...
if not os.path.isfile(features_file):
url_extension = "data/features/{}".format(dataset_id)
features_xml = openml._api_calls._perform_api_call(url_extension, "get")
with io.open(features_file, "w", encoding="utf8") as fh:
fh.write(features_xml)
return features_file
|
34,380 |
def compare_nlu(
configs: List[Text],
data: TrainingData,
exclusion_percentages: List[int],
f_score_results: Dict[Text, Any],
model_names: List[Text],
output: Text,
runs: int,
) -> List[int]:
"""
Trains and compares multiple NLU models.
For each run and exclusion percentage a model per config file is trained.
Thereby, the model is trained only on the current percentage of training data.
Afterwards, the model is tested on the complete test data of that run.
All results are stored in the provided output directory.
Args:
configs: config files needed for training
data: training data
exclusion_percentages: percentages of training data to exclude during comparison
f_score_results: dictionary of model name to f-score results per run
model_names: names of the models to train
output: the output directory
runs: number of comparison runs
Returns: training examples per run
"""
from rasa.train import train_nlu
training_examples_per_run = []
for run in range(runs):
logger.info("Beginning comparison run {}/{}".format(run + 1, runs))
run_path = os.path.join(output, "run_{}".format(run + 1))
io_utils.create_path(run_path)
test_path = os.path.join(run_path, TEST_DATA_FILE)
io_utils.create_path(test_path)
train, test = data.train_test_split()
write_to_file(test_path, test.nlu_as_markdown())
training_examples_per_run = []
for percentage in exclusion_percentages:
percent_string = f"{percentage}%_exclusion"
_, train = train.train_test_split(percentage / 100)
training_examples_per_run.append(len(train.training_examples))
model_output_path = os.path.join(run_path, percent_string)
train_split_path = os.path.join(model_output_path, "train")
train_nlu_split_path = os.path.join(
model_output_path, "train", TRAIN_DATA_FILE
)
train_nlg_split_path = os.path.join(
model_output_path, "train", NLG_DATA_FILE
)
io_utils.create_path(train_nlu_split_path)
write_to_file(train_nlu_split_path, train.nlu_as_markdown())
write_to_file(train_nlg_split_path, train.nlg_as_markdown())
for nlu_config, model_name in zip(configs, model_names):
logger.info(
"Evaluating configuration '{}' with {} training data.".format(
model_name, percent_string
)
)
try:
model_path = train_nlu(
nlu_config,
train_split_path,
model_output_path,
fixed_model_name=model_name,
)
except Exception as e:
logger.warning(f"Training model '{model_name}' failed. Error: {e}")
f_score_results[model_name][run].append(0.0)
continue
model_path = os.path.join(get_model(model_path), "nlu")
output_path = os.path.join(model_output_path, f"{model_name}_report")
result = run_evaluation(
test_path, model_path, output_directory=output_path, errors=True
)
f1 = result["intent_evaluation"]["f1_score"]
f_score_results[model_name][run].append(f1)
return training_examples_per_run
|
def compare_nlu(
configs: List[Text],
data: TrainingData,
exclusion_percentages: List[int],
f_score_results: Dict[Text, Any],
model_names: List[Text],
output: Text,
runs: int,
) -> List[int]:
"""
Trains and compares multiple NLU models.
For each run and exclusion percentage a model per config file is trained.
Thereby, the model is trained only on the current percentage of training data.
Afterwards, the model is tested on the complete test data of that run.
All results are stored in the provided output directory.
Args:
configs: config files needed for training
data: training data
exclusion_percentages: percentages of training data to exclude during comparison
f_score_results: dictionary of model name to f-score results per run
model_names: names of the models to train
output: the output directory
runs: number of comparison runs
Returns: training examples per run
"""
from rasa.train import train_nlu
training_examples_per_run = []
for run in range(runs):
logger.info("Beginning comparison run {}/{}".format(run + 1, runs))
run_path = os.path.join(output, "run_{}".format(run + 1))
io_utils.create_path(run_path)
test_path = os.path.join(run_path, TEST_DATA_FILE)
io_utils.create_path(test_path)
train, test = data.train_test_split()
write_to_file(test_path, test.nlu_as_markdown())
training_examples_per_run = []
for percentage in exclusion_percentages:
percent_string = f"{percentage}%_exclusion"
_, train = train.train_test_split(percentage / 100)
training_examples_per_run.append(len(train.training_examples))
model_output_path = os.path.join(run_path, percent_string)
train_split_path = os.path.join(model_output_path, "train")
train_nlu_split_path = os.path.join(
train_split_path, TRAIN_DATA_FILE
)
train_nlg_split_path = os.path.join(
model_output_path, "train", NLG_DATA_FILE
)
io_utils.create_path(train_nlu_split_path)
write_to_file(train_nlu_split_path, train.nlu_as_markdown())
write_to_file(train_nlg_split_path, train.nlg_as_markdown())
for nlu_config, model_name in zip(configs, model_names):
logger.info(
"Evaluating configuration '{}' with {} training data.".format(
model_name, percent_string
)
)
try:
model_path = train_nlu(
nlu_config,
train_split_path,
model_output_path,
fixed_model_name=model_name,
)
except Exception as e:
logger.warning(f"Training model '{model_name}' failed. Error: {e}")
f_score_results[model_name][run].append(0.0)
continue
model_path = os.path.join(get_model(model_path), "nlu")
output_path = os.path.join(model_output_path, f"{model_name}_report")
result = run_evaluation(
test_path, model_path, output_directory=output_path, errors=True
)
f1 = result["intent_evaluation"]["f1_score"]
f_score_results[model_name][run].append(f1)
return training_examples_per_run
|
55,650 |
def get_hanning_kernel1d(kernel_size: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
r"""Returns Hanning (also known as Hann) kernel, used in signal processing and KCF tracker
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
See further in numpy docs https://numpy.org/doc/stable/reference/generated/numpy.hanning.html
Args:
kernel_size: It should be positive.
Returns:
1D tensor with Hanning filter coefficients.
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
Shape:
- Output: math:`(\text{kernel_size})`
Examples:
>>> get_hanning_kernel1d(4)
tensor([ 0., 0.75, 0.75, 0.])
"""
if not isinstance(kernel_size, int) or kernel_size <= 2:
raise TypeError(f"ksize must be an positive integer > 2. Got {kernel_size}")
x: torch.Tensor = torch.arange(kernel_size, device=device, dtype=dtype)
x = 0.5 - 0.5 * torch.cos(2.0 * pi * x / float(kernel_size - 1))
return x
|
def get_hanning_kernel1d(kernel_size: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
r"""Returns Hanning (also known as Hann) kernel, used in signal processing and KCF tracker.
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
See further in numpy docs https://numpy.org/doc/stable/reference/generated/numpy.hanning.html
Args:
kernel_size: It should be positive.
Returns:
1D tensor with Hanning filter coefficients.
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
Shape:
- Output: math:`(\text{kernel_size})`
Examples:
>>> get_hanning_kernel1d(4)
tensor([ 0., 0.75, 0.75, 0.])
"""
if not isinstance(kernel_size, int) or kernel_size <= 2:
        raise TypeError(f"ksize must be a positive integer > 2. Got {kernel_size}")
x: torch.Tensor = torch.arange(kernel_size, device=device, dtype=dtype)
x = 0.5 - 0.5 * torch.cos(2.0 * pi * x / float(kernel_size - 1))
return x
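# Hedged sanity check (not kornia code): the kernel above should match numpy.hanning
# for the same window length; assumes torch and numpy are importable alongside the
# function above.
def _example_hanning_kernel1d():
    import numpy as np
    kernel = get_hanning_kernel1d(5)
    reference = torch.from_numpy(np.hanning(5)).to(kernel.dtype)
    assert torch.allclose(kernel, reference, atol=1e-6)
    return kernel  # approximately tensor([0., 0.5, 1., 0.5, 0.])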
|
23,664 |
def get_era5(latitude, longitude, start, end, api_key=None,
variables=ERA5_DEFAULT_VARIABLES,
dataset='reanalysis-era5-single-levels',
product_type='reanalysis', grid=(0.25, 0.25), save_path=None,
cds_client=None, output_format=None, map_variables=True):
"""
Retrieve ERA5 reanalysis data from the Copernicus Data Store (CDS).
* Temporal coverage: 1979 to present (latency of ~5 days)
* Temporal resolution: hourly
* Spatial coverage: global
* Spatial resolution: 0.25° by 0.25°
An overview of ERA5 is given in [1]_ and [2]_. Data is retrieved using the
CDSAPI [3]_.
.. admonition:: Time reference
ERA5 time stamps are in UTC and corresponds to the end of the period
(right labeled). E.g., the time stamp 12:00 for hourly data refers to
the period from 11.00 to 12:00.
.. admonition:: Usage notes
To use this function the package CDSAPI [4]_ needs to be installed
[3]_. The CDSAPI keywords are described in [5]_.
Requested variables should be specified according to the naming
convention used by the CDS. The returned data contains the short-name
versions of the variables. See [2]_ for a list of variables names and
units.
Access to the CDS requires user registration, see [6]_. The obtaining
API key can either be passed directly to the function or be saved in a
local file as described in [3]_.
It is possible to check your
`request status <https://cds.climate.copernicus.eu/cdsapp#!/yourrequests>`_
and the `status of all queued requests <https://cds.climate.copernicus.eu/live/queue>`_.
Parameters
----------
latitude: float or list
in decimal degrees, between -90 and 90, north is positive (ISO 19115).
If latitude is a list, it should have the format [S, N] and
latitudes within the range are selected according to the grid.
longitude: float or list
in decimal degrees, between -180 and 180, east is positive (ISO 19115).
If longitude is a list, it should have the format [W, E] and
longitudes within the range are selected according to the grid.
start: datetime like
First day of the requested period
end: datetime like
Last day of the requested period
api_key: str, optional
Personal API key for the CDS
variables: list, default: ERA5_DEFAULT_VARIABLES
List of variables to retrieve (according to CDS naming convention)
dataset: str, default 'reanalysis-era5-single-levels'
Name of the dataset to retrieve the variables from. Can be either
'reanalysis-era5-single-levels' or 'reanalysis-era5-land'.
product_type: str, {'reanalysis', 'ensemble_members', 'ensemble_mean',
'ensemble_spread'}, default: 'reanalysis'
ERA5 product type
grid: list or tuple, default: (0.25, 0.25)
User specified grid resolution
save_path: str or path-like, optional
Filename of where to save data. Should have ".nc" extension.
cds_client: CDS API client object, optional
CDS API client
output_format: {'dataframe', 'dataset'}, optional
Type of data object to return. Default is to return a pandas DataFrame
if file only contains one location and otherwise return an xarray
Dataset.
map_variables: bool, default: True
When true, renames columns of the DataFrame to pvlib variable names
where applicable. See variable ERA5_VARIABLE_MAP.
Notes
-----
The returned data includes the following fields by default:
======================== ====== =========================================
Key, mapped key Format Description
======================== ====== =========================================
*Mapped field names are returned when the map_variables argument is True*
---------------------------------------------------------------------------
2tm, temp_air float Air temperature at 2 m above ground (K)
u10 float Horizontal airspeed towards east at 10 m [m/s]
v10 float Horizontal airspeed towards north at 10 m [m/s]
sp, pressure float Atmospheric pressure at the ground (Pa)
msdwswrf, ghi float Mean surface downward short-wave radiation flux [W/m^2]
msdwswrfcs, ghi_clear float Mean surface downward short-wave radiation flux, clear sky [W/m^2]
msdrswrf, bhi float Mean surface direct short-wave radiation flux [W/m^2]
msdrswrfcs, bhi_clear float Mean surface direct short-wave radiation flux, clear sky [W/m^2]
======================== ====== =========================================
Returns
-------
data: DataFrame
ERA5 time-series data, fields depend on the requested data. The
returned object is either a pandas DataFrame or an xarray dataset, see
the output_format parameter.
metadata: dict
Metadata for the time-series.
See Also
--------
pvlib.iotools.read_era5
References
----------
.. [1] `ERA5 hourly data on single levels from 1979 to present
<https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels?tab=overview>`_
.. [2] `ERA5 data documentation
<https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation>`_
.. [3] `How to use the CDS API
<https://cds.climate.copernicus.eu/api-how-to>`_
.. [4] `CDSAPI source code
<https://github.com/ecmwf/cdsapi>`_
.. [5] `Climate Data Store (CDS) API Keywords
<https://confluence.ecmwf.int/display/CKB/Climate+Data+Store+%28CDS%29+API+Keywords>`_
.. [6] `Climate Data Storage user registration
<https://cds.climate.copernicus.eu/user/register>`_
""" # noqa: E501
if cds_client is None:
cds_client = cdsapi.Client(url=CDSAPI_URL, key=api_key)
# Area is selected by a box made by the four coordinates: [N, W, S, E]
if type(latitude) == list:
area = [latitude[1], longitude[0], latitude[0], longitude[1]]
else:
area = [latitude+0.005, longitude-0.005,
latitude-0.005, longitude+0.005]
params = {
'product_type': product_type,
'format': 'netcdf',
'variable': variables,
'date': start.strftime('%Y-%m-%d') + '/' + end.strftime('%Y-%m-%d'),
'time': ERA5_HOURS,
'grid': grid,
'area': area}
# Retrieve path to the file
file_location = cds_client.retrieve(dataset, params)
# Load file into memory
with requests.get(file_location.location) as res:
# Save the file locally if local_path has been specified
if save_path is not None:
with open(save_path, 'wb') as f:
f.write(res.content)
return read_era5(res.content, map_variables=map_variables,
output_format=output_format)
|
def get_era5(latitude, longitude, start, end, api_key=None,
variables=ERA5_DEFAULT_VARIABLES,
dataset='reanalysis-era5-single-levels',
product_type='reanalysis', grid=(0.25, 0.25), save_path=None,
cds_client=None, output_format=None, map_variables=True):
"""
Retrieve ERA5 reanalysis data from the Copernicus Data Store (CDS).
* Temporal coverage: 1979 to present (latency of ~5 days)
* Temporal resolution: hourly
* Spatial coverage: global
* Spatial resolution: 0.25° by 0.25°
An overview of ERA5 is given in [1]_ and [2]_. Data is retrieved using the
CDSAPI [3]_.
.. admonition:: Time reference
ERA5 time stamps are in UTC and corresponds to the end of the period
(right labeled). E.g., the time stamp 12:00 for hourly data refers to
the period from 11:00 to 12:00.
.. admonition:: Usage notes
To use this function the package CDSAPI [4]_ needs to be installed
[3]_. The CDSAPI keywords are described in [5]_.
Requested variables should be specified according to the naming
convention used by the CDS. The returned data contains the short-name
versions of the variables. See [2]_ for a list of variables names and
units.
    Access to the CDS requires user registration, see [6]_. The obtained
API key can either be passed directly to the function or be saved in a
local file as described in [3]_.
It is possible to check your
`request status <https://cds.climate.copernicus.eu/cdsapp#!/yourrequests>`_
and the `status of all queued requests <https://cds.climate.copernicus.eu/live/queue>`_.
Parameters
----------
latitude: float or list
in decimal degrees, between -90 and 90, north is positive (ISO 19115).
If latitude is a list, it should have the format [S, N] and
latitudes within the range are selected according to the grid.
longitude: float or list
in decimal degrees, between -180 and 180, east is positive (ISO 19115).
If longitude is a list, it should have the format [W, E] and
longitudes within the range are selected according to the grid.
start: datetime like
First day of the requested period
end: datetime like
Last day of the requested period
api_key: str, optional
Personal API key for the CDS
variables: list, default: ERA5_DEFAULT_VARIABLES
List of variables to retrieve (according to CDS naming convention)
dataset: str, default: 'reanalysis-era5-single-levels'
Name of the dataset to retrieve the variables from. Can be either
'reanalysis-era5-single-levels' or 'reanalysis-era5-land'.
product_type: str, {'reanalysis', 'ensemble_members', 'ensemble_mean',
'ensemble_spread'}, default: 'reanalysis'
ERA5 product type
grid: list or tuple, default: (0.25, 0.25)
User specified grid resolution
save_path: str or path-like, optional
Filename of where to save data. Should have ".nc" extension.
cds_client: CDS API client object, optional
CDS API client
output_format: {'dataframe', 'dataset'}, optional
Type of data object to return. Default is to return a pandas DataFrame
if file only contains one location and otherwise return an xarray
Dataset.
map_variables: bool, default: True
When true, renames columns of the DataFrame to pvlib variable names
where applicable. See variable ERA5_VARIABLE_MAP.
Notes
-----
The returned data includes the following fields by default:
======================== ====== =========================================
Key, mapped key Format Description
======================== ====== =========================================
*Mapped field names are returned when the map_variables argument is True*
---------------------------------------------------------------------------
t2m, temp_air float Air temperature at 2 m above ground (K)
u10 float Horizontal airspeed towards east at 10 m [m/s]
v10 float Horizontal airspeed towards north at 10 m [m/s]
sp, pressure float Atmospheric pressure at the ground (Pa)
msdwswrf, ghi float Mean surface downward short-wave radiation flux [W/m^2]
msdwswrfcs, ghi_clear float Mean surface downward short-wave radiation flux, clear sky [W/m^2]
msdrswrf, bhi float Mean surface direct short-wave radiation flux [W/m^2]
msdrswrfcs, bhi_clear float Mean surface direct short-wave radiation flux, clear sky [W/m^2]
======================== ====== =========================================
Returns
-------
data: DataFrame
ERA5 time-series data, fields depend on the requested data. The
returned object is either a pandas DataFrame or an xarray dataset, see
the output_format parameter.
metadata: dict
Metadata for the time-series.
See Also
--------
pvlib.iotools.read_era5
References
----------
.. [1] `ERA5 hourly data on single levels from 1979 to present
<https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels?tab=overview>`_
.. [2] `ERA5 data documentation
<https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation>`_
.. [3] `How to use the CDS API
<https://cds.climate.copernicus.eu/api-how-to>`_
.. [4] `CDSAPI source code
<https://github.com/ecmwf/cdsapi>`_
.. [5] `Climate Data Store (CDS) API Keywords
<https://confluence.ecmwf.int/display/CKB/Climate+Data+Store+%28CDS%29+API+Keywords>`_
.. [6] `Climate Data Store user registration
<https://cds.climate.copernicus.eu/user/register>`_
""" # noqa: E501
if cds_client is None:
cds_client = cdsapi.Client(url=CDSAPI_URL, key=api_key)
# Area is selected by a box made by the four coordinates: [N, W, S, E]
if type(latitude) == list:
area = [latitude[1], longitude[0], latitude[0], longitude[1]]
else:
area = [latitude+0.005, longitude-0.005,
latitude-0.005, longitude+0.005]
params = {
'product_type': product_type,
'format': 'netcdf',
'variable': variables,
'date': start.strftime('%Y-%m-%d') + '/' + end.strftime('%Y-%m-%d'),
'time': ERA5_HOURS,
'grid': grid,
'area': area}
# Retrieve path to the file
file_location = cds_client.retrieve(dataset, params)
# Load file into memory
with requests.get(file_location.location) as res:
# Save the file locally if save_path has been specified
if save_path is not None:
with open(save_path, 'wb') as f:
f.write(res.content)
return read_era5(res.content, map_variables=map_variables,
output_format=output_format)
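A minimal usage sketch (not part of the source above), assuming a registered CDS account whose API key is available via ~/.cdsapirc or the api_key argument, and assuming read_era5 returns the (data, metadata) pair documented above; the coordinates, dates and file name are placeholders.

import pandas as pd

# Hypothetical call: one week of hourly ERA5 data for a single site.
data, metadata = get_era5(
    latitude=55.7, longitude=12.6,
    start=pd.Timestamp(2020, 6, 1), end=pd.Timestamp(2020, 6, 7),
    api_key='YOUR-CDS-API-KEY',   # or omit and rely on ~/.cdsapirc
    save_path='era5_week.nc')     # optional local copy of the netCDF file
print(data.head())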
|
40,748 |
def test_wrong_input_shapes():
m = MeanAbsoluteRelativeError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4,)))
|
def test_wrong_input_shapes():
m = MeanAbsoluteRelativeError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4,)))
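For contrast, a correctly shaped update that does not raise (a sketch; in older ignite releases the metric lives under ignite.contrib.metrics.regression):

import torch
from ignite.contrib.metrics.regression import MeanAbsoluteRelativeError

m = MeanAbsoluteRelativeError()
# y_pred and y share the same shape, so no ValueError is raised.
m.update((torch.tensor([2.0, 4.0]), torch.tensor([1.0, 2.0])))
print(m.compute())   # mean(|y - y_pred| / |y|) = 1.0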
|
10,534 |
def version(prog=None):
""" return ansible version """
if prog:
result = ["{0} [core {1}] ".format(prog, __version__)]
else:
result = [__version__]
gitinfo = _gitinfo()
if gitinfo:
result[0] = "{0} {1}".format(result[0], gitinfo)
result.append(" config file = %s" % C.CONFIG_FILE)
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result.append(" configured module search path = %s" % cpath)
result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
result.append(" executable location = %s" % sys.argv[0])
result.append(" python version = %s" % ''.join(sys.version.splitlines()))
result.append(" libyaml = %s" % HAS_LIBYAML)
return "\n".join(result)
|
def version(prog=None):
""" return ansible version """
if prog:
result = ["{0} [core {1}] ".format(prog, __version__)]
else:
result = ["{name} [core {ver}] ".format(name=prog, ver=__version__)]
gitinfo = _gitinfo()
if gitinfo:
result[0] = "{0} {1}".format(result[0], gitinfo)
result.append(" config file = %s" % C.CONFIG_FILE)
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result.append(" configured module search path = %s" % cpath)
result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
result.append(" executable location = %s" % sys.argv[0])
result.append(" python version = %s" % ''.join(sys.version.splitlines()))
result.append(" libyaml = %s" % HAS_LIBYAML)
return "\n".join(result)
|
47,302 |
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_py_obj(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and _is_tensorflow(obj):
return obj.numpy().tolist()
elif is_torch_available() and _is_torch(obj):
return obj.detach().cpu().tolist()
elif is_flax_available() and _is_jax(obj):
return np.asarray(obj).tolist()
elif isinstance(obj, (np.ndarray, np.int32, np.int64)):
return obj.tolist()
else:
return obj
|
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_py_obj(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and _is_tensorflow(obj):
return obj.numpy().tolist()
elif is_torch_available() and _is_torch(obj):
return obj.detach().cpu().tolist()
elif is_flax_available() and _is_jax(obj):
return np.asarray(obj).tolist()
elif isinstance(obj, (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
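A small illustration of the recursive conversion (sketch; assumes only numpy is installed, so the TensorFlow/PyTorch/JAX branches are skipped):

import numpy as np

nested = {"a": np.arange(3), "b": [np.float32(1.5), (np.int64(2),)]}
print(to_py_obj(nested))
# -> {'a': [0, 1, 2], 'b': [1.5, [2]]}  (np.number scalars hit the tolist() branch above)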
|
59,217 |
def getmembers_static(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name
without calling properties and other dynamic attributes. Optionally, only return
members that satisfy a given predicate."""
return _getmembers(object, predicate, getattr_static)
|
def getmembers_static(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name
without triggering dynamic lookup via the descriptor protocol,
__getattr__ or __getattribute__. Optionally, only return members that
satisfy a given predicate.
Note: this function may not be able to retrieve all members
that getmembers can fetch (like dynamically created attributes)
and may find members that getmembers can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases.
"""
return _getmembers(object, predicate, getattr_static)
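A standalone sketch of the behaviour on Python 3.11+, where inspect.getmembers_static ships with the standard library:

import inspect

class Lazy:
    @property
    def boom(self):
        # a property with side effects: plain getmembers() would execute it and crash
        raise RuntimeError("should not run during introspection")

members = dict(inspect.getmembers_static(Lazy()))
print(type(members["boom"]))   # <class 'property'> -- the descriptor itself is returned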
|
50,164 |
def bom_aware_open(filename, encoding="utf-8", mode="r", **kwargs):
assert "b" not in mode, "binary mode not support by bom_aware_open"
if encoding in {"utf-8", "utf8"} and "r" in mode:
encoding = "utf-8-sig"
return io.open(filename, encoding=encoding, mode=mode, **kwargs)
|
def bom_aware_open(filename, encoding="utf-8", mode="r", **kwargs):
assert "b" not in mode, "binary mode not support by bom_aware_open"
import codecs
codec = codecs.lookup(encoding)
potential_bom_attribute = "BOM_" + codec.name.replace("utf-", "utf").upper()
if hasattr(codecs, potential_bom_attribute):
encoding = codec.name + "-sig"
return io.open(filename, encoding=encoding, mode=mode, **kwargs)
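A tiny round-trip check (sketch; works with either variant above and assumes io/codecs are imported at module level):

import codecs
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "bom.txt")
with open(path, "wb") as f:
    f.write(codecs.BOM_UTF8 + "hello".encode("utf-8"))   # UTF-8 file starting with a BOM
with bom_aware_open(path) as f:
    assert f.read() == "hello"   # the BOM is consumed instead of leaking into the data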
|
4,731 |
def do_constrained_layout(fig, renderer, h_pad, w_pad,
hspace=None, wspace=None):
"""
Do the constrained_layout. Called at draw time in
``figure.constrained_layout()``
Parameters
----------
fig : Figure
is the ``figure`` instance to do the layout in.
renderer : Renderer
the renderer to use.
h_pad, w_pad : float
are in figure-normalized units, and are a padding around the axes
elements.
hspace, wspace : float
are in fractions of the subplot sizes.
"""
''' Steps:
1. get a list of unique gridspecs in this figure. Each gridspec will be
constrained separately.
2. Check for gaps in the gridspecs. i.e. if not every axes slot in the
gridspec has been filled. If empty, add a ghost axis that is made so
that it cannot be seen (though visible=True). This is needed to make
a blank spot in the layout.
3. Compare the tight_bbox of each axes to its `position`, and assume that
the difference is the space needed by the elements around the edge of
the axes (decorations) like the title, ticklabels, x-labels, etc. This
can include legends that spill over the axes boundaries.
4. Constrain gridspec elements to line up:
a) if colnum0 != colnumC, the two subplotspecs are stacked next to
each other, with the appropriate order.
b) if colnum0 == colnumC, line up the left or right side of the
_poslayoutbox (depending if it is the min or max num that is equal).
c) do the same for rows...
5. The above doesn't constrain relative sizes of the _poslayoutboxes at
all, and indeed zero-size is a solution that the solver often finds more
convenient than expanding the sizes. Right now the solution is to compare
subplotspec sizes (i.e. drowsC and drows0) and constrain the larger
_poslayoutbox to be larger than the ratio of the sizes. i.e. if drows0 >
drowsC, then ax._poslayoutbox > axc._poslayoutbox * drowsC / drows0. This
works fine *if* the decorations are similar between the axes. If the
larger subplotspec has much larger axes decorations, then the constraint
above is incorrect.
We need the greater than in the above, in general, rather than an equals
sign. Consider the case of the left column having 2 rows, and the right
column having 1 row. We want the top and bottom of the _poslayoutboxes to
line up. So that means if there are decorations on the left column axes
they will be smaller than half as large as the right hand axis.
This can break down if the decoration size for the right hand axis (the
margins) is very large. There must be a mathematical way to check for this case.
'''
invTransFig = fig.transFigure.inverted().transform_bbox
# list of unique gridspecs that contain child axes:
gss = set()
for ax in fig.axes:
if hasattr(ax, 'get_subplotspec'):
gs = ax.get_subplotspec().get_gridspec()
if gs._layoutbox is not None:
gss.add(gs)
if len(gss) == 0:
cbook._warn_external('There are no gridspecs with layoutboxes. '
'Possibly did not call parent GridSpec with the'
' figure= keyword')
if fig._layoutbox.constrained_layout_called < 1:
for gs in gss:
# fill in any empty gridspec slots w/ ghost axes...
_make_ghost_gridspec_slots(fig, gs)
for nnn in range(2):
# do the algorithm twice. This has to be done because decorators
# change size after the first re-position (i.e. x/yticklabels get
# larger/smaller). This second reposition tends to be much milder,
# so doing twice makes things work OK.
for ax in fig.axes:
_log.debug(ax._layoutbox)
if ax._layoutbox is not None:
# make margins for each layout box based on the size of
# the decorators.
_make_layout_margins(ax, renderer, h_pad, w_pad)
# do layout for suptitle.
do_sup = (fig._suptitle is not None and
fig._suptitle._layoutbox is not None)
if do_sup:
sup = fig._suptitle
bbox = invTransFig(sup.get_window_extent(renderer=renderer))
height = bbox.y1 - bbox.y0
if np.isfinite(height):
sup._layoutbox.edit_height(height + h_pad * 2)
# OK, the above lines up ax._poslayoutbox with ax._layoutbox
# now we need to
# 1) arrange the subplotspecs. We do it at this level because
# the subplotspecs are meant to contain other dependent axes
# like colorbars or legends.
# 2) line up the right and left side of the ax._poslayoutbox
# that have the same subplotspec maxes.
if fig._layoutbox.constrained_layout_called < 1:
# arrange the subplotspecs... This is all done relative to each
# other. Some subplotspecs contain axes, and others contain
# gridspecs the ones that contain gridspecs are a set proportion
# of their parent gridspec. The ones that contain axes are
# not so constrained.
figlb = fig._layoutbox
for child in figlb.children:
if child._is_gridspec_layoutbox():
# This routine makes all the subplot spec containers
# have the correct arrangement. It just stacks the
# subplot layoutboxes in the correct order...
_arrange_subplotspecs(child, hspace=hspace, wspace=wspace)
for gs in gss:
_align_spines(fig, gs)
fig._layoutbox.constrained_layout_called += 1
fig._layoutbox.update_variables()
# check if any axes collapsed to zero. If not, don't change positions:
if _axes_all_finite_sized(fig):
# Now set the position of the axes...
for ax in fig.axes:
if ax._layoutbox is not None:
newpos = ax._poslayoutbox.get_rect()
# Now set the new position.
# ax.set_position will zero out the layout for
# this axis, allowing users to hard-code the position,
# so this does the same w/o zeroing layout.
ax._set_position(newpos, which='original')
if do_sup:
newpos = sup._layoutbox.get_rect()
sup.set_y(newpos[1] + newpos[3] - h_pad)
else:
cbook._warn_external('constrained_layout not applied. At least '
'one axes collapsed to zero width or height.')
|
def do_constrained_layout(fig, renderer, h_pad, w_pad,
hspace=None, wspace=None):
"""
Do the constrained_layout. Called at draw time in
``figure.constrained_layout()``
Parameters
----------
fig : Figure
is the ``figure`` instance to do the layout in.
renderer : Renderer
the renderer to use.
h_pad, w_pad : float
are in figure-normalized units, and are a padding around the axes
elements.
hspace, wspace : float
are in fractions of the subplot sizes.
"""
''' Steps:
1. get a list of unique gridspecs in this figure. Each gridspec will be
constrained separately.
2. Check for gaps in the gridspecs. i.e. if not every axes slot in the
gridspec has been filled. If empty, add a ghost axis that is made so
that it cannot be seen (though visible=True). This is needed to make
a blank spot in the layout.
3. Compare the tight_bbox of each axes to its `position`, and assume that
the difference is the space needed by the elements around the edge of
the axes (decorations) like the title, ticklabels, x-labels, etc. This
can include legends that spill over the axes boundaries.
4. Constrain gridspec elements to line up:
a) if colnum0 != colnumC, the two subplotspecs are stacked next to
each other, with the appropriate order.
b) if colnum0 == colnumC, line up the left or right side of the
_poslayoutbox (depending if it is the min or max num that is equal).
c) do the same for rows...
5. The above doesn't constrain relative sizes of the _poslayoutboxes at
all, and indeed zero-size is a solution that the solver often finds more
convenient than expanding the sizes. Right now the solution is to compare
subplotspec sizes (i.e. drowsC and drows0) and constrain the larger
_poslayoutbox to be larger than the ratio of the sizes. i.e. if drows0 >
drowsC, then ax._poslayoutbox > axc._poslayoutbox * drowsC / drows0. This
works fine *if* the decorations are similar between the axes. If the
larger subplotspec has much larger axes decorations, then the constraint
above is incorrect.
We need the greater than in the above, in general, rather than an equals
sign. Consider the case of the left column having 2 rows, and the right
column having 1 row. We want the top and bottom of the _poslayoutboxes to
line up. So that means if there are decorations on the left column axes
they will be smaller than half as large as the right hand axis.
This can break down if the decoration size for the right hand axis (the
margins) is very large. There must be a mathematical way to check for this case.
'''
invTransFig = fig.transFigure.inverted().transform_bbox
# list of unique gridspecs that contain child axes:
gss = set()
for ax in fig.axes:
if hasattr(ax, 'get_subplotspec'):
gs = ax.get_subplotspec().get_gridspec()
if gs._layoutbox is not None:
gss.add(gs)
if len(gss) == 0:
cbook._warn_external('There are no gridspecs with layoutboxes. '
'Possibly did not call parent GridSpec with the'
' figure= keyword')
if fig._layoutbox.constrained_layout_called < 1:
for gs in gss:
# fill in any empty gridspec slots w/ ghost axes...
_make_ghost_gridspec_slots(fig, gs)
for nnn in range(2):
# do the algorithm twice. This has to be done because decorators
# change size after the first re-position (i.e. x/yticklabels get
# larger/smaller). This second reposition tends to be much milder,
# so doing twice makes things work OK.
for ax in fig.axes:
_log.debug(ax._layoutbox)
if ax._layoutbox is not None:
# make margins for each layout box based on the size of
# the decorators.
_make_layout_margins(ax, renderer, h_pad, w_pad)
# do layout for suptitle.
do_sup = (fig._suptitle is not None and
fig._suptitle._layoutbox is not None)
if do_sup:
sup = fig._suptitle
bbox = invTransFig(sup.get_window_extent(renderer=renderer))
height = bbox.y1 - bbox.y0
if np.isfinite(height):
sup._layoutbox.edit_height(height + h_pad * 2)
# OK, the above lines up ax._poslayoutbox with ax._layoutbox
# now we need to
# 1) arrange the subplotspecs. We do it at this level because
# the subplotspecs are meant to contain other dependent axes
# like colorbars or legends.
# 2) line up the right and left side of the ax._poslayoutbox
# that have the same subplotspec maxes.
if fig._layoutbox.constrained_layout_called < 1:
# arrange the subplotspecs... This is all done relative to each
# other. Some subplotspecs contain axes, and others contain
# gridspecs the ones that contain gridspecs are a set proportion
# of their parent gridspec. The ones that contain axes are
# not so constrained.
figlb = fig._layoutbox
for child in figlb.children:
if child._is_gridspec_layoutbox():
# This routine makes all the subplot spec containers
# have the correct arrangement. It just stacks the
# subplot layoutboxes in the correct order...
_arrange_subplotspecs(child, hspace=hspace, wspace=wspace)
for gs in gss:
_align_spines(fig, gs)
fig._layoutbox.constrained_layout_called += 1
fig._layoutbox.update_variables()
# check if any axes collapsed to zero. If not, don't change positions:
if _axes_all_finite_sized(fig):
# Now set the position of the axes...
for ax in fig.axes:
if ax._layoutbox is not None:
newpos = ax._poslayoutbox.get_rect()
# Now set the new position.
# ax.set_position will zero out the layout for
# this axis, allowing users to hard-code the position,
# so this does the same w/o zeroing layout.
ax._set_position(newpos, which='original')
if do_sup:
newpos = sup._layoutbox.get_rect()
sup.set_y(newpos[1] + newpos[3] - h_pad)
else:
cbook._warn_external('constrained_layout not applied. At least '
'one axes collapsed to zero width or height.')
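This routine is not called directly by users; it runs at draw time when a figure opts in. A minimal sketch of how the machinery is triggered (usage is the same in current matplotlib even though the layoutbox internals above belong to an older implementation):

import matplotlib
matplotlib.use("Agg")   # headless backend for the sketch
import matplotlib.pyplot as plt

# constrained_layout=True makes the draw machinery invoke do_constrained_layout()
# so titles, tick labels and the suptitle get room without overlapping.
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flat:
    ax.set_title("panel")
    ax.set_xlabel("x")
fig.suptitle("constrained layout demo")
fig.savefig("demo.png")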
|
30,918 |
def get_indicators_command(client: Client, insight_category: list, insight_data_type: list, tlp_color: Optional[str],
args: dict) -> List[Dict]:
"""Create indicators.
Arguments:
client {Client} -- Client derives from BaseClient.
insight_category {List[String]} -- List of SafeBreach insight category - using as filter.
insight_data_type {List[String]} -- List of data types - using as filter.
tlp_color {str}: Traffic Light Protocol color.
Keyword Arguments:
Returns:
List[Dict] -- List of insights from SafeBreach
"""
indicator_limit_param = demisto.params().get('indicatorLimit')
if indicator_limit_param == '':
indicator_limit_param = 1000
limit: int = int(args.get('limit') or indicator_limit_param)
indicators: List[Dict] = []
count: int = 0
# These variables may be filled directly from the integration configuration or as arguments.
insight_category, insight_data_type = get_category_and_data_type_filters(args, insight_category,
insight_data_type)
# Convert category into insight id
insights_ids: Any = get_insights_ids_by_category(insight_category)
raw_insights: Any = client.get_insights().json()
# Filter insight by category
insights: Any = list([item for item in raw_insights if int(item.get('ruleId')) in insights_ids])
for insight in insights:
# Fetch remediation data for each insight
processed_data: List[Dict[str, Any]] = get_remediation_data_command(client,
{'insightId': insight.get('ruleId')}, False)
for item in processed_data:
# if the data type is not in the filter data types continue,
if INDICATOR_TYPE_SB_TO_DEMISTO_MAPPER.get(item['type']) not in insight_data_type:
continue
if not INDICATOR_TYPE_MAPPER.get(str(item['type'])) or item["value"] == 'N/A':
continue
if isinstance(item['type'], int):
demisto.info('Data type is int', item['type'], insight['ruleId'])
is_behaveioral = item['type'] not in ['Domain', 'FQDN/IP', 'SHA256', 'URI', 'Hash']
score_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(demisto.params().get('behavioralReputation'))
score_non_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(
demisto.params().get('nonBehavioralReputation'))
raw_json = {
'value': str(item["value"]),
'dataType': item['type'],
'insightId': insight.get('ruleId'),
'insightTime': insight.get('maxExecutionTime'),
}
mapping = {
'description': 'SafeBreach Insight - {0}'.format(insight['actionBasedTitle']),
item['type'].lower(): item["value"],
"safebreachinsightids": str(insight.get('ruleId')),
"safebreachseverity": insight.get('severity'),
"safebreachseverityscore": str(insight.get('severityScore')),
"safebreachisbehavioral": is_behaveioral,
"safebreachattackids": list(map(str, insight.get('attacks'))),
'tags': [
f"SafeBreachInsightId: {insight.get('ruleId')}",
]
}
if tlp_color:
mapping['trafficlightprotocol'] = tlp_color
mapping['tags'] = list((set(mapping['tags'])).union(set(client.tags)))
indicator = {
'value': str(item["value"]),
'type': INDICATOR_TYPE_MAPPER.get(str(item['type'])),
'rawJSON': raw_json,
'fields': mapping,
'score': score_behavioral_reputation if is_behaveioral else score_non_behavioral_reputation
}
if is_ip(item["value"]):
indicator['type'] = FeedIndicatorType.IP
count += 1
if count > limit:
return indicators
indicators.append(indicator)
return indicators
|
def get_indicators_command(client: Client, insight_category: list, insight_data_type: list, tlp_color: Optional[str],
args: dict) -> List[Dict]:
"""Create indicators.
Arguments:
client {Client} -- Client derives from BaseClient.
insight_category {List[String]} -- List of SafeBreach insight category - using as filter.
insight_data_type {List[String]} -- List of data types - using as filter.
tlp_color {str}: Traffic Light Protocol color.
Keyword Arguments:
Returns:
List[Dict] -- List of insights from SafeBreach
"""
indicator_limit_param = demisto.params().get('indicatorLimit', 200)
limit: int = int(args.get('limit') or indicator_limit_param)
indicators: List[Dict] = []
count: int = 0
# These variables may be filled directly from the integration configuration or as arguments.
insight_category, insight_data_type = get_category_and_data_type_filters(args, insight_category,
insight_data_type)
# Convert category into insight id
insights_ids: Any = get_insights_ids_by_category(insight_category)
raw_insights: Any = client.get_insights().json()
# Filter insight by category
insights: Any = list([item for item in raw_insights if int(item.get('ruleId')) in insights_ids])
for insight in insights:
# Fetch remediation data for each insight
processed_data: List[Dict[str, Any]] = get_remediation_data_command(client,
{'insightId': insight.get('ruleId')}, False)
for item in processed_data:
# if the data type is not in the filter data types continue,
if INDICATOR_TYPE_SB_TO_DEMISTO_MAPPER.get(item['type']) not in insight_data_type:
continue
if not INDICATOR_TYPE_MAPPER.get(str(item['type'])) or item["value"] == 'N/A':
continue
if isinstance(item['type'], int):
demisto.info('Data type is int', item['type'], insight['ruleId'])
is_behaveioral = item['type'] not in ['Domain', 'FQDN/IP', 'SHA256', 'URI', 'Hash']
score_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(demisto.params().get('behavioralReputation'))
score_non_behavioral_reputation = DEMISTO_INDICATOR_REPUTATION.get(
demisto.params().get('nonBehavioralReputation'))
raw_json = {
'value': str(item["value"]),
'dataType': item['type'],
'insightId': insight.get('ruleId'),
'insightTime': insight.get('maxExecutionTime'),
}
mapping = {
'description': 'SafeBreach Insight - {0}'.format(insight['actionBasedTitle']),
item['type'].lower(): item["value"],
"safebreachinsightids": str(insight.get('ruleId')),
"safebreachseverity": insight.get('severity'),
"safebreachseverityscore": str(insight.get('severityScore')),
"safebreachisbehavioral": is_behaveioral,
"safebreachattackids": list(map(str, insight.get('attacks'))),
'tags': [
f"SafeBreachInsightId: {insight.get('ruleId')}",
]
}
if tlp_color:
mapping['trafficlightprotocol'] = tlp_color
mapping['tags'] = list((set(mapping['tags'])).union(set(client.tags)))
indicator = {
'value': str(item["value"]),
'type': INDICATOR_TYPE_MAPPER.get(str(item['type'])),
'rawJSON': raw_json,
'fields': mapping,
'score': score_behavioral_reputation if is_behaveioral else score_non_behavioral_reputation
}
if is_ip(item["value"]):
indicator['type'] = FeedIndicatorType.IP
count += 1
if count > limit:
return indicators
indicators.append(indicator)
return indicators
|
5,619 |
def _merge_clusters(peaks_above, peaks_below, runs=0):
"""
Generator that merges naive peak clusters according to some declustering
rule.
Parameters
----------
peaks_above : sequence
Sequence of arrays with indices of peaks above a threshold within a
cluster. Assumes that clusters and indices are sorted.
peaks_below : sequence
Sequence of arrays with indices of peaks below a threshold within a
cluster. Assumes that clusters and indices are sorted.
runs : int
Minimum required number of peaks below a threshold to sustain a
cluster. If number of peaks below a threshold is lower than ``runs``
then the cluster is merged with the subsequent cluster.
Yields
------
peaks : array
Array with indices of peaks above a threshold within a (merged)
cluster.
"""
buffer = []
for pa, pb in zip(peaks_above, peaks_below):
if len(pb) >= runs:
if not buffer:
yield pa
else:
buffer.append(pa)
yield np.concatenate(buffer)
buffer = []
else:
buffer.append(pa)
if buffer:
yield np.concatenate(buffer)
|
def _merge_clusters(peaks_above, peaks_below, runs=0):
"""
Generator that merges naive peak clusters according to some declustering
rule.
Parameters
----------
peaks_above : sequence
Sequence of arrays with indices of peaks above a threshold within a
cluster. Assumes that clusters and indices are sorted.
peaks_below : sequence
Sequence of arrays with indices of peaks below a threshold within a
cluster. Assumes that clusters and indices are sorted.
runs : int
Minimum required number of peaks below a threshold to sustain a
cluster. If number of peaks below a threshold is lower than ``runs``
then the cluster is merged with the subsequent cluster.
Yields
------
peaks : array
Array with indices of peaks above a threshold within a (merged)
cluster.
"""
buffer = []
for pa, pb in zip(peaks_above, peaks_below):
if len(pb) >= runs:
if not buffer:
yield pa
else:
buffer.append(pa)
yield np.concatenate(buffer)
buffer = []
else:
buffer.append(pa)
if buffer:
yield np.concatenate(buffer)
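A worked toy example of the declustering rule (sketch; assumes numpy and the generator defined above):

import numpy as np

# Two clusters of exceedances separated by a single below-threshold peak.
peaks_above = [np.array([0, 1]), np.array([5, 6])]
peaks_below = [np.array([3]), np.array([])]

# With runs=2 the single separating peak is not enough, so the clusters merge.
print(list(_merge_clusters(peaks_above, peaks_below, runs=2)))
# -> [array([0, 1, 5, 6])]
# With runs=1 the separator is sufficient and the clusters stay apart.
print(list(_merge_clusters(peaks_above, peaks_below, runs=1)))
# -> [array([0, 1]), array([5, 6])]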
|
55,051 |
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the Hamiltonian which imposes the constraint that each node has
an outflow of at most one.
The out flow constraint is, for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the Hamiltonian which imposes the constraint that each node has
an outflow of at most one.
The out flow constraint is, for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
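A short usage sketch, assuming a PennyLane version that exposes this helper under pennylane.qaoa.cycle and NetworkX for the graph:

import networkx as nx
from pennylane.qaoa.cycle import out_flow_constraint   # assumed import location

g = nx.complete_graph(3).to_directed()   # one wire/qubit per directed edge
h = out_flow_constraint(g)
print(h)   # a qml.Hamiltonian penalising nodes whose out-flow exceeds one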
|
33,746 |
def node_ip_address_from_perspective(address="8.8.8.8:53"):
"""IP address by which the local node can be reached *from* the `address`.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address by which the local node can be reached from the address.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except OSError as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == errno.ENETUNREACH:
try:
# try to get the node ip address from the host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address
|
def node_ip_address_from_perspective(address):
"""IP address by which the local node can be reached *from* the `address`.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address by which the local node can be reached from the address.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except OSError as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == errno.ENETUNREACH:
try:
# try to get the node ip address from the host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address
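A quick sketch of typical use (assumes the socket/errno imports of the surrounding module; connect() on a UDP socket sends no packets, so this only inspects routing, and the 127.0.0.1 fallback is returned on isolated hosts):

# Ask which local interface would be used to reach a well-known public resolver.
print(node_ip_address_from_perspective("8.8.8.8:53"))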
|
30,666 |
def account_entry(person_object, custome_attributes):
# create an account entry from a person object
account = {
'Type': 'AD',
'ID': person_object.get('dn'),
'Email': person_object.get('mail'),
'Username': person_object.get('sAMAccountName'),
'DisplayName': person_object.get('displayName'),
'Managr': person_object.get('manager'),
'Manager': person_object.get('manager'),
'Groups': person_object.get('memberOf')
}
for attr in custome_attributes:
try:
account[attr] = person_object[attr]
except Exception as e:
lower_cased_attr = attr[0].lower() + attr[1:]
if lower_cased_attr in person_object:
account[lower_cased_attr] = person_object[lower_cased_attr]
else:
demisto.debug(f'Failed parsing custom attribute {attr}, error: {e}')
return account
|
def account_entry(person_object, custome_attributes):
# create an account entry from a person object
account = {
'Type': 'AD',
'ID': person_object.get('dn'),
'Email': person_object.get('mail'),
'Username': person_object.get('sAMAccountName'),
'DisplayName': person_object.get('displayName'),
'Managr': person_object.get('manager'),
'Manager': person_object.get('manager'),
'Groups': person_object.get('memberOf')
}
for attr in custome_attributes:
try:
account[attr] = person_object[attr]
except KeyError as e:
lower_cased_attr = attr[0].lower() + attr[1:]
if lower_cased_attr in person_object:
account[lower_cased_attr] = person_object[lower_cased_attr]
else:
demisto.debug(f'Failed parsing custom attribute {attr}, error: {e}')
return account
|
51,419 |
def _parse_iso8601_with_reso(date_type, timestr):
default = date_type(1, 1, 1)
result = parse_iso8601(timestr)
replace = {}
for attr in ["year", "month", "day", "hour", "minute", "second"]:
value = result.get(attr, None)
if value is not None:
# Note ISO8601 conventions allow for fractional seconds.
# TODO: Consider adding support for sub-second resolution?
replace[attr] = int(value)
resolution = attr
return default.replace(**replace), resolution
|
def _parse_iso8601_with_reso(date_type, timestr):
default = date_type(1, 1, 1)
result = parse_iso8601(timestr)
replace = {}
for attr in ["year", "month", "day", "hour", "minute", "second"]:
value = result.get(attr, None)
if value is not None:
# Note ISO8601 conventions allow for fractional seconds.
# TODO: Consider adding support for sub-second resolution?
replace[attr] = int(value)
resolution = attr
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.3.4. It can be removed for versions of cftime greater than
# 1.0.3.4.
replace["dayofwk"] = -1
return default.replace(**replace), resolution
|
45,898 |
def filter2d(
input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False,
padding: str = 'same'
) -> torch.Tensor:
r"""Convolve a tensor with a 2d kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains in the same shape.
Args:
input: the input tensor with shape of
:math:`(B, C, H, W)`.
kernel: the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
normalized: If True, kernel will be L1 normalized.
padding: This defines the type of padding.
2 modes available ``'same'`` or ``'valid'``
Return:
torch.Tensor: the convolved tensor of same size and numbers of channels
as the input with shape :math:`(B, C, H, W)`.
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3, 3)
>>> filter2d(input, kernel, padding='same')
tensor([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(input)}")
if not isinstance(kernel, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(kernel)}")
if not isinstance(border_type, str):
raise TypeError(f"Input border_type is not string. Got {type(kernel)}")
if not isinstance(padding, str):
raise TypeError(f"Input padding is not string. Got {type(padding)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not len(kernel.shape) == 3 and kernel.shape[0] != 1:
raise ValueError(f"Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}")
# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)
height, width = tmp_kernel.shape[-2:]
# pad the input tensor
if padding == 'same':
padding_shape: List[int] = _compute_padding([height, width])
input = F.pad(input, padding_shape, mode=border_type)
# kernel and input tensor reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))
# convolve the tensor with the kernel.
output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)
if padding == 'same':
return output.view(b, c, h, w)
else:
return output.view(b, c, h - height + 1, w - width + 1)
|
def filter2d(
input: torch.Tensor, kernel: torch.Tensor, border_type: str = 'reflect', normalized: bool = False,
padding: str = 'same'
) -> torch.Tensor:
r"""Convolve a tensor with a 2d kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains in the same shape.
Args:
input: the input tensor with shape of
:math:`(B, C, H, W)`.
kernel: the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(1, kH, kW)` or :math:`(B, kH, kW)`.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
normalized: If True, kernel will be L1 normalized.
padding: This defines the type of padding.
2 modes available ``'same'`` or ``'valid'``.
Return:
torch.Tensor: the convolved tensor of same size and numbers of channels
as the input with shape :math:`(B, C, H, W)`.
Example:
>>> input = torch.tensor([[[
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 5., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.],]]])
>>> kernel = torch.ones(1, 3, 3)
>>> filter2d(input, kernel, padding='same')
tensor([[[[0., 0., 0., 0., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 5., 5., 5., 0.],
[0., 0., 0., 0., 0.]]]])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(input)}")
if not isinstance(kernel, torch.Tensor):
raise TypeError(f"Input border_type is not torch.Tensor. Got {type(kernel)}")
if not isinstance(border_type, str):
raise TypeError(f"Input border_type is not string. Got {type(kernel)}")
if not isinstance(padding, str):
raise TypeError(f"Input padding is not string. Got {type(padding)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
if not len(kernel.shape) == 3 and kernel.shape[0] != 1:
raise ValueError(f"Invalid kernel shape, we expect 1xHxW. Got: {kernel.shape}")
# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.unsqueeze(1).to(input)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
tmp_kernel = tmp_kernel.expand(-1, c, -1, -1)
height, width = tmp_kernel.shape[-2:]
# pad the input tensor
if padding == 'same':
padding_shape: List[int] = _compute_padding([height, width])
input = F.pad(input, padding_shape, mode=border_type)
# kernel and input tensor reshape to align element-wise or batch-wise params
tmp_kernel = tmp_kernel.reshape(-1, 1, height, width)
input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1))
# convolve the tensor with the kernel.
output = F.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1)
if padding == 'same':
return output.view(b, c, h, w)
else:
return output.view(b, c, h - height + 1, w - width + 1)
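For completeness, a sketch of the 'valid' padding branch using the same doctest input (assumes torch and the module's helpers are importable):

import torch

inp = torch.zeros(1, 1, 5, 5)
inp[..., 2, 2] = 5.0
kernel = torch.ones(1, 3, 3)

out = filter2d(inp, kernel, padding='valid')
print(out.shape)   # torch.Size([1, 1, 3, 3]); every 3x3 window covers the centre pixel
print(out)         # a 3x3 block filled with 5.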
|
48,413 |
def main():
module = AnsibleModule(
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
),
supports_check_mode=True
)
# Get parameters
project_id = module.params.get('project_id')
instance_id = module.params.get('instance_id')
project = ""
instance = ""
result = ""
if not HAS_OVH:
module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
# Connect to OVH API
client = ovh.Client()
# Check that the project exists
try:
project = client.get('/cloud/project/{0}'.format(project_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='project {0} does not exist'.format(project_id))
# Check that the instance exists
try:
instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
# Is monthlyBilling already enabled or pending ?
if instance['monthlyBilling'] is not None:
if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
module.exit_json(changed=False, result=instance['monthlyBilling'])
if module.check_mode:
module.exit_json(changed=True, msg="Dry Run!")
else:
try:
result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
module.exit_json(changed=True, result=result['monthlyBilling'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
# We should never reach here
module.fail_json(msg='Internal ovh_monthly_billing module error')
|
def main():
module = AnsibleModule(
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
),
supports_check_mode=True
)
# Get parameters
project_id = module.params.get('project_id')
instance_id = module.params.get('instance_id')
project = ""
instance = ""
result = ""
if not HAS_OVH:
module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
# Connect to OVH API
client = ovh.Client()
# Check that the project exists
try:
project = client.get('/cloud/project/{0}'.format(project_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='project {0} does not exist'.format(project_id))
# Check that the instance exists
try:
instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
# Is monthlyBilling already enabled or pending ?
if instance['monthlyBilling'] is not None:
if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
module.exit_json(changed=False, result=instance['monthlyBilling'])
if module.check_mode:
module.exit_json(changed=True, msg="Dry Run!")
try:
result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
module.exit_json(changed=True, result=result['monthlyBilling'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
# We should never reach here
module.fail_json(msg='Internal ovh_monthly_billing module error')
|
30,796 |
def get_incident_events_command(client: Client, args: dict) -> Tuple:
incident_id = args.get("incident_id", 0)
limit = args.get("limit", 100)
limit = "1000" if int(limit) > 1000 else limit
offset = args.get("offset", 0)
try:
events = client.get_incident_events(incident_id, limit, offset)
except DemistoException as e:
if "429" in e.args[0]:
raise DemistoException(
"The allowed amount of API calls per minute in Illusive Attack Management has exceeded. In case this"
" message repeats, please contact Illusive Networks support")
else:
raise DemistoException("{}".format(e.args[0]))
readable_output = tableToMarkdown('Illusive get incident\'s events', events, metadata="Number of events {}"
.format(len(events)))
outputs = {
'Illusive.Incident(val.incidentId == obj.incidentId)': {
'eventsNumber': len(events),
'incidentId': int(incident_id),
'Event': events
}
}
return (
readable_output,
outputs,
events # raw response - the original response
)
|
def get_incident_events_command(client: Client, args: dict) -> Tuple:
incident_id = args.get("incident_id", 0)
limit = args.get("limit", 100)
limit = "1000" if int(limit) > 1000 else limit
offset = args.get("offset", 0)
try:
events = client.get_incident_events(incident_id, limit, offset)
except DemistoException as e:
if "429" in e.args[0]:
raise DemistoException(
"The allowed amount of API calls per minute in Illusive Attack Management has exceeded. In case this"
" message repeats, please contact Illusive Networks support")
else:
raise DemistoException("{}".format(e.args[0]))
readable_output = tableToMarkdown('Illusive get incident\'s events', events, metadata="Number of events {}"
.format(len(events)))
outputs = {
'Illusive.Incident(val.incidentId == obj.incidentId)': {
'eventsNumber': len(events),
'IncidentID': int(incident_id),
'Event': events
}
}
return (
readable_output,
outputs,
events # raw response - the original response
)
|
13,165 |
def warn(
*,
message: str,
category: Optional[Type] = None,
source: Optional[Any] = None,
) -> None:
"""This `warnings.warn` wrapper function attempts to show the location causing the
warning in the user code that called the library.
It does this by walking up the stack trace to find the first frame located outside
the `gitlab/` directory. This is helpful to users as it shows them their code that
is causing the warning.
"""
# Get `stacklevel` for user code so we indicate where issue is in
# their code.
pg_dir = os.path.abspath(os.path.dirname(__file__))
stack = traceback.extract_stack()
stacklevel = 1
for stacklevel, frame in enumerate(reversed(stack), start=1):
frame_dir = os.path.abspath(os.path.dirname(frame.filename))
if not frame_dir.startswith(pg_dir):
break
warnings.warn(
message=message, category=category, stacklevel=stacklevel, source=source
)
|
def warn(
*,
message: str,
category: Optional[Type] = None,
source: Optional[Any] = None,
) -> None:
"""This `warnings.warn` wrapper function attempts to show the location causing the
warning in the user code that called the library.
It does this by walking up the stack trace to find the first frame located outside
the `gitlab/` directory. This is helpful to users as it shows them their code that
is causing the warning.
"""
# Get `stacklevel` for user code so we indicate where issue is in
# their code.
gitlab_dir = Path(__file__).parent.resolve()
stack = traceback.extract_stack()
stacklevel = 1
for stacklevel, frame in enumerate(reversed(stack), start=1):
frame_dir = os.path.abspath(os.path.dirname(frame.filename))
if not frame_dir.startswith(str(gitlab_dir)):
break
warnings.warn(
message=message, category=category, stacklevel=stacklevel, source=source
)
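The stacklevel mechanics can be sanity-checked with plain warnings, independent of this package (standalone sketch):

import warnings

def library_helper():
    # stacklevel=2 attributes the warning to the caller of library_helper(),
    # which is what the walk over traceback.extract_stack() above computes dynamically.
    warnings.warn("deprecated call", DeprecationWarning, stacklevel=2)

def user_code():
    library_helper()

warnings.simplefilter("always")
user_code()   # the reported filename/line points at user_code(), not library_helper()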
|
47,118 |
def main():
# Parse the arguments
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (args.target_lang is not None and args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if args.source_lang is not None:
tokenizer.src_lang = args.source_lang
if args.target_lang is not None:
tokenizer.tgt_lang = args.target_lang
# Get the language codes for input/target.
source_lang = args.source_lang.split("_")[0]
target_lang = args.target_lang.split("_")[0]
padding = "max_length" if args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if args.pad_to_max_length:
# If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
    # shorter in a multi-process setup)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = model.generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1,
pad_index=tokenizer.pad_token_id)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(
predictions=accelerator.gather(decoded_preds),
references=accelerator.gather(decoded_labels),
)
eval_metric = metric.compute()
logger.info(f"epoch {epoch}: {eval_metric}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
|
def main():
# Parse the arguments
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (args.target_lang is not None and args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if args.source_lang is not None:
tokenizer.src_lang = args.source_lang
if args.target_lang is not None:
tokenizer.tgt_lang = args.target_lang
# Get the language codes for input/target.
source_lang = args.source_lang.split("_")[0]
target_lang = args.target_lang.split("_")[0]
padding = "max_length" if args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
        # Otherwise, `DataCollatorForSeq2Seq` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
    # shorter in a multi-process setup)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1,
pad_index=tokenizer.pad_token_id)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(
predictions=accelerator.gather(decoded_preds),
references=accelerator.gather(decoded_labels),
)
eval_metric = metric.compute()
logger.info(f"epoch {epoch}: {eval_metric}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
|
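The visible difference between the two copies of this translation script is in the evaluation loop: the modified version calls generate through accelerator.unwrap_model(model), which keeps generation working when Accelerate has wrapped the model (for example in DistributedDataParallel, which does not expose generate). A minimal sketch of that pattern, assuming a small placeholder checkpoint ("t5-small") and a toy batch purely for illustration:

import torch
from accelerate import Accelerator
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

accelerator = Accelerator()
tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint for the sketch
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = accelerator.prepare(model)  # may wrap the model, e.g. in DistributedDataParallel

batch = tokenizer(["translate English to German: Hello"], return_tensors="pt").to(accelerator.device)
with torch.no_grad():
    # Unwrap before generating: the DDP wrapper only forwards __call__, not .generate().
    generated = accelerator.unwrap_model(model).generate(
        batch["input_ids"], attention_mask=batch["attention_mask"], max_length=32
    )
print(tokenizer.batch_decode(generated, skip_special_tokens=True))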
59,811 |
def setup_args(parser=None) -> ParlaiParser:
"""
Build the ParlAI parser, adding command line args if necessary.
:param ParlaiParser parser:
Preexisting parser to append options to. Will be created if needed.
:returns:
the ParlaiParser with CLI options added.
"""
if parser is None:
parser = ParlaiParser(True, True, 'Train a model')
train = parser.add_argument_group('Training Loop Arguments')
train.add_argument(
'-et',
'--evaltask',
help='task to use for valid/test (defaults to the one used for training)',
)
train.add_argument(
'--eval-batchsize',
type=int,
hidden=True,
help='Eval time batch size (defaults to same as -bs)',
)
train.add_argument(
'--eval-dynamic-batching', # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367
default=None,
type='nonestr',
choices={None, 'off', 'full', 'batchsort'},
help='Temporary fix for an issue tracked in #3367. Eval dynamic batching; '
'if None, defaults to whatever is set in --dynamic-batching. If "off", sets dynamic batching to None',
)
train.add_argument('--display-examples', type='bool', default=False, hidden=True)
train.add_argument('-eps', '--num-epochs', type=float, default=-1)
train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
train.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
train.add_argument(
'-vtim',
'--validation-every-n-secs',
type=float,
default=-1,
help='Validate every n seconds. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-stim',
'--save-every-n-secs',
type=float,
default=-1,
help='Saves the model to model_file.checkpoint after '
'every n seconds (default -1, never).',
)
train.add_argument(
'-sval',
'--save-after-valid',
type='bool',
default=False,
help='Saves the model to model_file.checkpoint after '
'every validation (default %(default)s).',
)
train.add_argument(
'-veps',
'--validation-every-n-epochs',
type=float,
default=-1,
help='Validate every n epochs. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-vme',
'--validation-max-exs',
type=int,
default=-1,
hidden=True,
help='max examples to use during validation (default -1 uses all)',
)
train.add_argument(
'--short-final-eval',
default=False,
hidden=True,
type='bool',
help='If true, obeys --validation-max-exs in the final '
'validation and test evaluations.',
)
train.add_argument(
'-vp',
'--validation-patience',
type=int,
default=10,
help=(
'number of iterations of validation where result'
' does not improve before we stop training'
),
)
train.add_argument(
'-vmt',
'--validation-metric',
default='accuracy',
help='key into report table for selecting best validation',
)
train.add_argument(
'-vmm',
'--validation-metric-mode',
type=str,
choices=['max', 'min'],
help='how to optimize validation metric (max or min)',
)
train.add_argument(
'-vcut',
'--validation-cutoff',
type=float,
default=1.0,
hidden=True,
help='value at which training will stop if exceeded by metric',
)
train.add_argument(
'-lfc',
'--load-from-checkpoint',
type='bool',
default=True,
hidden=True,
help='load model from checkpoint if available',
)
train.add_argument(
'-vshare',
'--validation-share-agent',
default=False,
hidden=True,
help='use a shared copy of the agent for validation. '
'this will eventually default to True, but '
'currently defaults to False.',
)
train.add_argument(
'-mcs',
'--metrics',
type=str,
default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
)
train.add_argument(
'-micro',
'--aggregate-micro',
type='bool',
default=False,
help='Report micro-averaged metrics instead of macro averaged metrics.',
recommended=False,
)
TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
parser = setup_dict_args(parser)
return parser
|
def setup_args(parser=None) -> ParlaiParser:
"""
Build the ParlAI parser, adding command line args if necessary.
:param ParlaiParser parser:
Preexisting parser to append options to. Will be created if needed.
:returns:
the ParlaiParser with CLI options added.
"""
if parser is None:
parser = ParlaiParser(True, True, 'Train a model')
train = parser.add_argument_group('Training Loop Arguments')
train.add_argument(
'-et',
'--evaltask',
help='task to use for valid/test (defaults to the one used for training)',
)
train.add_argument(
'--eval-batchsize',
type=int,
hidden=True,
help='Eval time batch size (defaults to same as -bs)',
)
train.add_argument(
'--eval-dynamic-batching', # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367
default=None,
type='nonestr',
choices={None, 'off', 'full', 'batchsort'},
help=(
'Set dynamic batching at evaluation time. Set to off for '
'train-only dynamic batching. Set to none (default) to use same '
'setting as --dynamic-batching.'
),
)
train.add_argument('--display-examples', type='bool', default=False, hidden=True)
train.add_argument('-eps', '--num-epochs', type=float, default=-1)
train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
train.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
train.add_argument(
'-vtim',
'--validation-every-n-secs',
type=float,
default=-1,
help='Validate every n seconds. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-stim',
'--save-every-n-secs',
type=float,
default=-1,
help='Saves the model to model_file.checkpoint after '
'every n seconds (default -1, never).',
)
train.add_argument(
'-sval',
'--save-after-valid',
type='bool',
default=False,
help='Saves the model to model_file.checkpoint after '
'every validation (default %(default)s).',
)
train.add_argument(
'-veps',
'--validation-every-n-epochs',
type=float,
default=-1,
help='Validate every n epochs. Saves model to model_file '
'(if set) whenever best val metric is found',
)
train.add_argument(
'-vme',
'--validation-max-exs',
type=int,
default=-1,
hidden=True,
help='max examples to use during validation (default -1 uses all)',
)
train.add_argument(
'--short-final-eval',
default=False,
hidden=True,
type='bool',
help='If true, obeys --validation-max-exs in the final '
'validation and test evaluations.',
)
train.add_argument(
'-vp',
'--validation-patience',
type=int,
default=10,
help=(
'number of iterations of validation where result'
' does not improve before we stop training'
),
)
train.add_argument(
'-vmt',
'--validation-metric',
default='accuracy',
help='key into report table for selecting best validation',
)
train.add_argument(
'-vmm',
'--validation-metric-mode',
type=str,
choices=['max', 'min'],
help='how to optimize validation metric (max or min)',
)
train.add_argument(
'-vcut',
'--validation-cutoff',
type=float,
default=1.0,
hidden=True,
help='value at which training will stop if exceeded by metric',
)
train.add_argument(
'-lfc',
'--load-from-checkpoint',
type='bool',
default=True,
hidden=True,
help='load model from checkpoint if available',
)
train.add_argument(
'-vshare',
'--validation-share-agent',
default=False,
hidden=True,
help='use a shared copy of the agent for validation. '
'this will eventually default to True, but '
'currently defaults to False.',
)
train.add_argument(
'-mcs',
'--metrics',
type=str,
default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
)
train.add_argument(
'-micro',
'--aggregate-micro',
type='bool',
default=False,
help='Report micro-averaged metrics instead of macro averaged metrics.',
recommended=False,
)
TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
parser = setup_dict_args(parser)
return parser
|
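The two copies of setup_args differ only in the help text for --eval-dynamic-batching; the behaviour it describes is an eval-time option that, when left unset, inherits whatever --dynamic-batching was set to for training. A generic sketch of that fallback pattern using plain argparse (not ParlAI's ParlaiParser; the option names are kept only for illustration):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dynamic-batching', choices=['off', 'full', 'batchsort'], default='off')
# None (the default) means "inherit the --dynamic-batching setting".
parser.add_argument('--eval-dynamic-batching', choices=['off', 'full', 'batchsort'], default=None)

opt = parser.parse_args(['--dynamic-batching', 'full'])
eval_db = opt.eval_dynamic_batching if opt.eval_dynamic_batching is not None else opt.dynamic_batching
print(eval_db)  # -> 'full'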
41,045 |
def _get_parser():
"""
Parses command line inputs for tedana
Returns
-------
parser.parse_args() : argparse dict
"""
from ..info import __version__
verstr = 'tedana v{}'.format(__version__)
parser = argparse.ArgumentParser()
    # Argument parser follows the template provided by RalphyZ
# https://stackoverflow.com/a/43456577
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-d',
dest='data',
nargs='+',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Multi-echo dataset for analysis. May be a '
'single file with spatially concatenated data '
'or a set of echo-specific files, in the same '
'order as the TEs are listed in the -e '
'argument.'),
required=True)
required.add_argument('-e',
dest='tes',
nargs='+',
metavar='TE',
type=float,
help='Echo times (in ms). E.g., 15.0 39.0 63.0',
required=True)
optional.add_argument('--out-dir',
dest='out_dir',
type=str,
metavar='PATH',
help='Output directory.',
default='.')
optional.add_argument('--mask',
dest='mask',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=("Binary mask of voxels to include in TE "
"Dependent ANAlysis. Must be in the same "
"space as `data`. If an explicit mask is not "
"provided, then Nilearn's compute_epi_mask "
"function will be used to derive a mask "
"from the first echo's data."),
default=None)
optional.add_argument('--fittype',
dest='fittype',
action='store',
choices=['loglin', 'curvefit'],
help=('Desired T2*/S0 fitting method. '
'"loglin" means that a linear model is fit '
'to the log of the data. '
'"curvefit" means that a more computationally '
'demanding monoexponential model is fit '
'to the raw data. '
'Default is "loglin".'),
default='loglin')
optional.add_argument('--combmode',
dest='combmode',
action='store',
choices=['t2s'],
help=('Combination scheme for TEs: '
't2s (Posse 1999, default)'),
default='t2s')
optional.add_argument('--tedpca',
dest='tedpca',
help=('Method with which to select components in TEDPCA. '
'PCA decomposition with the mdl, kic and aic options '
'is based on a Moving Average (stationary Gaussian) '
                                'process; they are ordered from most to least aggressive. '
'Default=\'mdl\'.'),
choices=['kundu', 'kundu-stabilize', 'mdl', 'aic', 'kic'],
default='mdl')
optional.add_argument('--seed',
dest='fixed_seed',
metavar='INT',
type=int,
help=('Value used for random initialization of ICA '
'algorithm. Set to an integer value for '
'reproducible ICA results. Set to -1 for '
'varying results across ICA calls. '
'Default=42.'),
default=42)
optional.add_argument('--maxit',
dest='maxit',
metavar='INT',
type=int,
help=('Maximum number of iterations for ICA.'),
default=500)
optional.add_argument('--maxrestart',
dest='maxrestart',
metavar='INT',
type=int,
help=('Maximum number of attempts for ICA. If ICA '
'fails to converge, the fixed seed will be '
'updated and ICA will be run again. If '
'convergence is achieved before maxrestart '
'attempts, ICA will finish early.'),
default=10)
optional.add_argument('--tedort',
dest='tedort',
action='store_true',
help=('Orthogonalize rejected components w.r.t. '
'accepted components prior to denoising.'),
default=False)
optional.add_argument('--gscontrol',
dest='gscontrol',
required=False,
action='store',
nargs='+',
help=('Perform additional denoising to remove '
'spatially diffuse noise. Default is None. '
                                'This argument can be a single value or a space '
'delimited list'),
choices=['t1c', 'gsr'],
default=None)
optional.add_argument('--no-png',
dest='no_png',
action='store_true',
                          help=('Suppresses creation of the figures folder with '
                                'static component maps, timecourse plots and '
                                'other diagnostic images.'),
default=False)
optional.add_argument('--png-cmap',
dest='png_cmap',
type=str,
help='Colormap for figures',
default='coolwarm')
optional.add_argument('--verbose',
dest='verbose',
action='store_true',
help='Generate intermediate and additional files.',
default=False)
optional.add_argument('--lowmem',
dest='low_mem',
action='store_true',
help=('Enables low-memory processing, including the '
'use of IncrementalPCA. May increase workflow '
'duration.'),
default=False)
optional.add_argument('--n-threads',
dest='n_threads',
type=int,
action='store',
help=('Number of threads to use. Used by '
'threadcountctl to set the parameter outside '
'of the workflow function.'),
default=-1)
optional.add_argument('--debug',
dest='debug',
action='store_true',
help=('Logs in the terminal will have increased '
'verbosity, and will also be written into '
'a .tsv file in the output directory.'),
default=False)
optional.add_argument('--quiet',
dest='quiet',
help=argparse.SUPPRESS,
action='store_true',
default=False)
optional.add_argument('-v', '--version', action='version', version=verstr)
parser._action_groups.append(optional)
rerungrp = parser.add_argument_group('arguments for rerunning the workflow')
rerungrp.add_argument('--t2smap',
dest='t2smap',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Precalculated T2* map in the same space as '
'the input data.'),
default=None)
rerungrp.add_argument('--mix',
dest='mixm',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing mixing matrix. If not '
'provided, ME-PCA & ME-ICA is done.'),
default=None)
rerungrp.add_argument('--ctab',
dest='ctab',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing a component table from which '
'to extract pre-computed classifications.'),
default=None)
rerungrp.add_argument('--manacc',
dest='manacc',
help=('Comma separated list of manually '
'accepted components'),
default=None)
return parser
|
def _get_parser():
"""
Parses command line inputs for tedana
Returns
-------
parser.parse_args() : argparse dict
"""
from ..info import __version__
verstr = 'tedana v{}'.format(__version__)
parser = argparse.ArgumentParser()
    # Argument parser follows the template provided by RalphyZ
# https://stackoverflow.com/a/43456577
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-d',
dest='data',
nargs='+',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Multi-echo dataset for analysis. May be a '
'single file with spatially concatenated data '
'or a set of echo-specific files, in the same '
'order as the TEs are listed in the -e '
'argument.'),
required=True)
required.add_argument('-e',
dest='tes',
nargs='+',
metavar='TE',
type=float,
help='Echo times (in ms). E.g., 15.0 39.0 63.0',
required=True)
optional.add_argument('--out-dir',
dest='out_dir',
type=str,
metavar='PATH',
help='Output directory.',
default='.')
optional.add_argument('--mask',
dest='mask',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=("Binary mask of voxels to include in TE "
"Dependent ANAlysis. Must be in the same "
"space as `data`. If an explicit mask is not "
"provided, then Nilearn's compute_epi_mask "
"function will be used to derive a mask "
"from the first echo's data."),
default=None)
optional.add_argument('--fittype',
dest='fittype',
action='store',
choices=['loglin', 'curvefit'],
help=('Desired T2*/S0 fitting method. '
'"loglin" means that a linear model is fit '
'to the log of the data. '
'"curvefit" means that a more computationally '
'demanding monoexponential model is fit '
'to the raw data. '
'Default is "loglin".'),
default='loglin')
optional.add_argument('--combmode',
dest='combmode',
action='store',
choices=['t2s'],
help=('Combination scheme for TEs: '
't2s (Posse 1999, default)'),
default='t2s')
optional.add_argument('--tedpca',
dest='tedpca',
help=('Method with which to select components in TEDPCA. '
'PCA decomposition with the mdl, kic and aic options '
'is based on a Moving Average (stationary Gaussian) '
                                'process; they are ordered from most to least aggressive. '
'Default=\'mdl\'.'),
choices=['kundu', 'kundu-stabilize', 'mdl', 'aic', 'kic'],
default='mdl')
optional.add_argument('--seed',
dest='fixed_seed',
metavar='INT',
type=int,
help=('Value used for random initialization of ICA '
'algorithm. Set to an integer value for '
'reproducible ICA results. Set to -1 for '
'varying results across ICA calls. '
'Default=42.'),
default=42)
optional.add_argument('--maxit',
dest='maxit',
metavar='INT',
type=int,
help=('Maximum number of iterations for ICA.'),
default=500)
optional.add_argument('--maxrestart',
dest='maxrestart',
metavar='INT',
type=int,
help=('Maximum number of attempts for ICA. If ICA '
'fails to converge, the fixed seed will be '
'updated and ICA will be run again. If '
'convergence is achieved before maxrestart '
'attempts, ICA will finish early.'),
default=10)
optional.add_argument('--tedort',
dest='tedort',
action='store_true',
help=('Orthogonalize rejected components w.r.t. '
'accepted components prior to denoising.'),
default=False)
optional.add_argument('--gscontrol',
dest='gscontrol',
required=False,
action='store',
nargs='+',
help=('Perform additional denoising to remove '
'spatially diffuse noise. Default is None. '
                                'This argument can be a single value or a space '
'delimited list'),
choices=['t1c', 'gsr'],
default=None)
optional.add_argument('--no-png',
dest='no_png',
action='store_true',
                          help=('Suppresses creation of the figures folder with '
                                'static component maps, timecourse plots and '
                                'other diagnostic images.'),
default=False)
optional.add_argument('--png-cmap',
dest='png_cmap',
type=str,
help='Colormap for figures',
default='coolwarm')
optional.add_argument('--verbose',
dest='verbose',
action='store_true',
help='Generate intermediate and additional files.',
default=False)
optional.add_argument('--lowmem',
dest='low_mem',
action='store_true',
help=('Enables low-memory processing, including the '
'use of IncrementalPCA. May increase workflow '
'duration.'),
default=False)
optional.add_argument('--n-threads',
dest='n_threads',
type=int,
action='store',
help=('Number of threads to use. Used by '
'threadpoolctl to set the parameter outside '
'of the workflow function.'),
default=-1)
optional.add_argument('--debug',
dest='debug',
action='store_true',
help=('Logs in the terminal will have increased '
'verbosity, and will also be written into '
'a .tsv file in the output directory.'),
default=False)
optional.add_argument('--quiet',
dest='quiet',
help=argparse.SUPPRESS,
action='store_true',
default=False)
optional.add_argument('-v', '--version', action='version', version=verstr)
parser._action_groups.append(optional)
rerungrp = parser.add_argument_group('arguments for rerunning the workflow')
rerungrp.add_argument('--t2smap',
dest='t2smap',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('Precalculated T2* map in the same space as '
'the input data.'),
default=None)
rerungrp.add_argument('--mix',
dest='mixm',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing mixing matrix. If not '
'provided, ME-PCA & ME-ICA is done.'),
default=None)
rerungrp.add_argument('--ctab',
dest='ctab',
metavar='FILE',
type=lambda x: is_valid_file(parser, x),
help=('File containing a component table from which '
'to extract pre-computed classifications.'),
default=None)
rerungrp.add_argument('--manacc',
dest='manacc',
help=('Comma separated list of manually '
'accepted components'),
default=None)
return parser
|
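The substantive change between the two copies of _get_parser is a typo fix in the --n-threads help string ('threadcountctl' becomes 'threadpoolctl'). For context, a minimal sketch of how a caller could apply such a thread limit with the real threadpoolctl package (the n_threads value is only an example, and this is not tedana's actual workflow code):

import numpy as np
from threadpoolctl import threadpool_limits

n_threads = 4  # e.g. the parsed value of --n-threads
with threadpool_limits(limits=n_threads):
    # BLAS/OpenMP-backed calls inside this block are capped at n_threads threads.
    a = np.random.rand(512, 512)
    b = a @ a.T
print(b.shape)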