id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k)
---|---|---
38,953 |
def normalize(name: str) -> str:
return " ".join((word.capitalize()) for word in name.split(" "))
|
def normalize(name: str) -> str:
return ' '.join(word.capitalize() for word in name.split(' '))
|
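The change in this pair is purely stylistic: the redundant parentheses around the generator element are dropped and the quote style is normalised. A minimal check (editor's sketch, not part of the dataset row):
def normalize(name: str) -> str:
    return ' '.join(word.capitalize() for word in name.split(' '))
assert normalize("ada lovelace") == "Ada Lovelace"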
44,297 |
def sparse_expand_matrix(base_matrix, wires, wire_order=None, format="csr"):
"""Re-express a sparse base matrix acting on a subspace defined by a set of wire labels
according to a global wire order.
Args:
base_matrix (tensor_like): base matrix to expand
wires (Iterable): wires determining the subspace that base matrix acts on; a base matrix of
dimension :math:`2^n` acts on a subspace of :math:`n` wires
wire_order (Iterable): global wire order, which has to contain all wire labels in ``wires``, but can also
contain additional labels
Returns:
tensor_like: expanded matrix
"""
if (wire_order is None) or (wire_order == wires):
return base_matrix
interface = qml.math.get_interface(base_matrix) # pylint: disable=protected-access
if interface != "scipy" and not issparse(base_matrix):
raise ValueError("base_matrix must be a scipy sparse matrix")
n_wires = len(wires)
n_total_wires = len(wire_order)
expanded_wires = copy.copy(wires)
for wire in wire_order:
if wire not in wires:
expanded_wires.append(wire)
num_missing_wires = n_total_wires - n_wires
expanded_matrix = kron(
base_matrix, eye(2**num_missing_wires, format=format), format=format
) # added missing wires at the end
U = eye(2**n_total_wires)
while not expanded_wires == wire_order:
for i in range(n_total_wires):
if expanded_wires[i] != wire_order[i]:
j = wire_order.index(expanded_wires[i])
U = U @ _sparse_swap_mat(i, j, n_total_wires, format=format)
temp = expanded_wires[i]
expanded_wires[i] = expanded_wires[j]
expanded_wires[j] = temp
break
expanded_matrix = csr_matrix(U.T) @ expanded_matrix @ U
return expanded_matrix
|
def sparse_expand_matrix(base_matrix, wires, wire_order=None, format="csr"):
"""Re-express a sparse base matrix acting on a subspace defined by a set of wire labels
according to a global wire order.
Args:
base_matrix (tensor_like): base matrix to expand
wires (Iterable): wires determining the subspace that base matrix acts on; a base matrix of
dimension :math:`2^n` acts on a subspace of :math:`n` wires
wire_order (Iterable): global wire order, which has to contain all wire labels in ``wires``, but can also
contain additional labels
Returns:
tensor_like: expanded matrix
"""
if (wire_order is None) or (wire_order == wires):
return base_matrix
interface = qml.math.get_interface(base_matrix) # pylint: disable=protected-access
if interface != "scipy" and not issparse(base_matrix):
raise ValueError("base_matrix must be a scipy sparse matrix")
n_wires = len(wires)
n_total_wires = len(wire_order)
expanded_wires = copy.copy(wires)
for wire in wire_order:
if wire not in wires:
expanded_wires.append(wire)
num_missing_wires = n_total_wires - n_wires
expanded_matrix = kron(
base_matrix, eye(2**num_missing_wires, format=format), format=format
) # added missing wires at the end
U = eye(2**n_total_wires)
while not expanded_wires == wire_order:
for i in range(n_total_wires):
if expanded_wires[i] != wire_order[i]:
j = wire_order.index(expanded_wires[i])
U = U @ _sparse_swap_mat(i, j, n_total_wires, format=format)
expanded_wires[i], expanded_wires[j] = expanded_wires[j], expanded_wires[i]
break
expanded_matrix = csr_matrix(U.T) @ expanded_matrix @ U
return expanded_matrix
|
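The function body is otherwise unchanged; the modified version only replaces the three-line temporary-variable swap with tuple unpacking. A minimal sketch of the equivalence (editor's illustration):
a, b = 1, 2
temp = a                     # pattern used in the original
a = b
b = temp
x, y = 1, 2
x, y = y, x                  # tuple unpacking used in the modified version
assert (a, b) == (x, y) == (2, 1)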
27,394 |
def uncomment(lines: List[str]) -> str:
""" Remove comments from lines in an .xvg file
Parameters
----------
lines : list of str
Lines as directly read from .xvg file
Yields
------
str
The next non-comment line, with any trailing comments removed
"""
for line in lines:
stripped_line = line.strip()
# ignore blank lines
if not stripped_line:
continue
# '@' must be at the beginning of a line to be a grace instruction
if stripped_line[0] == '@':
continue
# '#' can be anywhere in the line, everything after is a comment
comment_position = stripped_line.find('#')
if comment_position > 0 and stripped_line[:comment_position]:
yield stripped_line[:comment_position]
elif comment_position < 0 and stripped_line:
yield stripped_line
# if comment_position == 0, then the line is empty
|
def uncomment(lines: List[str]) -> Generator[str, None, None]:
""" Remove comments from lines in an .xvg file
Parameters
----------
lines : list of str
Lines as directly read from .xvg file
Yields
------
str
The next non-comment line, with any trailing comments removed
"""
for line in lines:
stripped_line = line.strip()
# ignore blank lines
if not stripped_line:
continue
# '@' must be at the beginning of a line to be a grace instruction
if stripped_line[0] == '@':
continue
# '#' can be anywhere in the line, everything after is a comment
comment_position = stripped_line.find('#')
if comment_position > 0 and stripped_line[:comment_position]:
yield stripped_line[:comment_position]
elif comment_position < 0 and stripped_line:
yield stripped_line
# if comment_position == 0, then the line is empty
|
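The diff only corrects the return annotation: a function that uses ``yield`` returns a generator, so annotating it as ``str`` is misleading. A self-contained sketch of the corrected pattern, using a simplified filter rather than the row's ``uncomment``:
from typing import Iterator, List
def data_lines(lines: List[str]) -> Iterator[str]:
    # The annotation describes the generator that is returned,
    # e.g. Iterator[str] or Generator[str, None, None], not str.
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith(('#', '@')):
            yield stripped.split('#')[0].strip()
assert list(data_lines(["# c", "@ g", "1 2 3  # t", ""])) == ["1 2 3"]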
29,672 |
def test_expects_comm():
class A:
def empty(self):
...
def one_arg(self, arg):
...
def comm_arg(self, comm):
...
def stream_arg(self, stream):
...
def two_arg(self, arg, other):
...
def comm_arg_other(self, comm, other):
...
def stream_arg_other(self, stream, other):
...
def arg_kwarg(self, arg, other=None):
...
def comm_argonly(self, comm, *, other):
...
def comm_kwarg_only(self, /, comm, other):
...
assert not _expects_comm(A.empty)
assert not _expects_comm(A.one_arg)
assert _expects_comm(A.comm_arg)
assert _expects_comm(A.stream_arg)
assert not _expects_comm(A.two_arg)
assert _expects_comm(A.comm_arg_other)
assert _expects_comm(A.stream_arg_other)
assert not _expects_comm(A.arg_kwarg)
assert _expects_comm(A.comm_argonly)
assert _expects_comm(A.comm_kwarg_only)
|
def test_expects_comm():
class A:
def empty(self):
...
def one_arg(self, arg):
...
def comm_arg(self, comm):
...
def stream_arg(self, stream):
...
def two_arg(self, arg, other):
...
def comm_arg_other(self, comm, other):
...
def stream_arg_other(self, stream, other):
...
def arg_kwarg(self, arg, other=None):
...
def comm_argonly(self, comm, *, other):
...
def comm_kwarg_only(self, /, comm, other):
...
assert not _expects_comm(A.empty)
assert not _expects_comm(A.one_arg)
assert _expects_comm(A.comm_arg)
assert _expects_comm(A.stream_arg)
assert not _expects_comm(A.two_arg)
assert _expects_comm(A.comm_arg_other)
assert _expects_comm(A.stream_arg_other)
assert not _expects_comm(A.arg_kwarg)
assert _expects_comm(A.comm_posarg_only)
assert _expects_comm(A.comm_kwarg_only)
|
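The modified test only renames the reference from ``A.comm_argonly`` to ``A.comm_posarg_only``; the ``_expects_comm`` helper under test is not shown in this row. A hypothetical sketch of a check that would satisfy these assertions, purely for illustration:
import inspect
def _expects_comm_sketch(method) -> bool:
    # Assumption, not the real helper: treat a method as comm-expecting when
    # its first parameter after ``self`` is named "comm" or "stream".
    params = list(inspect.signature(method).parameters)
    return len(params) > 1 and params[1] in ("comm", "stream")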
4,600 |
def fmriprep_confounds_strategy(img_files, denoise_strategy="simple",
**kwargs):
"""
Use preset strategy to load confounds from :term:`fMRIPrep`.
`fmriprep_confounds_strategy` provides an interface to select confounds
based on past literature with limited parameters for user customisation.
.. versionadded:: 0.8.2
Parameters
----------
img_files : path to processed image files, optionally as a list.
Processed nii.gz/dtseries.nii/func.gii file reside in a
:term:`fMRIPrep` generated functional derivative directory (i.e.The
associated confound files should be in the same directory as the image
file). As long as the image file, confound related tsv and json are in
the same directory with BIDS-complied names, `fmriprep_confounds` can
retrieve the relevant files correctly.
- `nii.gz` or `dtseries.nii`: path to files, optionally as a list.
- `func.gii`: list of a pair of paths to files, optionally as a list
of lists.
denoise_strategy : {'simple', 'srubbing', 'compcor', 'ica_aroma'}
Name of preset denoising strategies. Each strategy has a set of
associated configurable parameters. For the documentation on
additional parameters, please refer to
:func:`nilearn.input_data.fmriprep_confounds`.
- 'simple': Load confounds for a simple denosing strategy commonly
used in resting state functional connectivity, described in
:footcite:`Fox2005`. Default as: full motion parameters,
full WM/CSF signals, and high pass filter, with an option to
extract global signal confounds.
Additional parameters: motion, wm_csf, global_signal, demean
- 'srubbing': Load confounds for scrubbing describbed in
:footcite:`Power2012`.Default as: full motion parameters,
full WM/CSF signals, remove segment smaller than 5 continuous
volumes (see docstring of
:func:`nilearn.input_data.fmriprep_confounds`),
high pass filter, with an option to extract global signal confounds.
Additional parameters: motion, wm_csf, scrub, fd_thresh,
std_dvars_thresh, global_signal, demean
- 'compcor': Load confounds using the CompCor strategy from
:footcite:`BEHZADI200790`.Default with full motion parameters,
high pass filter, and anatomical compcor with combined mask.
Additional parameters: motion, n_compcor, compcor, demean
- 'ica_aroma': Load confounds for non-aggresive ICA-AROMA strategy
described in :footcite:`Pruim2015`. The strategy requires
:term:`fMRIPrep` outputs generated with `--use-aroma` suffixed with
`desc-smoothAROMAnonaggr_bold`. See notes for more details about
this option.
Additional parameters: wm_csf, global_signal, demean
Other keyword arguments:
See additional parameters associated with denoise_strategy and refer
to the documentation of :func:`nilearn.input_data.fmriprep_confounds`
Returns
-------
confounds : pandas.DataFrame, or list of
A reduced version of :term:`fMRIPrep` confounds based on selected
strategy and flags.
An intercept is automatically added to the list of confounds.
The columns contains the labels of the regressors.
sample_mask : None, numpy.ndarray, or list of
When no volumns require removal, the value is None.
Otherwise, shape: (number of scans - number of volumes removed, )
The index of the niimgs along time/fourth dimension for valid volumes
for subsequent analysis.
This attribute should be passed to parameter `sample_mask` of
:class:`nilearn.input_data.NiftiMasker` or
:func:`nilearn.signal.clean`.
Volumns are removed if flagged as following:
- Non-steady-state volumes (if present)
- Motion outliers detected by scrubbing
Notes
-----
ICA-AROMA is implemented in two steps in :footcite:`Pruim2015`:
1. A non-aggressive denoising immediately after ICA classification.
A linear regression estimates signals with all independent
components as predictors. A partial regression is then applied to
remove variance associated with noise independent components.
:term:`fMRIPrep` performs this step and generates files in
`MNI152NLin6Asym` template, suffixed with
`desc-smoothAROMAnonaggr_bold`.
One can produce `desc-smoothAROMAnonaggr_bold` in other spatial
templates, please refer to :term:`fMRIPrep` documentation on ICA-AROMA
`<https://fmriprep.org/en/latest/workflows.html#ica-aroma>`_
2. Confound regression step (mean signals from WM and CSF).
Confound regressors generated by this function with
`denoise_strategy="ica_aroma"`.
For more discussion regarding choosing the nuisance regressors before or
after denoising with ICA-AROMA has a detriment on outcome measures,
please see notebook 5.
`<https://github.com/nipreps/fmriprep-notebooks/>`_
References
-----------
.. footbibliography::
"""
default_parameters = preset_strategies[denoise_strategy].copy()
check_parameters = list(default_parameters.keys())
check_parameters.remove("strategy")
# ICA-AROMA only accept the non-aggressive strategy
# ignore user passed value
if "ica_aroma" in default_parameters:
check_parameters.remove("ica_aroma")
user_parameters, not_needed = _update_user_inputs(kwargs,
default_parameters,
check_parameters)
# raise warning about parameters not needed
if not_needed:
warnings.warn("The following parameters are not needed for the "
f"selected strategy '{denoise_strategy}': {not_needed}; "
f"parameters accepted: {check_parameters}"
)
return fmriprep_confounds(img_files, **user_parameters)
|
def fmriprep_confounds_strategy(img_files, denoise_strategy="simple",
**kwargs):
"""
Use preset strategy to load confounds from :term:`fMRIPrep`.
`fmriprep_confounds_strategy` provides an interface to select confounds
based on past literature with limited parameters for user customisation.
.. versionadded:: 0.8.2
Parameters
----------
img_files : path to processed image files, optionally as a list.
Processed nii.gz/dtseries.nii/func.gii file reside in a
:term:`fMRIPrep` generated functional derivative directory (i.e.The
associated confound files should be in the same directory as the image
file). As long as the image file, confound related tsv and json are in
the same directory with BIDS-complied names, `fmriprep_confounds` can
retrieve the relevant files correctly.
- `nii.gz` or `dtseries.nii`: path to files, optionally as a list.
- `func.gii`: list of a pair of paths to files, optionally as a list
of lists.
denoise_strategy : {'simple', 'srubbing', 'compcor', 'ica_aroma'}
Name of preset denoising strategies. Each strategy has a set of
associated configurable parameters. For the documentation on
additional parameters, please refer to
:func:`nilearn.input_data.fmriprep_confounds`.
- 'simple': Load confounds for a simple denosing strategy commonly
used in resting state functional connectivity, described in
:footcite:`Fox2005`. Default as: full motion parameters,
full WM/CSF signals, and high pass filter, with an option to
extract global signal confounds.
Additional parameters: motion, wm_csf, global_signal, demean
- 'srubbing': Load confounds for scrubbing describbed in
:footcite:`Power2012`.Default as: full motion parameters,
full WM/CSF signals, remove segment smaller than 5 continuous
volumes (see docstring of
:func:`nilearn.input_data.fmriprep_confounds`),
high pass filter, with an option to extract global signal confounds.
Additional parameters: motion, wm_csf, scrub, fd_thresh,
std_dvars_thresh, global_signal, demean
- 'compcor': Load confounds using the CompCor strategy from
:footcite:`BEHZADI200790`.Default with full motion parameters,
high pass filter, and anatomical compcor with combined mask.
Additional parameters: motion, n_compcor, compcor, demean
- 'ica_aroma': Load confounds for non-aggresive ICA-AROMA strategy
described in :footcite:`Pruim2015`. The strategy requires
:term:`fMRIPrep` outputs generated with `--use-aroma` suffixed with
`desc-smoothAROMAnonaggr_bold`. See notes for more details about
this option.
Additional parameters: wm_csf, global_signal, demean
Other keyword arguments:
See additional parameters associated with denoise_strategy and refer
to the documentation of :func:`nilearn.input_data.fmriprep_confounds`
Returns
-------
confounds : pandas.DataFrame, or list of
A reduced version of :term:`fMRIPrep` confounds based on selected
strategy and flags.
An intercept is automatically added to the list of confounds.
The columns contains the labels of the regressors.
sample_mask : None, numpy.ndarray, or list of
When no volume requires removal, the value is None.
Otherwise, shape: (number of scans - number of volumes removed, )
The index of the niimgs along time/fourth dimension for valid volumes
for subsequent analysis.
This attribute should be passed to parameter `sample_mask` of
:class:`nilearn.input_data.NiftiMasker` or
:func:`nilearn.signal.clean`.
Volumns are removed if flagged as following:
- Non-steady-state volumes (if present)
- Motion outliers detected by scrubbing
Notes
-----
ICA-AROMA is implemented in two steps in :footcite:`Pruim2015`:
1. A non-aggressive denoising immediately after ICA classification.
A linear regression estimates signals with all independent
components as predictors. A partial regression is then applied to
remove variance associated with noise independent components.
:term:`fMRIPrep` performs this step and generates files in
`MNI152NLin6Asym` template, suffixed with
`desc-smoothAROMAnonaggr_bold`.
One can produce `desc-smoothAROMAnonaggr_bold` in other spatial
templates, please refer to :term:`fMRIPrep` documentation on ICA-AROMA
`<https://fmriprep.org/en/latest/workflows.html#ica-aroma>`_
2. Confound regression step (mean signals from WM and CSF).
Confound regressors generated by this function with
`denoise_strategy="ica_aroma"`.
For more discussion regarding choosing the nuisance regressors before or
after denoising with ICA-AROMA has a detriment on outcome measures,
please see notebook 5.
`<https://github.com/nipreps/fmriprep-notebooks/>`_
References
-----------
.. footbibliography::
"""
default_parameters = preset_strategies[denoise_strategy].copy()
check_parameters = list(default_parameters.keys())
check_parameters.remove("strategy")
# ICA-AROMA only accept the non-aggressive strategy
# ignore user passed value
if "ica_aroma" in default_parameters:
check_parameters.remove("ica_aroma")
user_parameters, not_needed = _update_user_inputs(kwargs,
default_parameters,
check_parameters)
# raise warning about parameters not needed
if not_needed:
warnings.warn("The following parameters are not needed for the "
f"selected strategy '{denoise_strategy}': {not_needed}; "
f"parameters accepted: {check_parameters}"
)
return fmriprep_confounds(img_files, **user_parameters)
|
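The only change in this long docstring is the "volumns"/"volume" wording in the ``sample_mask`` description; the body is identical. A hedged usage sketch (the file path and keyword values are illustrative, and ``fmriprep_confounds_strategy`` is assumed importable from the surrounding module):
def load_simple_confounds(img_file):
    # Keyword names follow the docstring above; values are placeholders.
    return fmriprep_confounds_strategy(
        img_file,
        denoise_strategy="simple",
        global_signal="basic",
    )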
8,945 |
def setup(bot):
"""Setup the coretasks plugin.
The setup phase is used to activate the throttle feature to prevent a flood
of JOIN commands when there are too many channels to join.
"""
bot.memory['join_events_queue'] = collections.deque()
# Manage JOIN flood protection
if bot.settings.core.throttle_join:
wait_interval = max(bot.settings.core.throttle_wait, 1)
@module.interval(wait_interval)
@plugin.label('throttle_join')
def processing_job(bot):
_join_event_processing(bot)
loader.clean_callable(processing_job, bot.settings)
processing_job.plugin_name = 'coretasks'
bot.register_jobs([processing_job])
|
def setup(bot):
"""Set up the coretasks plugin.
The setup phase is used to activate the throttle feature to prevent a flood
of JOIN commands when there are too many channels to join.
"""
bot.memory['join_events_queue'] = collections.deque()
# Manage JOIN flood protection
if bot.settings.core.throttle_join:
wait_interval = max(bot.settings.core.throttle_wait, 1)
@module.interval(wait_interval)
@plugin.label('throttle_join')
def processing_job(bot):
_join_event_processing(bot)
loader.clean_callable(processing_job, bot.settings)
processing_job.plugin_name = 'coretasks'
bot.register_jobs([processing_job])
|
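Only the docstring wording changes ("Setup" becomes the verb "Set up"). The throttle this function wires up queues JOIN events in a deque and drains them on an interval; a minimal sketch of that pattern, independent of the Sopel APIs above (batch size is illustrative):
import collections
join_queue = collections.deque(["#chan-%d" % i for i in range(5)])
def drain(batch_size=2):
    batch = []
    while join_queue and len(batch) < batch_size:
        batch.append(join_queue.popleft())
    return batch
assert drain() == ["#chan-0", "#chan-1"]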
34,878 |
def device_copy(data, src_dev, dst_dev):
"""Copy data from thte source device to the destination device. This
operator helps data transferring between difference contexts for
heterogeneous execution.
Parameters
----------
data : tvm.relay.Expr
The tensor to be copied.
src_dev : Union(:py:class:`TVMContext`, str)
The source device where the data is copied from.
dst_dev : Union(:py:class:`TVMContext`, str)
The destination device where the data is copied to.
Returns
-------
result : tvm.relay.Expr
The copied result.
"""
if isinstance(src_dev, _TVMContext):
src_dev = src_dev.device_type
elif isinstance(src_dev, str):
src_dev = _nd.context(src_dev).device_type
else:
raise ValueError("src_dev is expected to be the type of TVMContext or "
"str, but received %s" % (type(src_dev)))
if isinstance(dst_dev, _TVMContext):
dst_dev = dst_dev.device_type
elif isinstance(dst_dev, str):
dst_dev = _nd.context(dst_dev).device_type
else:
raise ValueError("dst_dev is expected to be the type of TVMContext or "
"str, but received %s" % (type(dst_dev)))
return _make.device_copy(data, src_dev, dst_dev)
|
def device_copy(data, src_dev, dst_dev):
"""Copy data from the source device to the destination device. This
operator helps data transferring between difference contexts for
heterogeneous execution.
Parameters
----------
data : tvm.relay.Expr
The tensor to be copied.
src_dev : Union(:py:class:`TVMContext`, str)
The source device where the data is copied from.
dst_dev : Union(:py:class:`TVMContext`, str)
The destination device where the data is copied to.
Returns
-------
result : tvm.relay.Expr
The copied result.
"""
if isinstance(src_dev, _TVMContext):
src_dev = src_dev.device_type
elif isinstance(src_dev, str):
src_dev = _nd.context(src_dev).device_type
else:
raise ValueError("src_dev is expected to be the type of TVMContext or "
"str, but received %s" % (type(src_dev)))
if isinstance(dst_dev, _TVMContext):
dst_dev = dst_dev.device_type
elif isinstance(dst_dev, str):
dst_dev = _nd.context(dst_dev).device_type
else:
raise ValueError("dst_dev is expected to be the type of TVMContext or "
"str, but received %s" % (type(dst_dev)))
return _make.device_copy(data, src_dev, dst_dev)
|
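The change here fixes a single docstring typo ("thte"). A hedged usage sketch for the operator, assuming a TVM Relay environment (the variable shape and device strings are illustrative and resolved through ``_nd.context`` as in the function above):
# x = relay.var("x", shape=(4,), dtype="float32")
# y = device_copy(x, "cpu", "cuda")   # copy the tensor from CPU to the GPU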
46,880 |
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--plot_data_dir",
default="./plotting/",
type=str,
required=False,
help="The directory to store data for plotting figures.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.")
parser.add_argument(
"--eval_after_first_stage",
action="store_true",
help="Set this flag to evaluate after training only bert (not highway).",
)
parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.bert.init_highway_pooler()
else:
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.roberta.init_highway_pooler()
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.eval_after_first_stage:
result = evaluate(args, model, tokenizer, prefix="")
print_result = get_wanted_result(result)
train(args, train_dataset, model, tokenizer, train_highway=True)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
else:
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway)
print_result = get_wanted_result(result)
logger.info("Result: {}".format(print_result))
if args.eval_each_highway:
last_layer_results = print_result
each_layer_results = []
for i in range(model.num_layers):
logger.info("\n")
_result = evaluate(
args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway
)
if i + 1 < model.num_layers:
each_layer_results.append(get_wanted_result(_result))
each_layer_results.append(last_layer_results)
save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy"
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
np.save(save_fname, np.array(each_layer_results))
info_str = "Score of each layer:"
for i in range(model.num_layers):
info_str += " {:.2f}".format(100 * each_layer_results[i])
logger.info(info_str)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
|
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--plot_data_dir",
default="./plotting/",
type=str,
required=False,
help="The directory to store data for plotting figures.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.")
parser.add_argument(
"--eval_after_first_stage",
action="store_true",
help="Set this flag to evaluate after training only bert (not highway).",
)
parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.bert.init_highway_pooler()
elif args.model_type == "roberta":
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.roberta.init_highway_pooler()
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.eval_after_first_stage:
result = evaluate(args, model, tokenizer, prefix="")
print_result = get_wanted_result(result)
train(args, train_dataset, model, tokenizer, train_highway=True)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
if args.model_type == "bert":
model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy)
else:
model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway)
print_result = get_wanted_result(result)
logger.info("Result: {}".format(print_result))
if args.eval_each_highway:
last_layer_results = print_result
each_layer_results = []
for i in range(model.num_layers):
logger.info("\n")
_result = evaluate(
args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway
)
if i + 1 < model.num_layers:
each_layer_results.append(get_wanted_result(_result))
each_layer_results.append(last_layer_results)
save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy"
if not os.path.exists(os.path.dirname(save_fname)):
os.makedirs(os.path.dirname(save_fname))
np.save(save_fname, np.array(each_layer_results))
info_str = "Score of each layer:"
for i in range(model.num_layers):
info_str += " {:.2f}".format(100 * each_layer_results[i])
logger.info(info_str)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
|
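Amid this long script, the only change is replacing the bare ``else`` with ``elif args.model_type == "roberta"`` when configuring the early-exit entropy: with ``else``, any unrecognised model type would fall into the RoBERTa branch and fail on ``model.roberta``, whereas the ``elif`` leaves it untouched. A minimal sketch (editor's illustration):
class DummyModel:
    pass                      # has neither a .bert nor a .roberta attribute
model, model_type = DummyModel(), "distilbert"
if model_type == "bert":
    _ = model.bert            # BERT-specific early-exit setup would go here
elif model_type == "roberta":
    _ = model.roberta         # a bare ``else`` would reach this and raise AttributeError
# with ``elif`` the unknown type falls through without touching the model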
31,678 |
def test_copy_dict_value():
"""
Scenario: Copy a value from one Dict toi another.
Given:
- 2 Dicts and 2 keys.
When:
- Generic method.
Then:
- Copy the value, if present.
"""
from GoogleDrive import copy_dict_value
dict_1 = {
'key1': 'val1',
'key2': 'val2',
}
dict_2 = {
'key3': 'val3',
}
# copy_dict_value(source_dict: dict_1, dest_dict: dict_2, source_dict_key: str, dest_dict_key: str = None):
assert 'key1' not in dict_2
copy_dict_value(source_dict=dict_1, dest_dict=dict_2, source_dict_key='key1')
assert dict_2['key1'] == 'val1'
assert 'key21' not in dict_2
copy_dict_value(source_dict=dict_1, dest_dict=dict_2, source_dict_key='key2', dest_dict_key='key_21')
assert dict_2['key_21'] == 'val2'
|
def test_copy_dict_value():
"""
Scenario: Copy a value from one Dict to another.
Given:
- 2 Dicts and 2 keys.
When:
- Generic method.
Then:
- Copy the value, if present.
"""
from GoogleDrive import copy_dict_value
dict_1 = {
'key1': 'val1',
'key2': 'val2',
}
dict_2 = {
'key3': 'val3',
}
# copy_dict_value(source_dict: dict_1, dest_dict: dict_2, source_dict_key: str, dest_dict_key: str = None):
assert 'key1' not in dict_2
copy_dict_value(source_dict=dict_1, dest_dict=dict_2, source_dict_key='key1')
assert dict_2['key1'] == 'val1'
assert 'key21' not in dict_2
copy_dict_value(source_dict=dict_1, dest_dict=dict_2, source_dict_key='key2', dest_dict_key='key_21')
assert dict_2['key_21'] == 'val2'
|
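The diff only fixes "toi" in the scenario line. The helper itself is imported from ``GoogleDrive`` and not shown; a hypothetical implementation that would satisfy the assertions above:
def copy_dict_value(source_dict, dest_dict, source_dict_key, dest_dict_key=None):
    # Copy the value only when the key is present; fall back to the source
    # key name when no destination key is given (assumed behaviour).
    if source_dict_key in source_dict:
        dest_dict[dest_dict_key or source_dict_key] = source_dict[source_dict_key]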
10,537 |
def main():
ORIGINAL_FILE = 'requirements.txt'
VENDORED_COPY = 'test/lib/ansible_test/_data/requirements/sanity.import-plugins.txt'
requirements_1 = read_file(ORIGINAL_FILE)
requirements_2 = read_file(VENDORED_COPY)
if requirements_1 is not None and requirements_2 is not None:
if requirements_1 != requirements_2:
print('%s:%d:%d: Not identical to %s' % (VENDORED_COPY, 0, 0, ORIGINAL_FILE))
sys.exit()
|
def main():
ORIGINAL_FILE = 'requirements.txt'
VENDORED_COPY = 'test/lib/ansible_test/_data/requirements/sanity.import-plugins.txt'
requirements_1 = read_file(ORIGINAL_FILE)
requirements_2 = read_file(VENDORED_COPY)
if requirements_1 is not None and requirements_2 is not None:
if requirements_1 != requirements_2:
print('%s:%d:%d: must be identical to %s' % (VENDORED_COPY, 0, 0, ORIGINAL_FILE))
sys.exit()
|
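The change reworks the sanity-check message from "Not identical to" into the imperative "must be identical to". ``read_file`` is not shown in this row; a hypothetical helper consistent with the ``is not None`` guards above:
def read_file(path):
    try:
        with open(path) as file_obj:
            return file_obj.read()
    except OSError:
        return None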
49,948 |
def _save(im, fp, filename):
fp.write(_MAGIC) # (2+2)
sizes = im.encoderinfo.get(
"sizes",
[(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
)
width, height = im.size
sizes = filter(
lambda x: False
if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256)
else True,
sizes,
)
sizes = list(sizes)
fp.write(struct.pack("<H", len(sizes))) # idCount(2)
offset = fp.tell() + len(sizes) * 16
alt_images = {im.size: im for im in im.encoderinfo.get("append_images", [])}
for size in sizes:
width, height = size
# 0 means 256
fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
fp.write(struct.pack("<H", 32)) # wBitCount(2)
image_io = BytesIO()
if size in alt_images:
tmp = alt_images[size]
else:
# TODO: invent a more convenient method for proportional scalings
tmp = im.copy()
tmp.thumbnail(size, Image.LANCZOS, reducing_gap=None)
tmp.save(image_io, "png")
image_io.seek(0)
image_bytes = image_io.read()
bytes_len = len(image_bytes)
fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
current = fp.tell()
fp.seek(offset)
fp.write(image_bytes)
offset = offset + bytes_len
fp.seek(current)
|
def _save(im, fp, filename):
fp.write(_MAGIC) # (2+2)
sizes = im.encoderinfo.get(
"sizes",
[(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
)
width, height = im.size
sizes = filter(
lambda x: False
if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256)
else True,
sizes,
)
sizes = list(sizes)
fp.write(struct.pack("<H", len(sizes))) # idCount(2)
offset = fp.tell() + len(sizes) * 16
alt_images = {im.size: im for im in im.encoderinfo.get("append_images", [])}
for size in sizes:
width, height = size
# 0 means 256
fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
fp.write(struct.pack("<H", 32)) # wBitCount(2)
image_io = BytesIO()
tmp = alt_images.get(size)
if not tmp:
# TODO: invent a more convenient method for proportional scalings
tmp = im.copy()
tmp.thumbnail(size, Image.LANCZOS, reducing_gap=None)
tmp.save(image_io, "png")
image_io.seek(0)
image_bytes = image_io.read()
bytes_len = len(image_bytes)
fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
current = fp.tell()
fp.seek(offset)
fp.write(image_bytes)
offset = offset + bytes_len
fp.seek(current)
|
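The modified version collapses the membership test and lookup into a single ``dict.get``, falling back to scaling a copy of the base image when no appended image matches the size. A minimal sketch of the fallback behaviour:
alt_images = {(16, 16): "preprovided-16x16-frame"}
tmp = alt_images.get((32, 32))
if not tmp:
    tmp = "scaled-copy-of-base-image"
assert tmp == "scaled-copy-of-base-image"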
8,867 |
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded(limit=10))
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
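The only change passes ``limit=10`` to ``s.gilded()``. PRAW-style listing methods return lazy generators, so bounding the limit caps how many submissions ``list()`` materialises before the sticky filter runs; a minimal sketch with a stand-in generator:
def fake_gilded(limit=None):
    for i in range(limit if limit is not None else 100):
        yield "post-%d" % i
assert len(list(fake_gilded(limit=10))) == 10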
28,559 |
def plot_joint(
ax,
figsize,
plotters,
kind,
contour,
fill_last,
joint_kwargs,
gridsize,
textsize,
marginal_kwargs,
backend_kwargs,
show,
):
"""Bokeh joint plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)
joint_kwargs = {} if joint_kwargs is None else joint_kwargs
if marginal_kwargs is None:
marginal_kwargs = {}
marginal_kwargs.setdefault("plot_kwargs", {})
marginal_kwargs["plot_kwargs"]["line_width"] = linewidth
if ax is None:
backend_kwargs_join = backend_kwargs.copy()
backend_kwargs_join.setdefault("width", int(figsize[0] * dpi * 0.8))
backend_kwargs_join.setdefault("height", int(figsize[1] * dpi * 0.8))
backend_kwargs_hist_x = backend_kwargs.copy()
backend_kwargs_hist_x.setdefault("width", int(figsize[0] * dpi * 0.8))
backend_kwargs_hist_x.setdefault("height", int(figsize[1] * dpi * 0.2))
backend_kwargs_hist_y = backend_kwargs.copy()
backend_kwargs_hist_y.setdefault("width", int(figsize[0] * dpi * 0.2))
backend_kwargs_hist_y.setdefault("height", int(figsize[1] * dpi * 0.8))
axjoin = bkp.figure(**backend_kwargs_join)
backend_kwargs_hist_x["x_range"] = axjoin.x_range
backend_kwargs_hist_y["y_range"] = axjoin.y_range
ax_hist_x = bkp.figure(**backend_kwargs_hist_x)
ax_hist_y = bkp.figure(**backend_kwargs_hist_y)
elif len(ax) == 2 and len(ax[0]) == 2 and len(ax[1]) == 2:
ax_hist_x, _ = ax[0]
axjoin, ax_hist_y = ax[1]
else:
raise ValueError("ax must be of lenght 3 but found {}".format(len(ax)))
# Set labels for axes
x_var_name = make_label(plotters[0][0], plotters[0][1])
y_var_name = make_label(plotters[1][0], plotters[1][1])
axjoin.xaxis.axis_label = x_var_name
axjoin.yaxis.axis_label = y_var_name
# Flatten data
x = plotters[0][2].flatten()
y = plotters[1][2].flatten()
if kind == "scatter":
axjoin.circle(x, y, **joint_kwargs)
elif kind == "kde":
plot_kde(
x,
y,
contour=contour,
fill_last=fill_last,
ax=axjoin,
backend="bokeh",
backend_kwargs={},
show=False,
**joint_kwargs
)
else:
if gridsize == "auto":
gridsize = int(len(x) ** 0.35)
gridsize = gridsize / 10
axjoin.hexbin(x, y, size=gridsize, **joint_kwargs)
marginal_kwargs["plot_kwargs"].setdefault("line_color", "black")
for val, ax_, rotate in ((x, ax_hist_x, False), (y, ax_hist_y, True)):
plot_dist(
val,
textsize=xt_labelsize,
rotated=rotate,
ax=ax_,
backend="bokeh",
backend_kwargs={},
show=False,
**marginal_kwargs
)
show_layout([[ax_hist_x, None], [axjoin, ax_hist_y]], show, force_layout=True)
return np.array([[ax_hist_x, None], [axjoin, ax_hist_y]])
|
def plot_joint(
ax,
figsize,
plotters,
kind,
contour,
fill_last,
joint_kwargs,
gridsize,
textsize,
marginal_kwargs,
backend_kwargs,
show,
):
"""Bokeh joint plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize)
joint_kwargs = {} if joint_kwargs is None else joint_kwargs
if marginal_kwargs is None:
marginal_kwargs = {}
marginal_kwargs.setdefault("plot_kwargs", {})
marginal_kwargs["plot_kwargs"].setdefault("line_width", linewidth)
if ax is None:
backend_kwargs_join = backend_kwargs.copy()
backend_kwargs_join.setdefault("width", int(figsize[0] * dpi * 0.8))
backend_kwargs_join.setdefault("height", int(figsize[1] * dpi * 0.8))
backend_kwargs_hist_x = backend_kwargs.copy()
backend_kwargs_hist_x.setdefault("width", int(figsize[0] * dpi * 0.8))
backend_kwargs_hist_x.setdefault("height", int(figsize[1] * dpi * 0.2))
backend_kwargs_hist_y = backend_kwargs.copy()
backend_kwargs_hist_y.setdefault("width", int(figsize[0] * dpi * 0.2))
backend_kwargs_hist_y.setdefault("height", int(figsize[1] * dpi * 0.8))
axjoin = bkp.figure(**backend_kwargs_join)
backend_kwargs_hist_x["x_range"] = axjoin.x_range
backend_kwargs_hist_y["y_range"] = axjoin.y_range
ax_hist_x = bkp.figure(**backend_kwargs_hist_x)
ax_hist_y = bkp.figure(**backend_kwargs_hist_y)
elif len(ax) == 2 and len(ax[0]) == 2 and len(ax[1]) == 2:
ax_hist_x, _ = ax[0]
axjoin, ax_hist_y = ax[1]
else:
raise ValueError("ax must be of lenght 3 but found {}".format(len(ax)))
# Set labels for axes
x_var_name = make_label(plotters[0][0], plotters[0][1])
y_var_name = make_label(plotters[1][0], plotters[1][1])
axjoin.xaxis.axis_label = x_var_name
axjoin.yaxis.axis_label = y_var_name
# Flatten data
x = plotters[0][2].flatten()
y = plotters[1][2].flatten()
if kind == "scatter":
axjoin.circle(x, y, **joint_kwargs)
elif kind == "kde":
plot_kde(
x,
y,
contour=contour,
fill_last=fill_last,
ax=axjoin,
backend="bokeh",
backend_kwargs={},
show=False,
**joint_kwargs
)
else:
if gridsize == "auto":
gridsize = int(len(x) ** 0.35)
gridsize = gridsize / 10
axjoin.hexbin(x, y, size=gridsize, **joint_kwargs)
marginal_kwargs["plot_kwargs"].setdefault("line_color", "black")
for val, ax_, rotate in ((x, ax_hist_x, False), (y, ax_hist_y, True)):
plot_dist(
val,
textsize=xt_labelsize,
rotated=rotate,
ax=ax_,
backend="bokeh",
backend_kwargs={},
show=False,
**marginal_kwargs
)
show_layout([[ax_hist_x, None], [axjoin, ax_hist_y]], show, force_layout=True)
return np.array([[ax_hist_x, None], [axjoin, ax_hist_y]])
|
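The layout trick worth noting in the `plot_joint` pair above is that the marginal panels reuse the joint panel's `x_range`/`y_range`, so all three figures pan and zoom together. A minimal, self-contained Bokeh sketch of that pattern (plain Bokeh only, illustrative sizes, no ArviZ data structures):

import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure, show

x, y = np.random.randn(2, 500)

axjoin = figure(width=400, height=400)
# The marginal panels share the joint panel's ranges, keeping them in sync.
ax_hist_x = figure(width=400, height=100, x_range=axjoin.x_range)
ax_hist_y = figure(width=100, height=400, y_range=axjoin.y_range)

axjoin.circle(x, y, size=3)
hx, edges_x = np.histogram(x, bins=30)
ax_hist_x.quad(top=hx, bottom=0, left=edges_x[:-1], right=edges_x[1:])
hy, edges_y = np.histogram(y, bins=30)
ax_hist_y.quad(right=hy, left=0, bottom=edges_y[:-1], top=edges_y[1:])

show(gridplot([[ax_hist_x, None], [axjoin, ax_hist_y]]))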
4,702 |
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
clear=False,
**kwargs
):
"""
Create a new figure.
Parameters
----------
num : integer or string, optional, default: None
        If not provided, a new figure will be created, and the figure number
        will be incremented. The figure object holds this number in a `number`
        attribute.
        If num is provided, and a figure with this id already exists, make
        it active, and return a reference to it. If this figure does not
        exist, create it and return it.
If num is a string, the window title will be set to this figure's
`num`.
figsize : tuple of floats, optional, default: None
width, height in inches. If not provided, defaults to
:rc:`figure.figsize` = ``[6.4, 4.8]``.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to
:rc:`figure.dpi` = ``100``.
facecolor : color spec
the background color. If not provided, defaults to
:rc:`figure.facecolor` = ``'w'``.
edgecolor : color spec
the border color. If not provided, defaults to
:rc:`figure.edgecolor` = ``'w'``.
frameon : bool, optional, default: True
If False, suppress drawing the figure frame.
FigureClass : subclass of `~matplotlib.figure.Figure`
Optionally use a custom `.Figure` instance.
clear : bool, optional, default: False
If True and the figure already exists, then it is cleared.
Returns
-------
figure : `~matplotlib.figure.Figure`
The `.Figure` instance returned will also be passed to
        new_figure_manager in the backends, which allows hooking custom
`.Figure` classes into the pyplot interface. Additional kwargs will be
passed to the `.Figure` init function.
Notes
-----
If you are creating many figures, make sure you explicitly call
:func:`.pyplot.close` on the figures you are not using, because this will
enable pyplot to properly clean up the memory.
`~matplotlib.rcParams` defines the default values, which can be modified
in the matplotlibrc file.
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif isinstance(num, str):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
cbook._warn_external(
"close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if len(allnums) >= max_open_warning >= 1:
cbook._warn_external(
"More than %d figures have been opened. Figures "
"created through the pyplot interface "
"(`matplotlib.pyplot.figure`) are retained until "
"explicitly closed and may consume too much memory. "
"(To control this warning, see the rcParam "
"`figure.max_open_warning`)." %
max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
fig = figManager.canvas.figure
fig.number = num
# make sure backends (inline) that we don't ship that expect this
# to be called in plotting commands to make the figure call show
# still work. There is probably a better way to do this in the
# FigureManager base class.
if matplotlib.is_interactive():
draw_if_interactive()
if _INSTALL_FIG_OBSERVER:
fig.stale_callback = _auto_draw_if_interactive
if clear:
figManager.canvas.figure.clear()
return figManager.canvas.figure
|
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
clear=False,
**kwargs
):
"""
Create a new figure.
Parameters
----------
num : integer or string, optional, default: None
        If not provided, a new figure will be created, and the figure number
        will be incremented. The figure object holds this number in a `number`
        attribute.
        If num is provided, and a figure with this id already exists, make
        it active, and return a reference to it. If this figure does not
        exist, create it and return it.
If num is a string, the window title will be set to this figure's
`num`.
figsize : (float, float), optional, default: None
width, height in inches. If not provided, defaults to
:rc:`figure.figsize` = ``[6.4, 4.8]``.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to
:rc:`figure.dpi` = ``100``.
facecolor : color spec
the background color. If not provided, defaults to
:rc:`figure.facecolor` = ``'w'``.
edgecolor : color spec
the border color. If not provided, defaults to
:rc:`figure.edgecolor` = ``'w'``.
frameon : bool, optional, default: True
If False, suppress drawing the figure frame.
FigureClass : subclass of `~matplotlib.figure.Figure`
Optionally use a custom `.Figure` instance.
clear : bool, optional, default: False
If True and the figure already exists, then it is cleared.
Returns
-------
figure : `~matplotlib.figure.Figure`
The `.Figure` instance returned will also be passed to
        new_figure_manager in the backends, which allows hooking custom
`.Figure` classes into the pyplot interface. Additional kwargs will be
passed to the `.Figure` init function.
Notes
-----
If you are creating many figures, make sure you explicitly call
:func:`.pyplot.close` on the figures you are not using, because this will
enable pyplot to properly clean up the memory.
`~matplotlib.rcParams` defines the default values, which can be modified
in the matplotlibrc file.
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif isinstance(num, str):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
cbook._warn_external(
"close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if len(allnums) >= max_open_warning >= 1:
cbook._warn_external(
"More than %d figures have been opened. Figures "
"created through the pyplot interface "
"(`matplotlib.pyplot.figure`) are retained until "
"explicitly closed and may consume too much memory. "
"(To control this warning, see the rcParam "
"`figure.max_open_warning`)." %
max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
fig = figManager.canvas.figure
fig.number = num
# make sure backends (inline) that we don't ship that expect this
# to be called in plotting commands to make the figure call show
# still work. There is probably a better way to do this in the
# FigureManager base class.
if matplotlib.is_interactive():
draw_if_interactive()
if _INSTALL_FIG_OBSERVER:
fig.stale_callback = _auto_draw_if_interactive
if clear:
figManager.canvas.figure.clear()
return figManager.canvas.figure
|
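As a quick illustration of the `num`/`clear` semantics described in the docstring above, the following sketch (label and size are arbitrary) shows that calling `figure` twice with the same string label returns the same `Figure`, and that `clear=True` wipes it before reuse:

import matplotlib.pyplot as plt

# First call creates a figure labelled "diagnostics" with the given size.
fig1 = plt.figure("diagnostics", figsize=(4, 3))
fig1.add_subplot(111).plot([0, 1], [0, 1])

# Second call with the same label returns the very same Figure object;
# clear=True removes its axes before it is handed back.
fig2 = plt.figure("diagnostics", clear=True)
assert fig1 is fig2 and not fig2.axes

plt.close("diagnostics")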
29,741 |
def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
rg_mode=None, rg_preamp_db=0.0):
"""
    Create the sequence of command-line strings needed to call ffmpeg, along
    with the parameters necessary to decode the given input (file) to the given
    format, at the given offset and for the given length, to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
rg_mode : {'track','album', None}, optional
Specify the ReplayGain volume-levelling mode (None to disable).
rg_preamp_db : float, optional
Increase the volume by this many dB after applying ReplayGain tags.
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
if rg_mode:
audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
% (rg_mode, rg_preamp_db))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
|
def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
replaygain_mode=None, replaygain_preamp=0.):
"""
    Create the sequence of command-line strings needed to call ffmpeg, along
    with the parameters necessary to decode the given input (file) to the given
    format, at the given offset and for the given length, to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
    replaygain_mode : {'track','album', None}, optional
        Specify the ReplayGain volume-levelling mode (None to disable).
    replaygain_preamp : float, optional
        Increase the volume by this many dB after applying ReplayGain tags.
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
    if replaygain_mode:
        audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
                        % (replaygain_mode, replaygain_preamp))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
|
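To make the branch structure above concrete, here is a hedged example of the command list such a call is expected to assemble; the file names are made up, and the exact output follows directly from the branches in the function body:

# Hypothetical input: decode 10 s of "song.flac" starting at 5 s to 16-bit PCM.
call = _ffmpeg_call('song.flac', 'out.raw', fmt='s16le',
                    sample_rate=44100, num_channels=2,
                    skip=5, max_len=10)
# Based on the code above, `call` should be:
# ['ffmpeg', '-v', 'quiet', '-y',
#  '-ss', '5.000000', '-i', 'song.flac',
#  '-f', 's16le', '-t', '10.000000',
#  '-ac', '2', '-ar', '44100', 'out.raw']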
43,627 |
def decompose_hamiltonian(H, hide_identity=False):
"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
        hide_identity (bool): if True, omit the ~.Identity observables from the tensor products in the results
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
def decompose_hamiltonian(H, hide_identity=False):
r"""Decomposes a Hermitian matrix into a linear combination of Pauli operators.
Args:
H (array[complex]): an Hermitian matrix of dimension :math:`2^n\times 2^n`
Keyword Args:
        hide_identity (bool): if True, omit the ~.Identity observables from the tensor products in the results
Returns:
tuple[list[float], list[~.Observable]]: Returns a list of tensor products of PennyLane Pauli observables, as
well as the corresponding coefficients for each tensor product.
**Example:**
We can use this function to compute the Pauli operator decomposition of an arbitrary Hermitian
matrix:
>>> A = np.array([[-2, -2+1j, -2, -2], [-2-1j, 0, 0, -1], [-2, 0, -2, -1], [-2, -1, -1, 0]])
>>> coeffs, obs_list = decompose_hamiltonian(A)
>>> coeffs
[-1.0, -1.5, -0.5, -1.0, -1.5, -1.0, -0.5, 1.0, -0.5, -0.5]
We can use the output coefficients and tensor Pauli terms to construct a :class:`~.Hamiltonian`:
>>> H = qml.Hamiltonian(coeffs, obs_list)
>>> print(H)
(-1.0) [I0 I1]
+ (-1.5) [X1]
+ (-0.5) [Y1]
+ (-1.0) [Z1]
+ (-1.5) [X0]
+ (-1.0) [X0 X1]
+ (-0.5) [X0 Z1]
+ (1.0) [Y0 Y1]
+ (-0.5) [Z0 X1]
+ (-0.5) [Z0 Y1]
This Hamiltonian can then be used in defining VQE problems using :class:`~VQECost`.
"""
n = int(np.log2(len(H)))
N = 2 ** n
if len(H) - N != 0:
raise ValueError("Hamiltonian should be in the form (n^2 x n^2), for any n>=1")
if not np.allclose(H, H.conj().T):
raise ValueError("The Hamiltonian is not Hermitian")
paulis = [qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ]
obs = []
coeffs = []
for term in itertools.product(paulis, repeat=n):
matrices = [i._matrix() for i in term]
coeff = np.trace(functools.reduce(np.kron, matrices) @ H) / N
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
coeffs.append(coeff)
if not all(t is qml.Identity for t in term):
obs.append(
functools.reduce(
matmul,
[
t(i)
for i, t in enumerate(term)
if t is not qml.Identity or not hide_identity
],
)
)
else:
obs.append(functools.reduce(matmul, [t(i) for i, t in enumerate(term)]))
return coeffs, obs
|
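The coefficient computed inside the loop above is the Hilbert-Schmidt inner product c_P = Tr(P H) / 2^n. A small standalone NumPy check of that identity for a single qubit (no PennyLane needed; the matrix H below is arbitrary):

import numpy as np

I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

# Arbitrary single-qubit Hermitian matrix.
H = np.array([[1.0, 2 - 1j], [2 + 1j, -3.0]])

# Hilbert-Schmidt coefficients: c_P = Tr(P @ H) / 2**n with n = 1.
paulis = {'I': I, 'X': X, 'Y': Y, 'Z': Z}
coeffs = {name: np.trace(P @ H).real / 2 for name, P in paulis.items()}

# Reconstructing H from the coefficients recovers the original matrix.
H_rec = sum(c * paulis[name] for name, c in coeffs.items())
assert np.allclose(H_rec, H)
print(coeffs)  # {'I': -1.0, 'X': 2.0, 'Y': 1.0, 'Z': 2.0}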
1,308 |
def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
Y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(DummyVoteClassifier())
bagging_classifier.fit(A, Y)
# All ensemble members predict True; BaggingClassifier should predict True
assert(bagging_classifier.predict(A).all())
|
def test_bagging_classifier_voting():
# Test BaggingClassifier when base_estimator doesn't define predict_proba
A = np.random.rand(10, 4)
Y = np.random.randint(2, size=10, dtype=np.bool)
bagging_classifier = BaggingClassifier(DummyVoteClassifier())
bagging_classifier.fit(A, Y)
    assert bagging_classifier.predict(A).all()
|
31,857 |
def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug("Performing ignore anomaly operation for object {name}".format(name=object_name))
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError('No anomalous object found by given name')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return "Ignored object {name}".format(name=object_name)
|
def ignore_ransomware_anomaly_command(client: Client, args: Dict[str, Any]) -> str:
"""Ignore detected anomalous object on Helios.
:type client: ``Client``
:param Client: cohesity helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with ignore anomaly parameters.
:return: success message of the ignore anomaly operation.
:rtype: ``str``
"""
# Filter ransomware alert for given object name.
alert_id = ''
object_name = args.get('object_name')
demisto.debug("Performing ignore anomaly operation for object {name}".format(name=object_name))
resp = client.get_ransomware_alerts()
for alert in resp:
property_dict = _get_property_dict(alert['propertyList'])
if property_dict.get('object', "") == object_name:
alert_id = alert.get('id')
if alert_id == '':
raise ValueError('No anomalous object found by given name')
# Suppress ransomware alert.
client.suppress_ransomware_alert_by_id(alert_id)
return f"Ignored object {object_name}."
|
31,948 |
def fetch_incidents():
last_run = demisto.getLastRun()
if 'last_fetch_time' in last_run:
last_fetch_time = datetime.strptime(last_run['last_fetch_time'], DATETIME_FORMAT)
demisto.info(f'Found last run, fetching new alerts from {last_fetch_time}')
else:
days_back = int(demisto.params().get('first_fetch_days', DEFAULT_DAYS_BACK))
if days_back > MAX_DAYS_BACK:
demisto.info(f'Days back({days_back}) is larger than the maximum, setting to {MAX_DAYS_BACK}')
days_back = MAX_DAYS_BACK
last_fetch_time = datetime.now() - timedelta(days=days_back)
demisto.info(f'First run, fetching alerts from {last_fetch_time}')
max_incidents_to_return = int(demisto.params().get('max_incidents', DEFAULT_INCIDENTS))
if max_incidents_to_return > MAX_INCIDENTS:
demisto.info(f'Max incidents({max_incidents_to_return}) is larger than the maximum, setting to {MAX_INCIDENTS}')
max_incidents_to_return = MAX_INCIDENTS
sixgill_alerts_client = SixgillActionableAlertClient(client_id=demisto.params()['client_id'],
client_secret=demisto.params()['client_secret'],
channel_id=CHANNEL_CODE,
logger=demisto,
session=SESSION,
verify=VERIFY)
filter_alerts_kwargs = get_incident_init_params()
incidents = []
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, **filter_alerts_kwargs)
newest_incident_date = datetime.strptime(items[0].get('date'), DATETIME_FORMAT)
offset = 0
items_to_add = []
if newest_incident_date > last_fetch_time:
# finding all new alerts since last fetch time
while items:
for item in items:
if datetime.strptime(item.get('date'), DATETIME_FORMAT) > last_fetch_time:
items_to_add.append(item)
if len(items_to_add) - offset == len(items):
offset += len(items)
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, offset=offset,
**filter_alerts_kwargs)
else:
items = []
demisto.info(f'Found {len(items_to_add)} new alerts since {last_fetch_time}')
# getting more info about oldest ~max_incidents_to_return(can be more because of sub alerts)
if len(items_to_add):
items_to_add.reverse()
newest_incident_date = items_to_add[-1].get('date')
for item in items_to_add:
item_info = sixgill_alerts_client.get_actionable_alert(actionable_alert_id=item.get('id'))
item_info['date'] = item.get('date')
new_incidents = item_to_incident(item_info, sixgill_alerts_client)
incidents.extend(new_incidents)
if len(incidents) >= max_incidents_to_return:
newest_incident_date = item.get('date')
break
demisto.info(f'Adding {len(incidents)} to demisto')
demisto.incidents(incidents)
if len(incidents):
demisto.info(f'Update last fetch time to: {newest_incident_date}')
demisto.setLastRun({
'last_fetch_time': newest_incident_date
})
|
def fetch_incidents():
last_run = demisto.getLastRun()
if 'last_fetch_time' in last_run:
last_fetch_time = datetime.strptime(last_run['last_fetch_time'], DATETIME_FORMAT)
demisto.info(f'Found last run, fetching new alerts from {last_fetch_time}')
else:
days_back = int(demisto.params().get('first_fetch_days', DEFAULT_DAYS_BACK))
if days_back > MAX_DAYS_BACK:
demisto.info(f'Days back({days_back}) is larger than the maximum, setting to {MAX_DAYS_BACK}')
days_back = MAX_DAYS_BACK
last_fetch_time = datetime.now() - timedelta(days=days_back)
demisto.info(f'First run, fetching alerts from {last_fetch_time}')
max_incidents_to_return = int(demisto.params().get('max_fetch', DEFAULT_INCIDENTS))
if max_incidents_to_return > MAX_INCIDENTS:
demisto.info(f'Max incidents({max_incidents_to_return}) is larger than the maximum, setting to {MAX_INCIDENTS}')
max_incidents_to_return = MAX_INCIDENTS
sixgill_alerts_client = SixgillActionableAlertClient(client_id=demisto.params()['client_id'],
client_secret=demisto.params()['client_secret'],
channel_id=CHANNEL_CODE,
logger=demisto,
session=SESSION,
verify=VERIFY)
filter_alerts_kwargs = get_incident_init_params()
incidents = []
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, **filter_alerts_kwargs)
newest_incident_date = datetime.strptime(items[0].get('date'), DATETIME_FORMAT)
offset = 0
items_to_add = []
if newest_incident_date > last_fetch_time:
# finding all new alerts since last fetch time
while items:
for item in items:
if datetime.strptime(item.get('date'), DATETIME_FORMAT) > last_fetch_time:
items_to_add.append(item)
if len(items_to_add) - offset == len(items):
offset += len(items)
items = sixgill_alerts_client.get_actionable_alerts_bulk(limit=MAX_INCIDENTS, offset=offset,
**filter_alerts_kwargs)
else:
items = []
demisto.info(f'Found {len(items_to_add)} new alerts since {last_fetch_time}')
# getting more info about oldest ~max_incidents_to_return(can be more because of sub alerts)
if len(items_to_add):
items_to_add.reverse()
newest_incident_date = items_to_add[-1].get('date')
for item in items_to_add:
item_info = sixgill_alerts_client.get_actionable_alert(actionable_alert_id=item.get('id'))
item_info['date'] = item.get('date')
new_incidents = item_to_incident(item_info, sixgill_alerts_client)
incidents.extend(new_incidents)
if len(incidents) >= max_incidents_to_return:
newest_incident_date = item.get('date')
break
demisto.info(f'Adding {len(incidents)} to demisto')
demisto.incidents(incidents)
if len(incidents):
demisto.info(f'Update last fetch time to: {newest_incident_date}')
demisto.setLastRun({
'last_fetch_time': newest_incident_date
})
|
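The paging logic in both versions above advances `offset` only when every alert on the current page was new, and stops as soon as a page contains an already-seen alert. A toy, self-contained sketch of that loop (the `get_page` helper and page size are hypothetical, purely to illustrate the pattern):

# Toy illustration of the offset-based paging loop used above.
PAGE_SIZE = 3
ALL_ALERTS = list(range(10))  # pretend alert ids, newest first

def get_page(offset=0):
    # Hypothetical stand-in for get_actionable_alerts_bulk().
    return ALL_ALERTS[offset:offset + PAGE_SIZE]

def collect_new(is_new):
    items, offset, items_to_add = get_page(), 0, []
    while items:
        for item in items:
            if is_new(item):
                items_to_add.append(item)
        if len(items_to_add) - offset == len(items):
            # Every item on this page was new: fetch the next page.
            offset += len(items)
            items = get_page(offset=offset)
        else:
            items = []  # hit an already-seen alert, stop paging
    return items_to_add

print(collect_new(lambda alert: alert < 7))  # [0, 1, 2, 3, 4, 5, 6]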
31,575 |
def fetch_records(client: Client, url_suffix, prefix, key, params):
results = fetch_record_command(client, url_suffix, prefix, key, params, None)
return_results(results)
|
def fetch_records(client: Client, url_suffix, prefix, key, params):
results = fetch_record_command(client, url_suffix, prefix, key, params)
return_results(results)
|
3,022 |
def interpolate_1d_fill(
values,
method="pad",
axis=0,
limit=None,
limit_area=None,
fill_value=None,
dtype=None,
):
"""
    This is a 1D-version of `interpolate_2d`, which is used for methods `pad`
    and `backfill` when interpolating. This 1D-version is necessary to be
    able to handle kwarg `limit_area` via the function
    `_derive_indices_of_nans_to_preserve`. It is used the same way as the
    1D-interpolation functions which are based on scipy-interpolation, i.e.
    via np.apply_along_axis.
    """
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
def interpolate_1d_fill(
values,
method="pad",
axis=0,
limit=None,
limit_area: Optional[str] = None,
fill_value=None,
dtype=None,
):
"""
    This is a 1D-version of `interpolate_2d`, which is used for methods `pad`
    and `backfill` when interpolating. This 1D-version is necessary to be
    able to handle kwarg `limit_area` via the function
    `_derive_indices_of_nans_to_preserve`. It is used the same way as the
    1D-interpolation functions which are based on scipy-interpolation, i.e.
    via np.apply_along_axis.
    """
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
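For intuition about what `limit_area` restricts, here is a NumPy-only sketch of a forward fill that only touches NaNs lying strictly between the first and last valid entries, roughly what `limit_area="inside"` means for `method="pad"`; it is a conceptual stand-in, not the pandas implementation:

import numpy as np

def pad_inside(values):
    # Forward-fill only NaNs between the first and last valid entries.
    out = np.asarray(values, dtype=float).copy()
    valid = np.flatnonzero(~np.isnan(out))
    if valid.size == 0:
        return out
    first, last = valid[0], valid[-1]
    for i in range(first + 1, last):
        if np.isnan(out[i]):
            out[i] = out[i - 1]
    return out

print(pad_inside([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan]))
# -> [nan  1.  1.  1.  4. nan]; leading and trailing NaNs are preserved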
47,199 |
def get_list_of_files(
path_or_repo: Union[str, os.PathLike],
revision: Optional[str] = None,
use_auth_token: Optional[Union[bool, str]] = None,
) -> List[str]:
"""
Gets the list of files inside :obj:`path_or_repo`.
Args:
path_or_repo (:obj:`str` or :obj:`os.PathLike`):
Can be either the id of a repo on huggingface.co or a path to a `directory`.
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
Returns:
:obj:`List[str]`: The list of files available in :obj:`path_or_repo`.
"""
path_or_repo = str(path_or_repo)
# If path_or_repo is a folder, we just return what is inside (subdirectories included).
if os.path.isdir(path_or_repo):
list_of_files = []
for path, dir_names, file_names in os.walk(path_or_repo):
list_of_files.extend([os.path.join(path, f) for f in file_names])
return list_of_files
# Can't grab the files if we are on offline mode.
if is_offline_mode():
return []
# Otherwise we grab the token and use the model_info method.
if isinstance(use_auth_token, str):
token = use_auth_token
elif use_auth_token is True:
token = HfFolder.get_token()
else:
token = None
model_info = HfApi(endpoint=HUGGINGFACE_CO_RESOLVE_ENDPOINT).model_info(
path_or_repo, revision=revision, token=token
)
return [f.rfilename for f in model_info.siblings]
|
def get_list_of_files(
path_or_repo: Union[str, os.PathLike],
revision: Optional[str] = None,
use_auth_token: Optional[Union[bool, str]] = None,
) -> List[str]:
"""
Gets the list of files inside :obj:`path_or_repo`.
Args:
path_or_repo (:obj:`str` or :obj:`os.PathLike`):
Can be either the id of a repo on huggingface.co or a path to a `directory`.
revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
Returns:
:obj:`List[str]`: The list of files available in :obj:`path_or_repo`.
"""
path_or_repo = str(path_or_repo)
# If path_or_repo is a folder, we just return what is inside (subdirectories included).
if os.path.isdir(path_or_repo):
list_of_files = []
for path, dir_names, file_names in os.walk(path_or_repo):
list_of_files.extend([os.path.join(path, f) for f in file_names])
return list_of_files
# Can't grab the files if we are on offline mode.
if is_offline_mode():
return []
# Otherwise we grab the token and use the model_info method.
if isinstance(use_auth_token, str):
token = use_auth_token
elif use_auth_token is True:
token = HfFolder.get_token()
else:
token = None
model_info = HfApi(endpoint=HUGGINGFACE_CO_RESOLVE_ENDPOINT).model_info(
path_or_repo, revision=revision, token=token
)
return [f.rfilename for f in model_info.siblings]
|
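A short usage sketch, assuming the function is importable from the surrounding module; the repo id is just an example of a public model, and the local path is hypothetical:

# Files of a public model repo on the Hugging Face Hub (example repo id).
files = get_list_of_files("bert-base-uncased")
print([f for f in files if f.endswith(".json")])

# For a local directory the same call simply walks the folder,
# subdirectories included (hypothetical path).
local_files = get_list_of_files("./my_saved_model")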
42,465 |
def fstring_contains_expr(s: str) -> bool:
return any(True for _ in iter_fexpr_spans(s))
|
def fstring_contains_expr(s: str) -> bool:
return any(iter_fexpr_spans(s))
|
57,830 |
def main():
try:
recipients = collect_campaign_recipients()
update_campaign_email_to_field(recipients)
except Exception as e:
return_error(f'Failed to execute CollectCampaignRecipients. Error: {str(e)}')
|
def main():
try:
args = demisto.args()
recipients = collect_campaign_recipients(args)
update_campaign_email_to_field(recipients)
except Exception as e:
return_error(f'Failed to execute CollectCampaignRecipients. Error: {str(e)}')
|
20,543 |
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("Argument '-m-noise' must be specified when using '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
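The 'diff' branch above implements the two-acquisition SNR estimate of Dietrich et al.: the signal is the ROI mean of the averaged volumes and the noise is the ROI standard deviation of their difference, scaled by 2/sqrt(2) because subtracting two independent acquisitions doubles the noise variance. A toy NumPy sketch of just that computation on synthetic data (no NIfTI I/O, whole-volume ROI):

import numpy as np

rng = np.random.default_rng(0)
shape = (32, 32, 8)
signal, sigma = 100.0, 5.0

# Two repeated "acquisitions" of the same object with independent noise.
vol1 = signal + sigma * rng.standard_normal(shape)
vol2 = signal + sigma * rng.standard_normal(shape)
mask = np.ones(shape, dtype=bool)  # whole-volume ROI for simplicity

mean_in_roi = np.average((vol1 + vol2) / 2, weights=mask)
std_in_roi = np.std((vol2 - vol1)[mask], ddof=1)

# SNR_diff = (2 / sqrt(2)) * mean / std(difference); expected ~ 100 / 5 = 20.
snr_diff = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
print(round(snr_diff, 1))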
42,556 |
def test_all_location_in_db(database):
"""
Test that all locations in DB deserialize to a valid Location
"""
# Query for all locations
cursor = database.conn.cursor()
locations = cursor.execute("SELECT location,seq from location")
# We deserialize, then serialize and compare the result
for location_letter, seq in locations:
deserialized_location = deserialize_location_from_db(location_letter)
assert deserialized_location.value == seq
assert Location(seq).serialize_for_db() == location_letter
location_name = deserialize_location(str(deserialized_location))
assert location_name == deserialized_location
|
def test_all_location_in_db(database):
"""
Test that all locations in DB deserialize to a valid Location
"""
# Query for all locations
cursor = database.conn.cursor()
locations = cursor.execute('SELECT location,seq from location')
# We deserialize, then serialize and compare the result
for location_letter, seq in locations:
deserialized_location = deserialize_location_from_db(location_letter)
assert deserialized_location.value == seq
assert Location(seq).serialize_for_db() == location_letter
location_name = deserialize_location(str(deserialized_location))
assert location_name == deserialized_location
|
28,021 |
def is_windows():
return sys.platform == "win32"
|
def is_windows():
return sys.platform == "win32"
|
3,865 |
def _apply_move(soln, move, seed):
"""
Apply a move to a solution to generate a neighbor solution.
Parameters
----------
soln : list of nodes
Current solution (list of nodes)
move : string
Move to be applied
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
The solution after move is applied. (A neighbor solution.)
"""
a = seed.randint(1, len(soln) - 2)
listb = list(range(1, a)) + list(range(a + 1, len(soln) - 1))
b = seed.choice(listb)
if move == "1-1":
soln[a], soln[b] = soln[b], soln[a]
elif move == "1-0":
soln.insert(b, soln.pop(a))
return soln
|
def _apply_move(soln, move, seed):
"""
Apply a move to a solution to generate a neighbor solution.
Parameters
----------
soln : list of nodes
Current solution (list of nodes)
move : string
Move to be applied
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
The solution after move is applied. (A neighbor solution.)
"""
a, b = seed.sample(range(1, len(soln) - 1), k=2)
if move == "1-1":
soln[a], soln[b] = soln[b], soln[a]
elif move == "1-0":
soln.insert(b, soln.pop(a))
return soln
|
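A quick sketch of how the two move types behave on a small tour, assuming `seed` is a `random.Random` instance (which is what the `seed.sample` call in the revised version implies); the first and last positions are never selected, so the tour's fixed endpoints stay in place:

import random

def apply_move(soln, move, seed):
    # Same logic as _apply_move above, reproduced so the example is self-contained.
    a, b = seed.sample(range(1, len(soln) - 1), k=2)
    if move == "1-1":
        soln[a], soln[b] = soln[b], soln[a]   # swap two interior nodes
    elif move == "1-0":
        soln.insert(b, soln.pop(a))           # relocate one interior node
    return soln

tour = ["D", "A", "B", "C", "D"]              # closed tour with fixed endpoints
print(apply_move(tour[:], "1-1", random.Random(3)))
print(apply_move(tour[:], "1-0", random.Random(3)))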
22,481 |
def main(argv=None):
"""Main entry-point for the CLI tool."""
parser = arg_parser(argv, globals())
parser.add_argument('targets', metavar="TARGETS", default=None, help="Comma-separated packages for calculating the mulled hash.")
parser.add_argument('--hash', dest="hash", choices=["v1", "v2"], default="v2")
args = parser.parse_args()
targets = target_str_to_targets(args.targets)
image_name = v2_image_name if args.hash == 'v2' else v1_image_name
sys.stdout.write(image_name(targets))
sys.stdout.write('\n')
|
def main(argv=None):
"""Main entry-point for the CLI tool."""
parser = arg_parser(argv, globals())
parser.add_argument('targets', metavar="TARGETS", default=None, help="Comma-separated packages for calculating the mulled hash.")
parser.add_argument('--hash', dest="hash", choices=["v1", "v2"], default="v2")
args = parser.parse_args()
targets = target_str_to_targets(args.targets)
image_name = v2_image_name if args.hash == 'v2' else v1_image_name
print(image_name(targets))
|
30,569 |
def get_test_list(files_string, branch_name, two_before_ga_ver='0', conf=None, id_set=None):
"""Create a test list that should run"""
(modified_files, modified_tests_list, changed_common, is_conf_json, sample_tests, is_reputations_json,
is_indicator_json) = get_modified_files(files_string)
tests = set([])
if modified_files:
tests = find_tests_for_modified_files(modified_files, conf, id_set)
# Adding a unique test for a json file.
if is_reputations_json:
tests.add('FormattingPerformance - Test')
tests.add('reputations.json Test')
tests.add('Indicators reputation-.json Test')
if is_indicator_json:
tests.add('Test IP Indicator Fields')
for file_path in modified_tests_list:
test = collect_ids(file_path)
if test not in tests:
tests.add(test)
if is_conf_json:
tests = tests.union(get_test_from_conf(branch_name, conf))
if sample_tests: # Choosing 3 random tests for infrastructure testing
print_warning('Collecting sample tests due to: {}'.format(','.join(sample_tests)))
tests = tests.union(
get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set, server_version=two_before_ga_ver))
if not tests:
if modified_files or modified_tests_list:
print_error(
"There is no test-playbook that checks the changes you've done, please make sure you write one.")
global _FAILED
_FAILED = True
elif changed_common:
print_warning('Adding 3 random tests due to: {}'.format(','.join(changed_common)))
tests = tests.union(get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set,
server_version=two_before_ga_ver))
else:
print_warning("Running Sanity check only")
tests = get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set,
server_version=two_before_ga_ver)
tests.add('DocumentationTest') # test with integration configured
tests.add('TestCommonPython') # test with no integration configured
elif changed_common:
tests.add('TestCommonPython')
return tests
|
def get_test_list(files_string, branch_name, two_before_ga_ver='0', conf=None, id_set=None):
"""Create a test list that should run"""
(modified_files, modified_tests_list, changed_common, is_conf_json, sample_tests, is_reputations_json,
is_indicator_json) = get_modified_files(files_string)
tests = set([])
if modified_files:
tests = find_tests_for_modified_files(modified_files, conf, id_set)
# Adding a unique test for a json file.
if is_reputations_json:
tests.add('FormattingPerformance - Test')
tests.add('reputations.json Test')
tests.add('Indicators reputation-.json Test')
if is_indicator_json:
tests.add('Test IP Indicator Fields')
for file_path in modified_tests_list:
test = collect_ids(file_path)
if test not in tests:
tests.add(test)
if is_conf_json:
tests = tests.union(get_test_from_conf(branch_name, conf))
if sample_tests: # Choosing 3 random tests for infrastructure testing
print_warning('Collecting sample tests due to: {}'.format(','.join(sample_tests)))
tests = tests.union(
get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set, server_version=two_before_ga_ver))
if not tests:
if modified_files or modified_tests_list:
print_error(
"There is no test-playbook that checks the changes you've done, please make sure you write one.")
global _FAILED
_FAILED = True
elif changed_common:
print_warning('Adding 3 random tests due to: {}'.format(','.join(changed_common)))
tests = tests.union(get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set,
server_version=two_before_ga_ver))
else:
print_warning("Running Sanity check only")
tests = get_runnable_tests(tests_num=RANDOM_TESTS_NUM, conf=conf, id_set=id_set,
server_version=two_before_ga_ver)
tests.add('DocumentationTest') # test with integration configured
tests.add('TestCommonPython') # test with no integration configured
if changed_common:
tests.add('TestCommonPython')
return tests
|
4,117 |
def p_c_simple_base_type(s, nonempty, templates = None):
is_basic = 0
signed = 1
longness = 0
complex = 0
module_path = []
pos = s.position()
# Handle const/volatile
is_const = is_volatile = 0
while s.sy == 'IDENT':
if s.systring == 'const':
if is_const: error(pos, "Duplicate 'const'")
is_const = 1
elif s.systring == 'volatile':
if is_volatile: error(pos, "Duplicate 'volatile'")
is_volatile = 1
else:
break
s.next()
if is_const or is_volatile:
base_type = p_c_base_type(s, nonempty=nonempty, templates=templates)
if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
# reverse order to avoid having to write "(const int)[:]"
base_type.base_type_node = Nodes.CConstOrVolatileTypeNode(pos,
base_type=base_type.base_type_node, is_const=is_const, is_volatile=is_volatile)
return base_type
return Nodes.CConstOrVolatileTypeNode(pos,
base_type=base_type, is_const=is_const, is_volatile=is_volatile)
if s.sy != 'IDENT':
error(pos, "Expected an identifier, found '%s'" % s.sy)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
if s.sy == 'IDENT' and s.systring in special_basic_c_types:
signed, longness = special_basic_c_types[s.systring]
name = s.systring
s.next()
else:
signed, longness = p_sign_and_longness(s)
if s.sy == 'IDENT' and s.systring in basic_c_type_names:
name = s.systring
s.next()
else:
name = 'int' # long [int], short [int], long [int] complex, etc.
if s.sy == 'IDENT' and s.systring == 'complex':
complex = 1
s.next()
elif looking_at_dotted_name(s):
#print "p_c_simple_base_type: looking_at_type_name at", s.position()
name = s.systring
s.next()
while s.sy == '.':
module_path.append(name)
s.next()
name = p_ident(s)
else:
name = s.systring
s.next()
if nonempty and s.sy != 'IDENT':
# Make sure this is not a declaration of a variable or function.
if s.sy == '(':
s.next()
if (s.sy == '*' or s.sy == '**' or s.sy == '&'
or (s.sy == 'IDENT' and s.systring in calling_convention_words)):
s.put_back(u'(', u'(')
else:
s.put_back(u'(', u'(')
s.put_back(u'IDENT', name)
name = None
elif s.sy not in ('*', '**', '[', '&'):
s.put_back(u'IDENT', name)
name = None
type_node = Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
complex = complex, longness = longness,
templates = templates)
# declarations here.
if s.sy == '[':
if is_memoryviewslice_access(s):
type_node = p_memoryviewslice_access(s, type_node)
else:
type_node = p_buffer_or_template(s, type_node, templates)
if s.sy == '.':
s.next()
name = p_ident(s)
type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
return type_node
|
def p_c_simple_base_type(s, nonempty, templates=None):
is_basic = 0
signed = 1
longness = 0
complex = 0
module_path = []
pos = s.position()
# Handle const/volatile
is_const = is_volatile = 0
while s.sy == 'IDENT':
if s.systring == 'const':
if is_const: error(pos, "Duplicate 'const'")
is_const = 1
elif s.systring == 'volatile':
if is_volatile: error(pos, "Duplicate 'volatile'")
is_volatile = 1
else:
break
s.next()
if is_const or is_volatile:
base_type = p_c_base_type(s, nonempty=nonempty, templates=templates)
if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
# reverse order to avoid having to write "(const int)[:]"
base_type.base_type_node = Nodes.CConstOrVolatileTypeNode(pos,
base_type=base_type.base_type_node, is_const=is_const, is_volatile=is_volatile)
return base_type
return Nodes.CConstOrVolatileTypeNode(pos,
base_type=base_type, is_const=is_const, is_volatile=is_volatile)
if s.sy != 'IDENT':
error(pos, "Expected an identifier, found '%s'" % s.sy)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
if s.sy == 'IDENT' and s.systring in special_basic_c_types:
signed, longness = special_basic_c_types[s.systring]
name = s.systring
s.next()
else:
signed, longness = p_sign_and_longness(s)
if s.sy == 'IDENT' and s.systring in basic_c_type_names:
name = s.systring
s.next()
else:
name = 'int' # long [int], short [int], long [int] complex, etc.
if s.sy == 'IDENT' and s.systring == 'complex':
complex = 1
s.next()
elif looking_at_dotted_name(s):
#print "p_c_simple_base_type: looking_at_type_name at", s.position()
name = s.systring
s.next()
while s.sy == '.':
module_path.append(name)
s.next()
name = p_ident(s)
else:
name = s.systring
s.next()
if nonempty and s.sy != 'IDENT':
# Make sure this is not a declaration of a variable or function.
if s.sy == '(':
s.next()
if (s.sy == '*' or s.sy == '**' or s.sy == '&'
or (s.sy == 'IDENT' and s.systring in calling_convention_words)):
s.put_back(u'(', u'(')
else:
s.put_back(u'(', u'(')
s.put_back(u'IDENT', name)
name = None
elif s.sy not in ('*', '**', '[', '&'):
s.put_back(u'IDENT', name)
name = None
type_node = Nodes.CSimpleBaseTypeNode(pos,
name = name, module_path = module_path,
is_basic_c_type = is_basic, signed = signed,
complex = complex, longness = longness,
templates = templates)
# declarations here.
if s.sy == '[':
if is_memoryviewslice_access(s):
type_node = p_memoryviewslice_access(s, type_node)
else:
type_node = p_buffer_or_template(s, type_node, templates)
if s.sy == '.':
s.next()
name = p_ident(s)
type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
return type_node
|
7,439 |
def threshold_multiotsu(image=None, classes=3, nbins=256, *, hist=None):
r"""Generate `classes`-1 threshold values to divide gray levels in `image`,
following Otsu's method for multiple classes.
The threshold values are chosen to maximize the total sum of pairwise
variances between the thresholded graylevel classes. See Notes and [1]_
for more details.
Either image or hist must be provided. If hist is provided, the actual
histogram of the image is ignored.
Parameters
----------
image : (N, M[, ..., P]) ndarray, optional
Grayscale input image.
classes : int, optional
Number of classes to be thresholded, i.e. the number of resulting
regions.
nbins : int, optional
Number of bins used to calculate the histogram. This value is ignored
for integer arrays.
hist : array, or 2-tuple of arrays, optional
Histogram from which to determine the threshold, and optionally a
corresponding array of bin center intensities. If no hist provided,
this function will compute it from the image (see notes).
Returns
-------
thresh : array
Array containing the threshold values for the desired classes.
Raises
------
ValueError
        If ``image`` contains fewer grayscale values than the desired
number of classes.
Notes
-----
This implementation relies on a Cython function whose complexity
is :math:`O\left(\frac{Ch^{C-1}}{(C-1)!}\right)`, where :math:`h`
is the number of histogram bins and :math:`C` is the number of
classes desired.
If no hist is given, this function will make use of
`skimage.exposure.histogram`, which behaves differently than
    `np.histogram`. While both are allowed, use the former for consistent
behaviour.
The input image must be grayscale.
References
----------
.. [1] Liao, P-S., Chen, T-S. and Chung, P-C., "A fast algorithm for
multilevel thresholding", Journal of Information Science and
Engineering 17 (5): 713-727, 2001. Available at:
<https://ftp.iis.sinica.edu.tw/JISE/2001/200109_01.pdf>
:DOI:`10.6688/JISE.2001.17.5.1`
.. [2] Tosa, Y., "Multi-Otsu Threshold", a java plugin for ImageJ.
Available at:
<http://imagej.net/plugins/download/Multi_OtsuThreshold.java>
Examples
--------
>>> from skimage.color import label2rgb
>>> from skimage import data
>>> image = data.camera()
>>> thresholds = threshold_multiotsu(image)
>>> regions = np.digitize(image, bins=thresholds)
>>> regions_colorized = label2rgb(regions)
"""
if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4):
warn(f'threshold_multiotsu is expected to work correctly only for '
f'grayscale images; image shape {image.shape} looks like '
f'that of an RGB image.')
# calculating the histogram and the probability of each gray level.
prob, bin_centers = _validate_image_histogram(image, hist, nbins,
normalize=True)
prob = prob.astype('float32', copy=False)
nvalues = np.count_nonzero(prob)
if nvalues < classes:
msg = (f'After discretization into bins, the input image has '
f'only {nvalues} different values. It cannot be thresholded '
f'in {classes} classes. If there are more unique values '
f'before discretization, try increasing the number of bins.')
raise ValueError(msg)
elif nvalues == classes:
thresh_idx = np.flatnonzero(prob)[:-1]
else:
# Get threshold indices
try:
thresh_idx = _get_multiotsu_thresh_indices_lut(prob, classes - 1)
except MemoryError:
# Don't use LUT if the number of bins is too large (if the
# image is uint16 for example): in this case, the
# allocated memory is too large.
thresh_idx = _get_multiotsu_thresh_indices(prob, classes - 1)
thresh = bin_centers[thresh_idx]
return thresh
|
def threshold_multiotsu(image=None, classes=3, nbins=256, *, hist=None):
r"""Generate `classes`-1 threshold values to divide gray levels in `image`,
following Otsu's method for multiple classes.
The threshold values are chosen to maximize the total sum of pairwise
variances between the thresholded graylevel classes. See Notes and [1]_
for more details.
Either image or hist must be provided. If hist is provided, the actual
histogram of the image is ignored.
Parameters
----------
image : (N, M[, ..., P]) ndarray, optional
Grayscale input image.
classes : int, optional
Number of classes to be thresholded, i.e. the number of resulting
regions.
nbins : int, optional
Number of bins used to calculate the histogram. This value is ignored
for integer arrays.
hist : array, or 2-tuple of arrays, optional
Histogram from which to determine the threshold, and optionally a
corresponding array of bin center intensities. If no hist provided,
this function will compute it from the image (see notes).
Returns
-------
thresh : array
Array containing the threshold values for the desired classes.
Raises
------
ValueError
        If ``image`` contains fewer grayscale values than the desired
number of classes.
Notes
-----
This implementation relies on a Cython function whose complexity
is :math:`O\left(\frac{Ch^{C-1}}{(C-1)!}\right)`, where :math:`h`
is the number of histogram bins and :math:`C` is the number of
classes desired.
If no hist is given, this function will make use of
`skimage.exposure.histogram`, which behaves differently than
    `np.histogram`. While both are allowed, use the former for consistent
behaviour.
The input image must be grayscale.
References
----------
.. [1] Liao, P-S., Chen, T-S. and Chung, P-C., "A fast algorithm for
multilevel thresholding", Journal of Information Science and
Engineering 17 (5): 713-727, 2001. Available at:
<https://ftp.iis.sinica.edu.tw/JISE/2001/200109_01.pdf>
:DOI:`10.6688/JISE.2001.17.5.1`
.. [2] Tosa, Y., "Multi-Otsu Threshold", a java plugin for ImageJ.
Available at:
<http://imagej.net/plugins/download/Multi_OtsuThreshold.java>
Examples
--------
>>> from skimage.color import label2rgb
>>> from skimage import data
>>> image = data.camera()
>>> thresholds = threshold_multiotsu(image)
>>> regions = np.digitize(image, bins=thresholds)
>>> regions_colorized = label2rgb(regions)
"""
if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4):
warn(f'threshold_multiotsu is expected to work correctly only for '
f'grayscale images; image shape {image.shape} looks like '
f'that of an RGB image.')
# calculating the histogram and the probability of each gray level.
prob, bin_centers = _validate_image_histogram(image, hist, nbins,
normalize=True)
prob = prob.astype('float32', copy=False)
nvalues = np.count_nonzero(prob)
if nvalues < classes:
msg = (f'After discretization into bins, the input image has '
f'only {nvalues} different values. It cannot be thresholded '
f'in {classes} classes. If there are more unique values '
f'before discretization, try increasing the number of bins (`nbins`).')
raise ValueError(msg)
elif nvalues == classes:
thresh_idx = np.flatnonzero(prob)[:-1]
else:
# Get threshold indices
try:
thresh_idx = _get_multiotsu_thresh_indices_lut(prob, classes - 1)
except MemoryError:
# Don't use LUT if the number of bins is too large (if the
# image is uint16 for example): in this case, the
# allocated memory is too large.
thresh_idx = _get_multiotsu_thresh_indices(prob, classes - 1)
thresh = bin_centers[thresh_idx]
return thresh
|
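A minimal usage sketch for the `threshold_multiotsu` pair above, assuming scikit-image and NumPy are available; it exercises the precomputed-histogram path (`hist=` as a `(counts, bin_centers)` tuple) that `_validate_image_histogram` handles in both versions.

import numpy as np
from skimage import data, exposure
from skimage.filters import threshold_multiotsu

image = data.camera()
# Precompute the histogram once and hand it in; the docstring above notes that
# the image's own histogram is then ignored.
counts, bin_centers = exposure.histogram(image, nbins=256)
thresholds = threshold_multiotsu(classes=3, hist=(counts, bin_centers))
regions = np.digitize(image, bins=thresholds)  # per-pixel class index in {0, 1, 2}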
2,568 |
def _smacof_single(
dissimilarities,
metric=True,
n_components=2,
init=None,
max_iter=300,
verbose=0,
eps=1e-3,
random_state=None,
normalize=False,
):
"""Computes multidimensional scaling using SMACOF algorithm.
Parameters
----------
dissimilarities : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
normalize : bool, default=False
        Whether to use and return normed stress value (Stress-1) instead of raw
stress calculated by default. Only supported in non-metric MDS.
.. versionadded:: 1.1
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalize=True`, and `metric=False` returns Stress-1 (according to
Kruskal (1964, p. 3) value 0 indicates "perfect" fit, 0.025
excellent, 0.05 good, 0.1 fair, and 0.2 poor).
n_iter : int
The number of iterations corresponding to the best stress.
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.uniform(size=n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError(
"init matrix should be of shape (%d, %d)" % (n_samples, n_components)
)
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = dissimilarities
else:
dis_flat = dis.ravel()
# dissimilarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt(
(n_samples * (n_samples - 1) / 2) / (disparities**2).sum()
)
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
if normalize:
stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2))
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = -ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1.0 / n_samples * np.dot(B, X)
dis = np.sqrt((X**2).sum(axis=1)).sum()
if verbose >= 2:
print("it: %d, stress %s" % (it, stress))
if old_stress is not None:
if (old_stress - stress / dis) < eps:
if verbose:
print("breaking at iteration %d with stress %s" % (it, stress))
break
old_stress = stress / dis
return X, stress, it + 1
|
def _smacof_single(
dissimilarities,
metric=True,
n_components=2,
init=None,
max_iter=300,
verbose=0,
eps=1e-3,
random_state=None,
normalize=False,
):
"""Computes multidimensional scaling using SMACOF algorithm.
Parameters
----------
dissimilarities : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-3
Relative tolerance with respect to stress at which to declare
convergence.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
normalized_stress : bool, default=False
        Whether to use and return normed stress value (Stress-1) instead of raw
stress calculated by default. Only supported in non-metric MDS.
.. versionadded:: 1.1
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalize=True`, and `metric=False` returns Stress-1 (according to
Kruskal (1964, p. 3) value 0 indicates "perfect" fit, 0.025
excellent, 0.05 good, 0.1 fair, and 0.2 poor).
n_iter : int
The number of iterations corresponding to the best stress.
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.uniform(size=n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError(
"init matrix should be of shape (%d, %d)" % (n_samples, n_components)
)
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = dissimilarities
else:
dis_flat = dis.ravel()
# dissimilarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt(
(n_samples * (n_samples - 1) / 2) / (disparities**2).sum()
)
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
if normalize:
stress = np.sqrt(stress / ((disparities.ravel() ** 2).sum() / 2))
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = -ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1.0 / n_samples * np.dot(B, X)
dis = np.sqrt((X**2).sum(axis=1)).sum()
if verbose >= 2:
print("it: %d, stress %s" % (it, stress))
if old_stress is not None:
if (old_stress - stress / dis) < eps:
if verbose:
print("breaking at iteration %d with stress %s" % (it, stress))
break
old_stress = stress / dis
return X, stress, it + 1
|
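The stress bookkeeping is identical in both versions above; only the documented name of the normalisation flag changes. A small NumPy sketch of the two quantities computed inside the loop (raw stress, and Kruskal's Stress-1 when `normalize=True`), using made-up distance and disparity values:

import numpy as np

dis = np.array([1.0, 2.0, 2.5, 0.5])          # embedded distances (illustrative)
disparities = np.array([1.1, 1.8, 2.4, 0.7])  # target disparities (illustrative)

raw_stress = ((dis - disparities) ** 2).sum() / 2
stress_1 = np.sqrt(raw_stress / ((disparities ** 2).sum() / 2))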
9,691 |
def main():
"""Validate BOTMETA"""
path = '.github/BOTMETA.yml'
try:
with open(path, 'r') as f_path:
botmeta = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
except Exception as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
files_schema = Any(
Schema(*string_types),
Schema({
'ignored': Any(list_string_types, *string_types),
'keywords': Any(list_string_types, *string_types),
'labels': Any(list_string_types, *string_types),
'maintainers': Any(list_string_types, *string_types),
'notified': Any(list_string_types, *string_types),
'supershipit': Any(list_string_types, *string_types),
'support': Any("core", "network", "community"),
})
)
list_dict_file_schema = [{str_type: files_schema}
for str_type in string_types]
schema = Schema({
Required('automerge'): bool,
Required('files'): Any(None, *list_dict_file_schema),
Required('macros'): dict, # Any(*list_macros_schema),
})
# Ensure schema is valid
try:
schema(botmeta)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(botmeta, error)))
# Ensure botmeta is always support:core
botmeta_support = botmeta.get('files', {}).get('.github/BOTMETA.yml', {}).get('support', '')
if botmeta_support != 'core':
print('%s:%d:%d: .github/BOTMETA.yml MUST be support: core' % (path, 0, 0))
    # Find all path (non-team) macros so we can substitute them
macros = botmeta.get('macros', {})
path_macros = []
team_macros = []
for macro in macros:
if macro.startswith('team_'):
team_macros.append('$'+macro)
else:
path_macros.append(macro)
# Validate files
for file in botmeta['files']:
# maintainers can be:
# implicit: $modules/command/shell.py $team_foo
# maintainer (string): maintainers: $team_foo fred steve
# maintainer (list): maintainers:
# - $team_foo
# - fred
if isinstance(botmeta.get('files', {}).get(file, ''), str):
maintainers = botmeta.get('files', {}).get(file, '').split(' ')
validate_maintainers(maintainers, team_macros, path, file)
elif botmeta.get('files', {}).get(file, '').get('maintainers', ''):
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), str):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '').split(' ')
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), list):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '')
validate_maintainers(maintainers, team_macros, path, file)
for macro in path_macros:
file = file.replace('$' + macro, botmeta.get('macros', {}).get(macro, ''))
if not os.path.exists(file):
# Not a file or directory, though maybe the prefix to one?
# https://github.com/ansible/ansibullbot/pull/1023
if not glob.glob('%s*' % file):
print("%s:%d:%d: Can't find '%s.*' in this branch" % (path, 0, 0, file))
|
def main():
"""Validate BOTMETA"""
path = '.github/BOTMETA.yml'
try:
with open(path, 'r') as f_path:
botmeta = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
except Exception as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
sys.exit()
files_schema = Any(
Schema(*string_types),
Schema({
'ignored': Any(list_string_types, *string_types),
'keywords': Any(list_string_types, *string_types),
'labels': Any(list_string_types, *string_types),
'maintainers': Any(list_string_types, *string_types),
'notified': Any(list_string_types, *string_types),
'supershipit': Any(list_string_types, *string_types),
'support': Any("core", "network", "community"),
})
)
list_dict_file_schema = [{str_type: files_schema}
for str_type in string_types]
schema = Schema({
Required('automerge'): bool,
Required('files'): Any(None, *list_dict_file_schema),
Required('macros'): dict, # Any(*list_macros_schema),
})
# Ensure schema is valid
try:
schema(botmeta)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(botmeta, error)))
# Ensure botmeta is always support:core
botmeta_support = botmeta.get('files', {}).get('.github/BOTMETA.yml', {}).get('support', '')
if botmeta_support != 'core':
print('%s:%d:%d: .github/BOTMETA.yml MUST be support: core' % (path, 0, 0))
    # Find all path (non-team) macros so we can substitute them
macros = botmeta.get('macros', {})
path_macros = []
team_macros = []
for macro in macros:
if macro.startswith('team_'):
team_macros.append('$' + macro)
else:
path_macros.append(macro)
# Validate files
for file in botmeta['files']:
# maintainers can be:
# implicit: $modules/command/shell.py $team_foo
# maintainer (string): maintainers: $team_foo fred steve
# maintainer (list): maintainers:
# - $team_foo
# - fred
if isinstance(botmeta.get('files', {}).get(file, ''), str):
maintainers = botmeta.get('files', {}).get(file, '').split(' ')
validate_maintainers(maintainers, team_macros, path, file)
elif botmeta.get('files', {}).get(file, '').get('maintainers', ''):
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), str):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '').split(' ')
if isinstance(botmeta.get('files', {}).get(file, '').get('maintainers', ''), list):
maintainers = botmeta.get('files', {}).get(file, '').get('maintainers', '')
validate_maintainers(maintainers, team_macros, path, file)
for macro in path_macros:
file = file.replace('$' + macro, botmeta.get('macros', {}).get(macro, ''))
if not os.path.exists(file):
# Not a file or directory, though maybe the prefix to one?
# https://github.com/ansible/ansibullbot/pull/1023
if not glob.glob('%s*' % file):
print("%s:%d:%d: Can't find '%s.*' in this branch" % (path, 0, 0, file))
|
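A self-contained sketch of the voluptuous pattern the validator above relies on (Schema / Required / Any plus humanize_error), assuming the `voluptuous` package; the real script builds its `files_schema` from Ansible's `string_types` helpers, which are omitted here.

from voluptuous import Any, MultipleInvalid, Required, Schema
from voluptuous.humanize import humanize_error

schema = Schema({
    Required('automerge'): bool,
    Required('files'): Any(None, dict),
    Required('macros'): dict,
})

botmeta = {'automerge': True, 'files': None, 'macros': {}}
try:
    schema(botmeta)  # raises if the document does not match
except MultipleInvalid as ex:
    for error in ex.errors:
        print(humanize_error(botmeta, error))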
40,427 |
def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, col_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(col_dict[key], csc[1])
assert perm_dict[key] is None
|
def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, col_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(colptr_dict[key], csc[1])
assert perm_dict[key] is None
|
46,091 |
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
|
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
|
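Only the leading underscore changes in the pair above. For context, a hedged sketch of how such a confirm token is typically fed back into a Google Drive download with `requests` (the URL and parameter names here are illustrative, not taken from the original file):

import requests

URL = "https://docs.google.com/uc?export=download"  # illustrative endpoint
session = requests.Session()
response = session.get(URL, params={"id": "FILE_ID"}, stream=True)

token = _get_confirm_token(response)  # as defined above
if token:
    # Repeat the request, acknowledging the download warning via the cookie token.
    response = session.get(URL, params={"id": "FILE_ID", "confirm": token}, stream=True)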
16,478 |
def report_integration(
what: str,
integration_frame: tuple[FrameSummary, str, str],
level: int = logging.WARNING,
) -> None:
"""Report incorrect usage in an integration.
Async friendly.
"""
found_frame, integration, path = integration_frame
# Keep track of integrations already reported to prevent flooding
key = f"{integration}:{what}"
if key in _REPORTED_INTEGRATIONS:
return
_REPORTED_INTEGRATIONS.append(key)
index = found_frame.filename.index(path)
if path == "custom_components/":
extra = " to the custom component author"
else:
extra = ""
_LOGGER.log(
level,
"Detected integration that %s. "
"Please report issue%s for %s using this method at %s, line %s: %s",
what,
extra,
integration,
found_frame.filename[index:],
found_frame.lineno,
found_frame.line.strip(),
)
|
def report_integration(
what: str,
integration_frame: tuple[FrameSummary, str, str],
level: int = logging.WARNING,
) -> None:
"""Report incorrect usage in an integration.
Async friendly.
"""
found_frame, integration, path = integration_frame
# Keep track of integrations already reported to prevent flooding
key = f"{integration}:{what}:{found_frame.filename}:{found_frame.lineno}"
if key in _REPORTED_INTEGRATIONS:
return
_REPORTED_INTEGRATIONS.append(key)
index = found_frame.filename.index(path)
if path == "custom_components/":
extra = " to the custom component author"
else:
extra = ""
_LOGGER.log(
level,
"Detected integration that %s. "
"Please report issue%s for %s using this method at %s, line %s: %s",
what,
extra,
integration,
found_frame.filename[index:],
found_frame.lineno,
found_frame.line.strip(),
)
|
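The pair above changes only the deduplication key. A small, self-contained illustration (hypothetical frame values) of why including the filename and line number lets the same integration be reported once per call site instead of once overall:

_REPORTED = []

def seen(integration, what, filename, lineno, per_call_site):
    key = (f"{integration}:{what}:{filename}:{lineno}"
           if per_call_site else f"{integration}:{what}")
    if key in _REPORTED:
        return True
    _REPORTED.append(key)
    return False

# With the old key these two calls collapse into one report;
# with the new key each distinct call site is reported once.
print(seen("hue", "does blocking I/O", "light.py", 10, per_call_site=True))   # False -> report
print(seen("hue", "does blocking I/O", "sensor.py", 42, per_call_site=True))  # False -> report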
38,807 |
def generate_testcases(checks, prepare=False):
'''Generate concrete test cases from checks.
If `prepare` is true then each of the cases will also be prepared for
being sent to the test pipeline. Note that setting this true may slow down
the test case generation.
'''
rt = runtime.runtime()
cases = []
for c in checks:
valid_comb = runtime.valid_sysenv_comb(c.valid_systems,
c.valid_prog_environs)
for part, environs in valid_comb.items():
for env in environs:
case = TestCase(c, part, env)
if prepare:
case.prepare()
cases.append(case)
return cases
|
def generate_testcases(checks, prepare=False):
'''Generate concrete test cases from checks.
If `prepare` is true then each of the cases will also be prepared for
being sent to the test pipeline. Note that setting this to true may slow down
the test case generation.
'''
rt = runtime.runtime()
cases = []
for c in checks:
valid_comb = runtime.valid_sysenv_comb(c.valid_systems,
c.valid_prog_environs)
for part, environs in valid_comb.items():
for env in environs:
case = TestCase(c, part, env)
if prepare:
case.prepare()
cases.append(case)
return cases
|
31,572 |
def search_logs_command(client, args):
query = args.get('query')
time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes'
limit = args.get('limit') if args.get('limit') else 100
repos = argToList(args.get('repos')) if args.get('repos') else []
if limit:
try:
limit = int(limit)
except ValueError:
raise DemistoException(f"The provided argument '{limit}' for limit is not a valid integer.")
result = client.get_search_id(query, time_range, limit, repos)
if not result.get('success'):
raise DemistoException(result['message'])
search_id = result.get('search_id')
search_result = client.get_search_results(search_id)
if not search_result.get('success'):
raise DemistoException(search_result['message'])
rows = search_result.get('rows', [])
display_title = f"Found {len(rows)} logs"
markdown = tableToMarkdown(display_title, rows, headers=None)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.SearchLogs',
outputs=rows
)
|
def search_logs_command(client, args):
query = args.get('query')
time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes'
limit = args.get('limit') if args.get('limit') else 100
repos = argToList(args.get('repos'))
if limit:
try:
limit = int(limit)
except ValueError:
raise DemistoException(f"The provided argument '{limit}' for limit is not a valid integer.")
result = client.get_search_id(query, time_range, limit, repos)
if not result.get('success'):
raise DemistoException(result['message'])
search_id = result.get('search_id')
search_result = client.get_search_results(search_id)
if not search_result.get('success'):
raise DemistoException(search_result['message'])
rows = search_result.get('rows', [])
display_title = f"Found {len(rows)} logs"
markdown = tableToMarkdown(display_title, rows, headers=None)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.SearchLogs',
outputs=rows
)
|
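The modified version drops the `if args.get('repos')` guard because Demisto's `argToList` helper already copes with a missing value. A simplified stand-in (not the real CommonServerPython implementation) showing the behaviour being relied on:

def arg_to_list(arg):
    """Simplified stand-in: the real argToList also accepts JSON-encoded lists."""
    if arg is None or arg == '':
        return []
    if isinstance(arg, list):
        return arg
    return [item.strip() for item in str(arg).split(',')]

assert arg_to_list(None) == []                            # missing 'repos' argument
assert arg_to_list('repo1, repo2') == ['repo1', 'repo2']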
48,803 |
def validate_group_key(k: str, max_length: int = 200) -> bool:
"""Validates value used as a group key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
elif len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
elif not GROUP_KEY_REGEX.match(k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes "
"and underscores exclusively".format(k=k)
)
else:
return True
|
def validate_group_key(k: str, max_length: int = 200) -> bool:
"""Validates value used as a group key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
if len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
if not GROUP_KEY_REGEX.match(k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes "
"and underscores exclusively".format(k=k)
)
return True
|
23,031 |
def test_asanyarray():
y = da.asarray(xr.DataArray([1, 2, 3.0]))
assert isinstance(y, da.array)
assert_eq(y, y)
|
def test_asanyarray():
y = da.asanyarray(xr.DataArray([1, 2, 3.0]))
assert isinstance(y, da.array)
assert_eq(y, y)
|
33,043 |
def pam_add_autologin(root: Path, ttys: List[str]) -> None:
login = root / "etc/pam.d/login"
if login.exists():
with open(login) as f:
original = f.read()
else:
original = ""
with open(login, "w") as f:
for tty in ttys:
# Some PAM versions require the /dev/ prefix, others don't. Just add both variants.
f.write(f"auth sufficient pam_succeed_if.so tty = {tty}\n")
f.write(f"auth sufficient pam_succeed_if.so tty = /dev/{tty}\n")
f.write(original)
|
def pam_add_autologin(root: Path, ttys: List[str]) -> None:
login = root / "etc/pam.d/login"
original = login.read_text() if login.exists() else ""
with open(login, "w") as f:
for tty in ttys:
# Some PAM versions require the /dev/ prefix, others don't. Just add both variants.
f.write(f"auth sufficient pam_succeed_if.so tty = {tty}\n")
f.write(f"auth sufficient pam_succeed_if.so tty = /dev/{tty}\n")
f.write(original)
|
48,403 |
def syspatch_run(module):
cmd = module.get_bin_path('syspatch', True)
changed = False
reboot_needed = False
warnings = []
run_flag = []
check_flag = []
if module.params['revert']:
check_flag = ['-l']
if module.params['revert'] == 'all':
run_flag = ['-R']
else:
run_flag = ['-r']
elif module.params['apply']:
check_flag = ['-c']
# Run check command
rc, out, err = module.run_command([cmd] + check_flag)
if rc != 0:
module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
if len(out) > 0:
# Changes pending
change_pending = True
else:
# No changes pending
change_pending = False
if module.check_mode:
changed = change_pending
elif change_pending:
rc, out, err = module.run_command([cmd] + run_flag)
# Workaround syspatch ln bug:
# http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
elif out.lower().find('create unique kernel') > 0:
# Kernel update applied
reboot_needed = True
elif out.lower().find('syspatch updated itself') > 0:
warnings.append('Syspatch was updated. Please run syspatch again.')
# If no stdout, then warn user
if len(out) == 0:
warnings.append('syspatch had suggested changes, but stdout was empty.')
changed = True
else:
changed = False
return dict(
changed=changed,
reboot_needed=reboot_needed,
rc=rc,
stderr=err,
stdout=out,
warnings=warnings
)
|
def syspatch_run(module):
cmd = module.get_bin_path('syspatch', True)
changed = False
reboot_needed = False
warnings = []
# Set safe defaults for run_flag and check_flag
run_flag = ['-c']
check_flag = []
if module.params['revert']:
check_flag = ['-l']
if module.params['revert'] == 'all':
run_flag = ['-R']
else:
run_flag = ['-r']
elif module.params['apply']:
check_flag = ['-c']
# Run check command
rc, out, err = module.run_command([cmd] + check_flag)
if rc != 0:
module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
if len(out) > 0:
# Changes pending
change_pending = True
else:
# No changes pending
change_pending = False
if module.check_mode:
changed = change_pending
elif change_pending:
rc, out, err = module.run_command([cmd] + run_flag)
# Workaround syspatch ln bug:
# http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
elif out.lower().find('create unique kernel') > 0:
# Kernel update applied
reboot_needed = True
elif out.lower().find('syspatch updated itself') > 0:
warnings.append('Syspatch was updated. Please run syspatch again.')
# If no stdout, then warn user
if len(out) == 0:
warnings.append('syspatch had suggested changes, but stdout was empty.')
changed = True
else:
changed = False
return dict(
changed=changed,
reboot_needed=reboot_needed,
rc=rc,
stderr=err,
stdout=out,
warnings=warnings
)
|
46,905 |
def test_finetune_lr_shedulers():
args_d: dict = CHEAP_ARGS.copy()
task = "summarization"
tmp_dir = make_test_data_dir()
model = BART_TINY
output_dir = tempfile.mkdtemp(prefix="output_1_")
args_d.update(
data_dir=tmp_dir,
model_name_or_path=model,
output_dir=output_dir,
tokenizer_name=None,
train_batch_size=2,
eval_batch_size=2,
do_predict=False,
task=task,
src_lang="en_XX",
tgt_lang="ro_RO",
freeze_encoder=True,
freeze_embeds=True,
)
# emulate finetune.py
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = {"--help": True}
# --help test
with pytest.raises(SystemExit) as excinfo:
with CaptureStdout() as cs:
args = parser.parse_args(args)
assert False, "--help is expected to sys.exit"
assert excinfo.type == SystemExit
expected = lightning_base.arg_to_scheduler_metavar
assert expected in cs.out, "--help is expected to list the supported schedulers"
# --lr_scheduler=non_existing_scheduler test
unsupported_param = "non_existing_scheduler"
args = {f"--lr_scheduler={unsupported_param}"}
with pytest.raises(SystemExit) as excinfo:
with CaptureStderr() as cs:
args = parser.parse_args(args)
assert False, "invalid argument is expected to sys.exit"
assert excinfo.type == SystemExit
expected = f"invalid choice: '{unsupported_param}'"
assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}"
# --lr_scheduler=existing_scheduler test
supported_param = "cosine"
args_d1 = args_d.copy()
args_d1["lr_scheduler"] = supported_param
args = argparse.Namespace(**args_d1)
model = main(args)
assert getattr(model.hparams, "lr_scheduler") == supported_param, f"lr_scheduler={supported_param} shouldn't fail"
|
def test_finetune_lr_schedulers():
args_d: dict = CHEAP_ARGS.copy()
task = "summarization"
tmp_dir = make_test_data_dir()
model = BART_TINY
output_dir = tempfile.mkdtemp(prefix="output_1_")
args_d.update(
data_dir=tmp_dir,
model_name_or_path=model,
output_dir=output_dir,
tokenizer_name=None,
train_batch_size=2,
eval_batch_size=2,
do_predict=False,
task=task,
src_lang="en_XX",
tgt_lang="ro_RO",
freeze_encoder=True,
freeze_embeds=True,
)
# emulate finetune.py
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = {"--help": True}
# --help test
with pytest.raises(SystemExit) as excinfo:
with CaptureStdout() as cs:
args = parser.parse_args(args)
assert False, "--help is expected to sys.exit"
assert excinfo.type == SystemExit
expected = lightning_base.arg_to_scheduler_metavar
assert expected in cs.out, "--help is expected to list the supported schedulers"
# --lr_scheduler=non_existing_scheduler test
unsupported_param = "non_existing_scheduler"
args = {f"--lr_scheduler={unsupported_param}"}
with pytest.raises(SystemExit) as excinfo:
with CaptureStderr() as cs:
args = parser.parse_args(args)
assert False, "invalid argument is expected to sys.exit"
assert excinfo.type == SystemExit
expected = f"invalid choice: '{unsupported_param}'"
assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}"
# --lr_scheduler=existing_scheduler test
supported_param = "cosine"
args_d1 = args_d.copy()
args_d1["lr_scheduler"] = supported_param
args = argparse.Namespace(**args_d1)
model = main(args)
assert getattr(model.hparams, "lr_scheduler") == supported_param, f"lr_scheduler={supported_param} shouldn't fail"
|
52,995 |
def gen_function_cpp(name, msg=None, includes=None, calls=None, preprocessor=None):
t = Template(_function_cpp)
msg = msg or name
includes = includes or []
calls = calls or []
preprocessor = preprocessor or []
return t.render(name=name, msg=msg, includes=includes, calls=calls, preprocessor=preprocessor)
|
def gen_function_cpp(**context):
t = Template(_function_cpp)
return t.render(**context)
|
30,094 |
def subparser(subparsers):
subparser = subparsers.add_parser('dna', aliases=['rna'],
usage=usage)
subparser.add_argument(
'--license', default='CC0', type=str,
help='signature license. Currently only CC0 is supported.'
)
subparser.add_argument(
'--check-sequence', action='store_true',
help='complain if input sequence is invalid'
)
subparser.add_argument(
'-p', '--param-string', default=[],
help='signature parameters to use.', action='append',
)
subparser.add_argument(
'filenames', nargs='*', help='file(s) of sequences'
)
file_args = subparser.add_argument_group('File handling options')
file_args.add_argument(
'-f', '--force', action='store_true',
help='recompute signatures even if the file exists'
)
subparser.add_argument(
'--from-file',
help='a text file containing a list of sequence files to load'
)
file_args.add_argument(
'-o', '--output',
help='output computed signatures to this file'
)
file_args.add_argument(
'--merge', '--name', type=str, default='', metavar="FILE",
help='merge all input files into one signature file with the '
'specified name'
)
file_args.add_argument(
'--outdir', help='output computed signatures to this directory'
)
file_args.add_argument(
'--singleton', action='store_true',
help='compute a signature for each sequence record individually'
)
file_args.add_argument(
'--name-from-first', action='store_true',
help='name the signature generated from each file after the first '
'record in the file'
)
file_args.add_argument(
'--randomize', action='store_true',
help='shuffle the list of input filenames randomly'
)
|
def subparser(subparsers):
subparser = subparsers.add_parser('dna', aliases=['rna', 'nucleotide'],
usage=usage)
subparser.add_argument(
'--license', default='CC0', type=str,
help='signature license. Currently only CC0 is supported.'
)
subparser.add_argument(
'--check-sequence', action='store_true',
help='complain if input sequence is invalid'
)
subparser.add_argument(
'-p', '--param-string', default=[],
help='signature parameters to use.', action='append',
)
subparser.add_argument(
'filenames', nargs='*', help='file(s) of sequences'
)
file_args = subparser.add_argument_group('File handling options')
file_args.add_argument(
'-f', '--force', action='store_true',
help='recompute signatures even if the file exists'
)
subparser.add_argument(
'--from-file',
help='a text file containing a list of sequence files to load'
)
file_args.add_argument(
'-o', '--output',
help='output computed signatures to this file'
)
file_args.add_argument(
'--merge', '--name', type=str, default='', metavar="FILE",
help='merge all input files into one signature file with the '
'specified name'
)
file_args.add_argument(
'--outdir', help='output computed signatures to this directory'
)
file_args.add_argument(
'--singleton', action='store_true',
help='compute a signature for each sequence record individually'
)
file_args.add_argument(
'--name-from-first', action='store_true',
help='name the signature generated from each file after the first '
'record in the file'
)
file_args.add_argument(
'--randomize', action='store_true',
help='shuffle the list of input filenames randomly'
)
|
31,788 |
def accept_invitation(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.accept_invitation(
DetectorId=args.get('detectorId'),
MasterId=args.get('masterId'),
InvitationId=args.get('invitationId')
)
result = 'Failed'
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
result = 'Success'
accepted_invitaions = []
accepted_invitaions.append({
'MasterId': args.get('masterId'),
'InvitationId': args.get('invitationId'),
'Result': result
})
ec = {"AWS.GuardDuty.AcceptedInvitations": accepted_invitaions} \
if accepted_invitaions else None
return create_entry('AWS GuardDuty Accept Invitation', accepted_invitaions, ec)
except Exception as e:
return raise_error(e)
|
def accept_invitation(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
response = client.accept_invitation(
DetectorId=args.get('detectorId'),
MasterId=args.get('masterId'),
InvitationId=args.get('invitationId')
)
result = 'Failed'
if response.get('ResponseMetadata').get('HTTPStatusCode') == 200:
result = 'Success'
accepted_invitaions = []
accepted_invitaions.append({
'MasterId': args.get('masterId'),
'InvitationId': args.get('invitationId'),
'Result': result
})
ec = {"AWS.GuardDuty.AcceptedInvitations": accepted_invitaions} \
if accepted_invitaions else None
return create_entry('AWS GuardDuty Accept Invitation', accepted_invitaions, ec)
except Exception as e:
return raise_error(e)
|
29,870 |
def _get_directory(output: Optional[str]) -> Optional[str]:
"""Get directory to output the snap file to.
:param output: Snap file name or directory.
:return: The directory to output the snap file to.
If no directory is provided, return None.
If directory is current working directory, return None.
"""
if output:
output_path = Path(output)
output_parent = output_path.parent
if output_path.is_dir():
return str(output_path)
if output_parent and output_parent.resolve() != Path(".").resolve():
return str(output_parent)
return None
|
def _get_directory(output: Optional[str]) -> Path:
"""Get directory to output the snap file to.
:param output: Snap file name or directory.
:return: The directory to output the snap file to.
If no directory is provided, return None.
If directory is current working directory, return None.
"""
if output:
output_path = Path(output)
output_parent = output_path.parent
if output_path.is_dir():
return str(output_path)
if output_parent and output_parent.resolve() != Path(".").resolve():
return str(output_parent)
return None
|
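In the pair above the return annotation becomes `Path` while the body still returns `str(...)` or `None`. A quick sketch of the intended behaviour, assuming the call happens in a working directory that does not contain the named paths:

# Hypothetical inputs, evaluated relative to the current working directory.
print(_get_directory("snaps/core22.snap"))  # -> "snaps"  (parent differs from the cwd)
print(_get_directory("core22.snap"))        # -> None     (parent is the cwd itself)
print(_get_directory(None))                 # -> None     (no output given)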
4,236 |
def parse_impedance_ranges(settings):
"""Parse the selected electrode impedance ranges from the header.
:param settings: header settings lines
:type settings: list
:returns parsed electrode impedances
:rtype dict
"""
impedance_ranges = [item for item in settings if
"Selected Impedance Measurement Range" in item]
electrode_imp_ranges = dict()
if impedance_ranges:
if len(impedance_ranges) == 1:
img_range = impedance_ranges[0].split()
for electrode_type in ['Data', 'Reference', 'Ground']:
electrode_imp_ranges[electrode_type] = {
"imp_lower_bound": float(img_range[-4]),
"imp_upper_bound": float(img_range[-2]),
"imp_range_unit": img_range[-1]
}
else:
for electrode_range in impedance_ranges:
electrode_range = electrode_range.split()
electrode_imp_ranges[electrode_range[0]] = {
"imp_lower_bound": float(electrode_range[6]),
"imp_upper_bound": float(electrode_range[8]),
"imp_range_unit": electrode_range[9]
}
return electrode_imp_ranges
|
def _parse_impedance_ranges(settings):
"""Parse the selected electrode impedance ranges from the header.
:param settings: header settings lines
:type settings: list
:returns parsed electrode impedances
:rtype dict
"""
impedance_ranges = [item for item in settings if
"Selected Impedance Measurement Range" in item]
electrode_imp_ranges = dict()
if impedance_ranges:
if len(impedance_ranges) == 1:
img_range = impedance_ranges[0].split()
for electrode_type in ['Data', 'Reference', 'Ground']:
electrode_imp_ranges[electrode_type] = {
"imp_lower_bound": float(img_range[-4]),
"imp_upper_bound": float(img_range[-2]),
"imp_range_unit": img_range[-1]
}
else:
for electrode_range in impedance_ranges:
electrode_range = electrode_range.split()
electrode_imp_ranges[electrode_range[0]] = {
"imp_lower_bound": float(electrode_range[6]),
"imp_upper_bound": float(electrode_range[8]),
"imp_range_unit": electrode_range[9]
}
return electrode_imp_ranges
|
45,864 |
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix: rotation matrix.
Returns:
Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
axis = torch.zeros((rotation_matrix.shape[0], 3), device=rotation_matrix.device)
axis[:, 0] = rotation_matrix[:, 2, 1] - rotation_matrix[:, 1, 2]
axis[:, 1] = rotation_matrix[:, 0, 2] - rotation_matrix[:, 2, 0]
axis[:, 2] = rotation_matrix[:, 1, 0] - rotation_matrix[:, 0, 1]
# add epsilon for numerical stability
r = torch.norm(axis, dim=1).unsqueeze(1) + eps
t = rotation_matrix[:, 0, 0] + rotation_matrix[:, 1, 1] + rotation_matrix[:, 2, 2]
t = t.unsqueeze(1)
# use atan2 instead of torch.acos((t - 1)/2) for numerical stability
theta = torch.atan2(r, t - 1)
axis = axis / r
angle_axis = theta * axis
return angle_axis
|
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix: rotation matrix.
Returns:
Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
axis = torch.empty((rotation_matrix.shape[0], 3), device=rotation_matrix.device, dtype=rotation_matrix.dtype)
axis[:, 0] = rotation_matrix[:, 2, 1] - rotation_matrix[:, 1, 2]
axis[:, 1] = rotation_matrix[:, 0, 2] - rotation_matrix[:, 2, 0]
axis[:, 2] = rotation_matrix[:, 1, 0] - rotation_matrix[:, 0, 1]
# add epsilon for numerical stability
r = torch.norm(axis, dim=1).unsqueeze(1) + eps
t = rotation_matrix[:, 0, 0] + rotation_matrix[:, 1, 1] + rotation_matrix[:, 2, 2]
t = t.unsqueeze(1)
# use atan2 instead of torch.acos((t - 1)/2) for numerical stability
theta = torch.atan2(r, t - 1)
axis = axis / r
angle_axis = theta * axis
return angle_axis
|
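The change above swaps `torch.zeros` for `torch.empty` with an explicit dtype; every element of `axis` is assigned immediately afterwards, and the dtype argument keeps the buffer consistent with the input matrix. A small PyTorch check of the conversion on a rotation about the z-axis:

import math
import torch

theta = 0.3
c, s = math.cos(theta), math.sin(theta)
R = torch.tensor([[[c, -s, 0.0],
                   [s,  c, 0.0],
                   [0.0, 0.0, 1.0]]])          # shape (1, 3, 3)

angle_axis = rotation_matrix_to_angle_axis(R)  # function as defined above
print(angle_axis)                              # approximately [[0.0, 0.0, 0.3]]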
44,077 |
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding quantum tape.
Args:
graph (MultiDiGraph): directed multigraph containing measure to be
converted to a tape
Returns:
tape (QuantumTape): the quantum tape corresponding to the input
**Example**
Consider the following ... :
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the subgraphs and corresponding tapes by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> subgraphs, communication_graph = qcut.fragment_graph(graph)
>>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs]
>>> tapes
[<QuantumTape: wires=[0], params=1>, <QuantumTape: wires=[0, 1], params=1>,
<QuantumTape: wires=[1], params=1>, <QuantumTape: wires=[0], params=0>,
<QuantumTape: wires=[1], params=0>]
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
with QuantumTape() as tape:
for _, op in ordered_ops:
new_wires = [wire_map[w] for w in op.wires]
op._wires = Wires(new_wires) # TODO: find a better way to update operation wires
apply(op)
if isinstance(op, MeasureNode):
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
wire_map[measured_wire] = new_wire
return tape
|
def graph_to_tape(graph: MultiDiGraph) -> QuantumTape:
"""
Converts a directed multigraph to the corresponding quantum tape.
Args:
graph (MultiDiGraph): directed multigraph to be converted to a tape
Returns:
tape (QuantumTape): the quantum tape corresponding to the input
**Example**
Consider the following ... :
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the subgraphs and corresponding tapes by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> subgraphs, communication_graph = qcut.fragment_graph(graph)
>>> tapes = [qcut.graph_to_tape(sg) for sg in subgraphs]
>>> tapes
[<QuantumTape: wires=[0], params=1>, <QuantumTape: wires=[0, 1], params=1>,
<QuantumTape: wires=[1], params=1>, <QuantumTape: wires=[0], params=0>,
<QuantumTape: wires=[1], params=0>]
"""
wires = Wires.all_wires([n.wires for n in graph.nodes])
ordered_ops = sorted(
[(order, op) for op, order in graph.nodes(data="order")], key=lambda x: x[0]
)
wire_map = {w: w for w in wires}
with QuantumTape() as tape:
for _, op in ordered_ops:
new_wires = [wire_map[w] for w in op.wires]
op._wires = Wires(new_wires) # TODO: find a better way to update operation wires
apply(op)
if isinstance(op, MeasureNode):
measured_wire = op.wires[0]
new_wire = _find_new_wire(wires)
wires += new_wire
wire_map[measured_wire] = new_wire
return tape
|
41,923 |
def plot_contour(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot the parameter relationship as contour plot in a study.
    Note that, if a parameter contains missing values, a trial with missing values is not plotted.
Example:
The following code snippet shows how to plot the parameter relationship as contour plot.
.. plotly::
import optuna
def objective(trial):
x = trial.suggest_uniform("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=30)
optuna.visualization.plot_contour(study, params=["x", "y"])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None`, the objective values
are plotted.
target_name:
Target's name to display on the axis label and the legend.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_imports.check()
return _get_contour_plot(study, params, target, target_name)
|
def plot_contour(
study: Study,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot the parameter relationship as contour plot in a study.
    Note that, if a parameter contains missing values, a trial with missing values is not plotted.
Example:
The following code snippet shows how to plot the parameter relationship as contour plot.
.. plotly::
import optuna
def objective(trial):
x = trial.suggest_uniform("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x ** 2 + y
sampler = optuna.samplers.TPESampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=30)
optuna.visualization.plot_contour(study, params=["x", "y"])
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None`, the objective values
are plotted.
target_name:
Target's name to display on the color bar.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_imports.check()
return _get_contour_plot(study, params, target, target_name)
|
55,046 |
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
24,391 |
def resolve_db_host(db_host):
agent_hostname = datadog_agent.get_hostname()
if not db_host or db_host in {'localhost', '127.0.0.1'}:
return agent_hostname
try:
host_ip = socket.gethostbyname(db_host)
except socket.gaierror as e:
# could be connecting via a unix domain socket
logger.debug(
"failed to resolve DB host '%s' due to socket.gaierror(%s). falling back to agent hostname: %s",
db_host,
e,
agent_hostname,
)
return agent_hostname
try:
agent_host_ip = socket.gethostbyname(agent_hostname)
if agent_host_ip == host_ip:
return agent_hostname
except socket.gaierror as e:
logger.debug(
"failed to resolve agent host '%s' due to socket.gaierror(%s). using DB host: %s",
agent_hostname,
e,
db_host,
)
return db_host
|
def resolve_db_host(db_host):
agent_hostname = datadog_agent.get_hostname()
if not db_host or db_host in {'localhost', '127.0.0.1'}:
return agent_hostname
try:
host_ip = socket.gethostbyname(db_host)
except socket.gaierror as e:
# could be connecting via a unix domain socket
logger.debug(
"failed to resolve DB host '%s' due to %r. falling back to agent hostname: %s",
db_host,
e,
agent_hostname,
)
return agent_hostname
try:
agent_host_ip = socket.gethostbyname(agent_hostname)
if agent_host_ip == host_ip:
return agent_hostname
except socket.gaierror as e:
logger.debug(
"failed to resolve agent host '%s' due to socket.gaierror(%s). using DB host: %s",
agent_hostname,
e,
db_host,
)
return db_host
|
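The first log call above now formats the exception with `%r` instead of hand-writing `socket.gaierror(%s)`. A one-line illustration of what the repr conversion produces for an exception instance (exact message text will vary by platform):

import socket

err = socket.gaierror(8, "nodename nor servname provided, or not known")
print("failed to resolve DB host due to %r" % err)
# -> failed to resolve DB host due to gaierror(8, 'nodename nor servname provided, or not known')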
43,929 |
def _boys(n, t):
r"""Evaluate Boys function.
The :math:`n`-th order `Boys function <https://arxiv.org/abs/2107.01488>`_ is defined as
.. math::
F_n(t) = \int_{0}^{1}x^{2n} e^{-tx^2}dx.
The Boys function is related to the lower incomplete Gamma
`function <https://en.wikipedia.org/wiki/Incomplete_gamma_function>`_, :math:`\gamma`, as
.. math::
F_n(t) = \frac{1}{2t^{n + 0.5}} \gamma(n + 0.5, t),
where
.. math::
\gamma(m, t) = \int_{0}^{t} x^{m-1} e^{-x} dx.
Args:
n (float): order of the Boys function
t (float): exponent of the Boys function
Returns:
float: magnitude of the Boys function
"""
if t == 0.0:
return 1 / (2 * n + 1)
return asp.special.gammainc(n + 0.5, t) * asp.special.gamma(n + 0.5) / (2 * t ** (n + 0.5))
|
def _boys(n, t):
r"""Evaluate the Boys function.
The :math:`n`-th order `Boys function <https://arxiv.org/abs/2107.01488>`_ is defined as
.. math::
F_n(t) = \int_{0}^{1}x^{2n} e^{-tx^2}dx.
The Boys function is related to the lower incomplete Gamma
`function <https://en.wikipedia.org/wiki/Incomplete_gamma_function>`_, :math:`\gamma`, as
.. math::
F_n(t) = \frac{1}{2t^{n + 0.5}} \gamma(n + 0.5, t),
where
.. math::
\gamma(m, t) = \int_{0}^{t} x^{m-1} e^{-x} dx.
Args:
n (float): order of the Boys function
t (float): exponent of the Boys function
Returns:
float: magnitude of the Boys function
"""
if t == 0.0:
return 1 / (2 * n + 1)
return asp.special.gammainc(n + 0.5, t) * asp.special.gamma(n + 0.5) / (2 * t ** (n + 0.5))
|
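Illustrative aside: the docstring relates F_n(t) to the lower incomplete gamma function; scipy's gammainc is the regularized form P(a, x), which is why the snippet multiplies by gamma(n + 0.5). A quick numerical cross-check against the defining integral, assuming plain scipy in place of the snippet's `asp` wrapper:

```python
# Cross-check of the Boys-function identity above using plain scipy
# (the snippet's `asp` autograd wrapper is not needed for this check).
import numpy as np
from scipy import integrate, special


def boys(n, t):
    if t == 0.0:
        return 1 / (2 * n + 1)
    # special.gammainc is the regularized P(a, x); multiplying by Gamma(a)
    # recovers the unregularized lower incomplete gamma used in the docstring.
    return special.gammainc(n + 0.5, t) * special.gamma(n + 0.5) / (2 * t ** (n + 0.5))


n, t = 2, 1.5
quad, _ = integrate.quad(lambda x: x ** (2 * n) * np.exp(-t * x ** 2), 0.0, 1.0)
assert abs(boys(n, t) - quad) < 1e-8
```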
4,920 |
def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
"""
Create a subplot at a specific location inside a regular grid.
Parameters
----------
shape : (int, int)
Number of rows and of columns of the grid in which to place axis.
loc : (int, int)
Row number and column number of the axis location within the grid.
rowspan : int, default: 1
Number of rows for the axis to span to the right.
colspan : int. default: 1
Number of columns for the axis to span downwards.
fig : `.Figure`, optional
Figure to place the subplot in. Defaults to the current figure.
**kwargs
Additional keyword arguments are handed to `~.Figure.add_subplot`.
Returns
-------
`.axes.SubplotBase`, or another subclass of `~.axes.Axes`
The axes of the subplot. The returned axes base class depends on
the projection used. It is `~.axes.Axes` if rectilinear projection
are used and `.projections.polar.PolarAxes` if polar projection
are used. The returned axes is then a subplot subclass of the
base class.
Notes
-----
The following call ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
gridspec = GridSpec(shape[0], shape[1])
subplotspec = gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
"""
if fig is None:
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
ax = fig.add_subplot(subplotspec, **kwargs)
bbox = ax.bbox
axes_to_delete = []
for other_ax in fig.axes:
if other_ax == ax:
continue
if bbox.fully_overlaps(other_ax.bbox):
axes_to_delete.append(other_ax)
for ax_to_del in axes_to_delete:
delaxes(ax_to_del)
return ax
|
def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
"""
Create a subplot at a specific location inside a regular grid.
Parameters
----------
shape : (int, int)
Number of rows and of columns of the grid in which to place axis.
loc : (int, int)
Row number and column number of the axis location within the grid.
rowspan : int, default: 1
Number of rows for the axis to span to the right.
colspan : int, default: 1
Number of columns for the axis to span downwards.
fig : `.Figure`, optional
Figure to place the subplot in. Defaults to the current figure.
**kwargs
Additional keyword arguments are handed to `~.Figure.add_subplot`.
Returns
-------
`.axes.SubplotBase`, or another subclass of `~.axes.Axes`
The axes of the subplot. The returned axes base class depends on
the projection used. It is `~.axes.Axes` if rectilinear projection
are used and `.projections.polar.PolarAxes` if polar projection
are used. The returned axes is then a subplot subclass of the
base class.
Notes
-----
The following call ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
gridspec = GridSpec(shape[0], shape[1])
subplotspec = gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
"""
if fig is None:
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
ax = fig.add_subplot(subplotspec, **kwargs)
bbox = ax.bbox
axes_to_delete = []
for other_ax in fig.axes:
if other_ax == ax:
continue
if bbox.fully_overlaps(other_ax.bbox):
axes_to_delete.append(other_ax)
for ax_to_del in axes_to_delete:
delaxes(ax_to_del)
return ax
|
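Illustrative aside: the Notes section in both versions states the GridSpec equivalence. A minimal usage sketch (Agg backend so it runs headless; the figure contents are arbitrary):

```python
# Usage sketch of the subplot2grid / GridSpec equivalence described in the Notes.
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

fig = plt.figure()
ax_top = plt.subplot2grid((3, 3), (0, 0), colspan=3, fig=fig)  # via the helper
ax_left = fig.add_subplot(GridSpec(3, 3).new_subplotspec((1, 0), rowspan=2))  # equivalent spelling
fig.savefig("grid_demo.png")
```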
462 |
def _modify_user_status(request, domain, user_id, is_active):
user = CommCareUser.get_by_user_id(user_id, domain)
if (not _can_edit_workers_location(request.couch_user, user)
or (is_active and not can_add_extra_mobile_workers(request))):
return json_response({
'error': _("No Permission."),
})
if not is_active and user.user_location_id:
return json_response({
'error': _("This is a location user, archive or delete the "
"corresponding location to deactivate it."),
})
user.is_active = is_active
user.save(spawn_task=True)
log_model_change(request.user, user.get_django_user(), message={'is_active': is_active},
action=ModelAction.UPDATE)
return json_response({
'success': True,
})
|
def _modify_user_status(request, domain, user_id, is_active):
user = CommCareUser.get_by_user_id(user_id, domain)
if (not _can_edit_workers_location(request.couch_user, user)
or (is_active and not can_add_extra_mobile_workers(request))):
return json_response({
'error': _("No Permission."),
})
if not is_active and user.user_location_id:
return json_response({
'error': _("This is a location user, archive or delete the "
"corresponding location to deactivate it."),
})
user.is_active = is_active
user.save(spawn_task=True)
log_model_change(request.user, user.get_django_user(), fields_changed={'is_active': is_active},
action=ModelAction.UPDATE)
return json_response({
'success': True,
})
|
44,831 |
def _get_or_create_conda_env_root_dir(nfs_root_dir):
global _CONDA_ENV_ROOT_DIR
if _CONDA_ENV_ROOT_DIR is None:
if nfs_root_dir is not None:
            # In Databricks, '/local_disk0/.ephemeral_nfs' is mounted as an NFS disk;
            # the data stored on the disk is shared with all remote nodes.
root_dir = os.path.join(nfs_root_dir, "conda_envs")
os.makedirs(root_dir, exist_ok=True)
conda_env_root_dir = tempfile.mkdtemp(dir=root_dir)
# TODO: register deleting tmp_model_dir handler when exit
else:
import atexit
import shutil
conda_env_root_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, conda_env_root_dir, ignore_errors=True)
# Create individual package cache dir "pkgs" under the conda_env_root_dir
# for each python process.
# Note: shared conda package cache dir causes race condition issues:
# See https://github.com/conda/conda/issues/8870
pkg_cache_dir = os.path.join(conda_env_root_dir, "pkgs")
os.mkdir(pkg_cache_dir)
_CONDA_ENV_ROOT_DIR = conda_env_root_dir
return _CONDA_ENV_ROOT_DIR
|
def _get_or_create_conda_env_root_dir(nfs_root_dir):
global _CONDA_ENV_ROOT_DIR
if _CONDA_ENV_ROOT_DIR is None:
if nfs_root_dir is not None:
            # In Databricks, '/local_disk0/.ephemeral_nfs' is mounted as an NFS disk;
            # the data stored on the disk is shared with all remote nodes.
root_dir = os.path.join(nfs_root_dir, "conda_envs")
os.makedirs(root_dir, exist_ok=True)
conda_env_root_dir = tempfile.mkdtemp(dir=root_dir)
# TODO: register deleting conda_env_root_dir when exit
else:
import atexit
import shutil
conda_env_root_dir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, conda_env_root_dir, ignore_errors=True)
# Create individual package cache dir "pkgs" under the conda_env_root_dir
# for each python process.
# Note: shared conda package cache dir causes race condition issues:
# See https://github.com/conda/conda/issues/8870
pkg_cache_dir = os.path.join(conda_env_root_dir, "pkgs")
os.mkdir(pkg_cache_dir)
_CONDA_ENV_ROOT_DIR = conda_env_root_dir
return _CONDA_ENV_ROOT_DIR
|
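Illustrative aside: the function above combines a per-process temporary root, an atexit cleanup hook, and a private "pkgs" cache directory. A stripped-down sketch of just that tempfile + atexit idiom, without the conda or NFS specifics:

```python
# Stripped-down sketch of the tempfile + atexit cleanup idiom used above;
# the directory names are placeholders.
import atexit
import os
import shutil
import tempfile


def make_scratch_root() -> str:
    root = tempfile.mkdtemp(prefix="scratch-")
    # Remove the whole tree when the interpreter exits.
    atexit.register(shutil.rmtree, root, ignore_errors=True)
    # Per-process cache directory, mirroring the private "pkgs" dir above.
    os.mkdir(os.path.join(root, "pkgs"))
    return root


print(make_scratch_root())
```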
57,836 |
def format_cloud_resource_data(cloud_resources: List[Dict[str, Any]]) -> List[CommandResults]:
cloud_resource_data_list: List[Dict[str, Any]] = []
command_results = []
hr_cloud_resource_list = []
for cloud_resource_data in cloud_resources:
cloud_resource_data_list.append(cloud_resource_data)
cloud_resource_standard_context = DBotScoreOnlyIndicator(
dbot_score=Common.DBotScore(
indicator=cloud_resource_data['ips'][0],
indicator_type=DBotScoreType.IP,
integration_name="ExpanseV2",
score=Common.DBotScore.NONE
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown("New IP indicator was found", cloud_resource_standard_context.to_context()),
indicator=cloud_resource_standard_context
))
hr_cloud_resource_list.append({
"ID": cloud_resource_data.get("id"),
"IP": cloud_resource_data.get("ips"),
"Domain": cloud_resource_data.get("domain"),
"Cloud Provider": cloud_resource_data.get("provider", {}).get("name"),
"Asset Type": cloud_resource_data.get("type"),
"Instance ID": cloud_resource_data.get("instanceId"),
"Region": cloud_resource_data.get("region"),
"Source": cloud_resource_data.get("sourceDetails"),
})
readable_output = tableToMarkdown(
'Expanse Cloud Resource List', hr_cloud_resource_list) if len(hr_cloud_resource_list) > 0 else \
"## No Cloud Resources found"
command_results.append(CommandResults(
outputs_prefix='Expanse.CloudResource',
outputs_key_field='id',
outputs=cloud_resource_data_list if len(cloud_resource_data_list) > 0 else None,
readable_output=readable_output
))
return command_results
|
def format_cloud_resource_data(cloud_resources: List[Dict[str, Any]]) -> List[CommandResults]:
cloud_resource_data_list: List[Dict[str, Any]] = []
command_results = []
hr_cloud_resource_list = []
for cloud_resource_data in cloud_resources:
cloud_resource_data_list.append(cloud_resource_data)
cloud_resource_standard_context = DBotScoreOnlyIndicator(
dbot_score=Common.DBotScore(
indicator=cloud_resource_data['ips'][0],
indicator_type=DBotScoreType.IP,
integration_name="ExpanseV2",
score=Common.DBotScore.NONE
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown("New IP indicator was found", cloud_resource_standard_context.to_context()),
indicator=cloud_resource_standard_context
))
hr_cloud_resource_list.append({
"ID": cloud_resource_data.get("id"),
"IP": cloud_resource_data.get("ips"),
"Domain": cloud_resource_data.get("domain"),
"Cloud Provider": cloud_resource_data.get("provider", {}).get("name"),
"Asset Type": cloud_resource_data.get("type"),
"Instance ID": cloud_resource_data.get("instanceId"),
"Region": cloud_resource_data.get("region"),
"Source": cloud_resource_data.get("sourceDetails"),
})
readable_output = tableToMarkdown(
'Expanse Cloud Resource List', hr_cloud_resource_list) if len(hr_cloud_resource_list) > 0 else \
"## No Cloud Resources found"
command_results.append(CommandResults(
outputs_prefix='Expanse.CloudResource',
outputs_key_field='id',
outputs=cloud_resource_data_list if len(cloud_resource_data_list) > 0 else None,
readable_output=readable_output,
raw_response=cloud_resources
))
return command_results
|
40,415 |
def run(args: argparse.ArgumentParser) -> None:
print("BENCHMARK STARTS")
for dataset_name in args.datasets:
print("Dataset: ", dataset_name)
if args.datasets_root == 'data':
root = osp.join(osp.dirname(osp.realpath(__file__)), '../..',
'data',
dataset_name.partition("-")[2])
else:
root = args.datasets_root
if dataset_name == 'ogbn-mag':
transform = T.ToUndirected(merge=True)
dataset = OGB_MAG(root=root, transform=transform)
train_idx = ('paper', dataset[0]['paper'].train_mask)
valid_idx = ('paper', dataset[0]['paper'].val_mask)
neighbour_sizes = args.hetero_neighbour_sizes
else:
dataset = PygNodePropPredDataset(dataset_name, root)
split_idx = dataset.get_idx_split()
train_idx = split_idx['train']
valid_idx = split_idx['valid']
neighbour_sizes = args.homo_neighbour_sizes
data = dataset[0].to(args.device)
print('Train sampling')
for sizes in neighbour_sizes:
print(f'Sizes={sizes}')
for batch_size in args.batch_sizes:
train_loader = NeighborLoader(data, num_neighbors=sizes,
input_nodes=train_idx,
batch_size=batch_size,
shuffle=True,
num_workers=args.num_workers)
start = default_timer()
iter = 0
times = []
for run in range(args.runs):
start = default_timer()
for batch in train_loader:
iter = iter + 1
stop = default_timer()
times.append(round(stop - start, 3))
average_time = round(sum(times) / args.runs, 3)
print(f'Batch size={batch_size} iterations={iter} ' +
f'times={times} average_time={average_time}')
print('Validation sampling')
for batch_size in args.eval_batch_sizes:
val_loader = NeighborLoader(data, num_neighbors=[-1],
input_nodes=valid_idx,
batch_size=batch_size, shuffle=False,
num_workers=args.num_workers)
start = default_timer()
iter = 0
times = []
for run in range(args.runs):
start = default_timer()
for batch in val_loader:
iter = iter + 1
stop = default_timer()
times.append(round(stop - start, 3))
average_time = round(sum(times) / args.runs, 3)
print(f'Batch size={batch_size} iterations={iter} ' +
f'times={times} average_time={average_time}')
|
def run(args: argparse.ArgumentParser) -> None:
print("BENCHMARK STARTS")
for dataset_name in args.datasets:
print("Dataset: ", dataset_name)
if args.datasets_root == 'data':
root = osp.join(osp.dirname(osp.realpath(__file__)), '../..',
'data',
dataset_name.partition("-")[2])
else:
root = args.datasets_root
if dataset_name == 'ogbn-mag':
transform = T.ToUndirected(merge=True)
dataset = OGB_MAG(root=root, transform=transform)
train_idx = ('paper', dataset[0]['paper'].train_mask)
valid_idx = ('paper', dataset[0]['paper'].val_mask)
neighbour_sizes = args.hetero_neighbour_sizes
else:
dataset = PygNodePropPredDataset(dataset_name, root)
split_idx = dataset.get_idx_split()
train_idx = split_idx['train']
valid_idx = split_idx['valid']
neighbour_sizes = args.homo_neighbour_sizes
data = dataset[0].to(args.device)
print('Train sampling')
for sizes in neighbour_sizes:
print(f'Sizes={sizes}')
for batch_size in args.batch_sizes:
train_loader = NeighborLoader(data, num_neighbors=sizes,
input_nodes=train_idx,
batch_size=batch_size,
shuffle=True,
num_workers=args.num_workers)
start = default_timer()
iter = 0
times = []
for run in range(args.runs):
start = default_timer()
for batch in train_loader:
iter = iter + 1
stop = default_timer()
times.append(round(stop - start, 3))
average_time = round(sum(times) / args.runs, 3)
print(f'Batch size={batch_size} iterations={iter} ' +
f'times={times} average_time={average_time}')
print('Validation sampling')
for batch_size in args.eval_batch_sizes:
val_loader = NeighborLoader(data, num_neighbors=[-1],
input_nodes=valid_idx,
batch_size=batch_size, shuffle=False,
num_workers=args.num_workers, )
start = default_timer()
iter = 0
times = []
for run in range(args.runs):
start = default_timer()
for batch in val_loader:
iter = iter + 1
stop = default_timer()
times.append(round(stop - start, 3))
average_time = round(sum(times) / args.runs, 3)
print(f'Batch size={batch_size} iterations={iter} ' +
f'times={times} average_time={average_time}')
|
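Illustrative aside: both versions time each loader by accumulating iterations over `args.runs` passes with timeit's default_timer. A framework-free sketch of that timing loop, where the loader can be any iterable (a PyG NeighborLoader in the original code):

```python
# Framework-free sketch of the timing loop used in the benchmark above;
# `loader` can be any iterable (a PyG NeighborLoader in the original code).
from timeit import default_timer


def time_loader(loader, runs: int = 3):
    times, iterations = [], 0
    for _ in range(runs):
        start = default_timer()
        for _batch in loader:
            iterations += 1  # accumulated across runs, as in the original
        times.append(round(default_timer() - start, 3))
    return times, round(sum(times) / runs, 3), iterations


times, average, iters = time_loader(range(1000), runs=3)
print(f"iterations={iters} times={times} average_time={average}")
```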
34,024 |
def format_error_message(exception_message: str, task_exception=False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message: A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
pass
return "\n".join(lines)
|
def format_error_message(exception_message: str, task_exception: bool = False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
Args:
exception_message: A message generated by traceback.format_exc().
Returns:
A string of the formatted exception message.
"""
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
pass
return "\n".join(lines)
|
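Illustrative aside: the docstring says that for task exceptions lines 1 and 2 of the traceback are dropped. A concrete input/output illustration; the function body is restated so the sketch is self-contained, and the message is a hand-written stand-in for traceback.format_exc() output:

```python
# Concrete illustration of the line-dropping behaviour described above; the
# message is a hand-written stand-in for traceback.format_exc() output.
def format_error_message(exception_message: str, task_exception: bool = False) -> str:
    lines = exception_message.split("\n")
    if task_exception:
        lines = lines[0:1] + lines[3:]  # drop the two boilerplate worker lines
    return "\n".join(lines)


msg = "Traceback (most recent call last):\n  worker frame 1\n  worker frame 2\n  real frame\nValueError: boom"
print(format_error_message(msg, task_exception=True))
# Traceback (most recent call last):
#   real frame
# ValueError: boom
```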
26,030 |
def managed_db_log_replay_stop(
cmd,
client,
database_name,
managed_instance_name,
resource_group_name):
'''
Stop log replay restore.
'''
restore_details_client = get_sql_managed_database_restore_details_operations(cmd.cli_ctx, None)
# Determine if managed DB was created using log replay service, raise exception if not
logReplayData = restore_details_client.get(
database_name=database_name,
managed_instance_name=managed_instance_name,
resource_group_name=resource_group_name,
restore_details_name=RestoreDetailsName.DEFAULT)
# If type is present, it must be lrsrestore in order to proceed with stop-log-replay
if (hasattr(logReplayData, 'type_properties_type') and logReplayData.type_properties_type.lower() != 'lrsrestore'):
        raise CLIError(f'Cannot stop the log replay as database {database_name} on the instance {managed_instance_name} '
                       f'in the resource group {resource_group_name} was not created with log replay service.')
return client.begin_delete(
database_name=database_name,
managed_instance_name=managed_instance_name,
resource_group_name=resource_group_name)
|
def managed_db_log_replay_stop(
cmd,
client,
database_name,
managed_instance_name,
resource_group_name):
'''
Stop log replay restore.
'''
restore_details_client = get_sql_managed_database_restore_details_operations(cmd.cli_ctx, None)
# Determine if managed DB was created using log replay service, raise exception if not
logReplayData = restore_details_client.get(
database_name=database_name,
managed_instance_name=managed_instance_name,
resource_group_name=resource_group_name,
restore_details_name=RestoreDetailsName.DEFAULT)
# If type is present, it must be lrsrestore in order to proceed with stop-log-replay
if (hasattr(logReplayData, 'type_properties_type') and logReplayData.type_properties_type.lower() != 'lrsrestore'):
        raise CLIError(f'Cannot stop the log replay as database {database_name} on the instance {managed_instance_name} '
                       f'in the resource group {resource_group_name} was not created with log replay service.')
return client.begin_delete(
database_name=database_name,
managed_instance_name=managed_instance_name,
resource_group_name=resource_group_name)
|
27,992 |
def collect_file_info(files):
"""Collect file information about given list of files like:
- last modification time
- content hash
    If the file is missing, the corresponding data will
be empty.
"""
res = {}
for sf in files:
res[sf] = {}
if os.path.isfile(sf):
content_hash = get_file_content_hash(sf)
last_mod_time = util.get_last_mod_time(sf)
res[sf]["hash"] = content_hash
res[sf]["mtime"] = last_mod_time
return res
|
def collect_file_info(files):
"""Collect file information about given list of files like:
- last modification time
- content hash
    If the file is missing, the corresponding data will
be empty.
"""
res = {}
for sf in files:
res[sf] = {}
if os.path.isfile(sf):
content_hash = get_file_content_hash(sf)
last_mod_time = util.get_last_mod_time(sf)
res[sf]["hash"] = get_file_content_hash(sf)
res[sf]["mtime"] = last_mod_time
return res
|
5,997 |
def get_simd_group_size(dev, type_size):
"""Return an estimate of how many work items will be executed across SIMD
lanes. This returns the size of what Nvidia calls a warp and what AMD calls
a wavefront.
Only refers to implicit SIMD.
:arg type_size: number of bytes in vector entry type.
"""
try:
return dev.warp_size_nv
except Exception:
pass
lc_vendor = dev.platform.vendor.lower()
lc_real_vendor = dev.vendor.lower()
if "nvidia" in lc_vendor or "nvidia" in lc_real_vendor:
return 32
if ("advanced micro" in lc_vendor or "ati" in lc_vendor
or "advanced micro" in lc_real_vendor or "ati" in lc_real_vendor):
if dev.type & cl.device_type.GPU:
# Tomasz Rybak says, in response to reduction misbehaving on the AMD
# 'Loveland' APU:
#
# Like in CUDA reduction bug (related to Fermi) it again seems
# to be related to too eager concurrency when reducing results.
# According to http://oscarbg.blogspot.com/2009/10/news-from-web.html
# "Actually the wavefront size is only 64 for the highend cards(48XX,
# 58XX, 57XX), but 32 for the middleend cards and 16 for the lowend
# cards."
# IMO we should use PREFERRED_WORK_GROUP_SIZE_MULTIPLE to get
# non_sync_size. At the same size we lose SIMD CPU optimisation,
# but I do not know for now how to fix those two at the same time.
# Attached patch fixes problem on Loveland, not breaking anything on
# NVIDIA ION.
# This is therefore our best guess as to the SIMD group size.
return reasonable_work_group_size_multiple(dev)
elif dev.type & cl.device_type.CPU:
return 1
else:
raise RuntimeError("unexpected AMD device type")
if dev.type & cl.device_type.CPU:
# implicit assumption: Impl. will vectorize
return 1
return None
|
def get_simd_group_size(dev, type_size):
"""Return an estimate of how many work items will be executed across SIMD
lanes. This returns the size of what Nvidia calls a warp and what AMD calls
a wavefront.
Only refers to implicit SIMD.
:arg type_size: number of bytes in vector entry type.
"""
try:
return dev.warp_size_nv
except Exception:
pass
lc_vendor = dev.platform.vendor.lower()
lc_dev_vendor = dev.vendor.lower()
if "nvidia" in lc_vendor or "nvidia" in lc_real_vendor:
return 32
if ("advanced micro" in lc_vendor or "ati" in lc_vendor
or "advanced micro" in lc_real_vendor or "ati" in lc_real_vendor):
if dev.type & cl.device_type.GPU:
# Tomasz Rybak says, in response to reduction misbehaving on the AMD
# 'Loveland' APU:
#
# Like in CUDA reduction bug (related to Fermi) it again seems
# to be related to too eager concurrency when reducing results.
# According to http://oscarbg.blogspot.com/2009/10/news-from-web.html
# "Actually the wavefront size is only 64 for the highend cards(48XX,
# 58XX, 57XX), but 32 for the middleend cards and 16 for the lowend
# cards."
# IMO we should use PREFERRED_WORK_GROUP_SIZE_MULTIPLE to get
# non_sync_size. At the same size we lose SIMD CPU optimisation,
# but I do not know for now how to fix those two at the same time.
# Attached patch fixes problem on Loveland, not breaking anything on
# NVIDIA ION.
# This is therefore our best guess as to the SIMD group size.
return reasonable_work_group_size_multiple(dev)
elif dev.type & cl.device_type.CPU:
return 1
else:
raise RuntimeError("unexpected AMD device type")
if dev.type & cl.device_type.CPU:
# implicit assumption: Impl. will vectorize
return 1
return None
|
31,532 |
def trustwave_seg_spiderlabs_forward_quarantine_message_as_spam_command(client: Client,
block_number: str,
edition: str,
folder_id: str,
message_name: str,
recipient: str,
server_id: str,
time_logged: str,
spam_report_message: str,
is_spam: str) -> str:
"""Forward a message to Trustwave Spiderlabs to confirm a message is a spam
Args:
client (Client): Trustwave SEG API Client
block_number (str): Block number of the message to forward from find message command
edition (str): Edition of the message to forward from find message command
folder_id (str): Folder ID of the message to forward from find message command
message_name (str): Message name of the message to forward from find message command
        recipient (str): Recipient of the message to forward from find message command
server_id (str): Server ID of the message to forward from find message command
time_logged (str): Time logged of the message to forward from find message command
spam_report_message (str): The reason for the report
is_spam (str): Should it be reported as spam
Returns:
str: An informative string about the action
"""
response = client.forward_spam(int(block_number), edition, int(folder_id),
message_name, recipient, int(
server_id), int(time_logged),
argToBoolean(is_spam), spam_report_message)
if response.status_code == 200:
return "The message forwarded to Spiderlabs"
return "Something went wrong..."
|
def trustwave_seg_spiderlabs_forward_quarantine_message_as_spam_command(client: Client,
block_number: str,
edition: str,
folder_id: str,
message_name: str,
recipient: str,
server_id: str,
time_logged: str,
spam_report_message: str,
is_spam: str) -> str:
"""Forward a message to Trustwave Spiderlabs to confirm a message is a spam
Args:
client (Client): Trustwave SEG API Client
block_number (str): Block number of the message to forward from find message command
edition (str): Edition of the message to forward from find message command
folder_id (str): Folder ID of the message to forward from find message command
message_name (str): Message name of the message to forward from find message command
        recipient (str): Recipient of the message to forward from find message command
server_id (str): Server ID of the message to forward from find message command
time_logged (str): Time logged of the message to forward from find message command
spam_report_message (str): The reason for the report
is_spam (str): Should it be reported as spam
Returns:
str: An informative string about the action
"""
response = client.forward_spam(int(block_number), edition, int(folder_id),
message_name, recipient, int(
server_id), int(time_logged),
argToBoolean(is_spam), spam_report_message)
return "The message was forwarded to Spiderlabs."
|
26,617 |
def _tabulate_dag_runs(dag_runs: List[DagRun], tablefmt="fancy_grid"):
tabulat_data = (
{
'ID': dag_run.id,
'Run ID': dag_run.run_id,
'State': dag_run.state,
'DAG ID': dag_run.dag_id,
'Execution date': dag_run.execution_date.isoformat() if dag_run.execution_date else "",
'Start date': dag_run.start_date.isoformat() if dag_run.start_date else '',
} for dag_run in dag_runs
)
return "\n%s" % tabulate(
tabular_data=tabulat_data,
tablefmt=tablefmt
)
|
def _tabulate_dag_runs(dag_runs: List[DagRun], tablefmt="fancy_grid"):
tabulat_data = (
{
'ID': dag_run.id,
'Run ID': dag_run.run_id,
'State': dag_run.state,
'DAG ID': dag_run.dag_id,
'Execution date': dag_run.execution_date.isoformat() if dag_run.execution_date else None,
'Start date': dag_run.start_date.isoformat() if dag_run.start_date else '',
} for dag_run in dag_runs
)
return "\n%s" % tabulate(
tabular_data=tabulat_data,
tablefmt=tablefmt
)
|
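Illustrative aside: the helper above feeds a generator of dicts straight into tabulate with the fancy_grid format. A minimal, runnable sketch of that usage with placeholder rows:

```python
# Minimal tabulate usage sketch mirroring the helper above; the rows are placeholders.
from tabulate import tabulate

rows = (
    {"ID": i, "Run ID": f"run_{i}", "State": "success", "DAG ID": "example_dag"}
    for i in range(2)
)
print("\n%s" % tabulate(tabular_data=rows, tablefmt="fancy_grid"))
```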
35,846 |
def test_msg_data_internal_fn_usage_denied(get_contract):
code = """
@internal
def foo() -> Bytes[4]:
return msg.data[4]
@external
def bar() -> Bytes[4]:
return self.foo()
"""
with pytest.raises(StateAccessViolation):
compiler.compile_code(code)
|
def test_msg_data_internal_fn_usage_denied(get_contract):
code = """
@internal
def foo() -> Bytes[4]:
return slice(msg.data, 0, 4)
@external
def bar() -> Bytes[4]:
return self.foo()
"""
with pytest.raises(StateAccessViolation):
compiler.compile_code(code)
|
32,928 |
def truncate_to_length(value, max_length, suffix="..."):
"""Truncate a string to a maximum length and append a suffix."""
if not value or len(value) <= max_length:
return value
return value[:max_length] + suffix
|
def truncate_to_length(value, max_length, suffix="..."):
"""Truncate a string to a maximum length and append a suffix."""
if not value or len(value) <= max_length:
return value
return value[:max_length-len(suffix)] + suffix
|
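Illustrative aside: the two versions above differ in whether the suffix counts toward max_length. A quick comparison on a concrete input makes the behavioural difference visible; both helper names below are hypothetical and used only for this sketch:

```python
# Quick comparison of the two truncation behaviours in the pair above; both
# helper names are hypothetical and used only for this sketch.
def truncate_append(value, max_length, suffix="..."):
    # first version: the suffix is appended on top of max_length characters
    if not value or len(value) <= max_length:
        return value
    return value[:max_length] + suffix


def truncate_keep_total(value, max_length, suffix="..."):
    # second version: the suffix counts toward max_length
    if not value or len(value) <= max_length:
        return value
    return value[:max_length - len(suffix)] + suffix


text = "abcdefghij"
print(truncate_append(text, 6))      # 'abcdef...' -> 9 characters
print(truncate_keep_total(text, 6))  # 'abc...'    -> 6 characters
```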
42,594 |
def test_coingecko_identifiers_are_reachable():
"""
Test that all assets have a coingecko entry and that all the identifiers exist in coingecko
"""
coingecko = Coingecko()
all_coins = coingecko.all_coins()
# If coingecko identifier is missing test is trying to suggest possible assets.
symbol_checked_exceptions = ( # This is the list of already checked assets
# only 300 in coingecko is spartan coin: https://www.coingecko.com/en/coins/spartan
ethaddress_to_identifier('0xaEc98A708810414878c3BCDF46Aad31dEd4a4557'),
        # no arcade city in coingecko. Got other ARC symbol tokens
ethaddress_to_identifier('0xAc709FcB44a43c35F0DA4e3163b117A17F3770f5'),
# no avalon in coingecko. Got travalala.com
ethaddress_to_identifier('0xeD247980396B10169BB1d36f6e278eD16700a60f'),
# no Bionic in coingecko. Got Bnoincoin
ethaddress_to_identifier('0xEf51c9377FeB29856E61625cAf9390bD0B67eA18'),
# no Bitair in coingecko. Got other BTCA symbol tokens
ethaddress_to_identifier('0x02725836ebF3eCDb1cDf1c7b02FcbBfaa2736AF8'),
# no Bither in coingecko. Got other BTR symbol tokens
ethaddress_to_identifier('0xcbf15FB8246F679F9Df0135881CB29a3746f734b'),
# no Content and Ad Network in coingecko. Got other CAN symbol tokens
ethaddress_to_identifier('0x5f3789907b35DCe5605b00C0bE0a7eCDBFa8A841'),
# no DICE money in coingecko. Got other CET symbol tokens
ethaddress_to_identifier('0xF660cA1e228e7BE1fA8B4f5583145E31147FB577'),
# no Cyberfi in coingecko. Got other CFI symbol tokens
ethaddress_to_identifier('0x12FEF5e57bF45873Cd9B62E9DBd7BFb99e32D73e'),
# The DAO is not in coingecko. Got other DAO symbol tokens
ethaddress_to_identifier('0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413'),
# no Earth Token in coingecko. Got other EARTH symbol token and in BSC
ethaddress_to_identifier('0x900b4449236a7bb26b286601dD14d2bDe7a6aC6c'),
# no iDice in coingecko. Got other ICE symbol token
ethaddress_to_identifier('0x5a84969bb663fb64F6d015DcF9F622Aedc796750'),
# no InvestFeed token in coingecko. Got other IFT symbol token
ethaddress_to_identifier('0x7654915A1b82D6D2D0AFc37c52Af556eA8983c7E'),
# no Invacio token in coingecko. Got other INV symbol token
ethaddress_to_identifier('0xEcE83617Db208Ad255Ad4f45Daf81E25137535bb'),
# no Live Start token in coingecko. Got other LIVE symbol token
ethaddress_to_identifier('0x24A77c1F17C547105E14813e517be06b0040aa76'),
# no Musiconomi in coingecko. Got other MCI symbol token
ethaddress_to_identifier('0x138A8752093F4f9a79AaeDF48d4B9248fab93c9C'),
# no Remicoin in coingecko. Got other RMC symbol token
ethaddress_to_identifier('0x7Dc4f41294697a7903C4027f6Ac528C5d14cd7eB'),
# no Sola token in coingecko. Got other SOL symbol token
ethaddress_to_identifier('0x1F54638b7737193FFd86c19Ec51907A7c41755D8'),
# no Bitcoin card token in coingecko. Got other VD symbol token
ethaddress_to_identifier('0x9a9bB9b4b11BF8eccff84B58a6CCCCD4058A7f0D'),
# no Venus Energy token in coingecko. Got other VENUS symbol token
ethaddress_to_identifier('0xEbeD4fF9fe34413db8fC8294556BBD1528a4DAca'),
# no WinToken in coingecko. Got other WIN symbol token
ethaddress_to_identifier('0xBfaA8cF522136C6FAfC1D53Fe4b85b4603c765b8'),
# no Snowball in coingecko. Got other SNBL symbol token
ethaddress_to_identifier('0x198A87b3114143913d4229Fb0f6D4BCb44aa8AFF'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xFD25676Fc2c4421778B18Ec7Ab86E7C5701DF187'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xcca0c9c383076649604eE31b20248BC04FdF61cA'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xAef38fBFBF932D1AeF3B808Bc8fBd8Cd8E1f8BC5'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x662aBcAd0b7f345AB7FfB1b1fbb9Df7894f18e66'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x497bAEF294c11a5f0f5Bea3f2AdB3073DB448B56'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xAbdf147870235FcFC34153828c769A70B3FAe01F'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x4DF47B4969B2911C966506E3592c41389493953b'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xB563300A3BAc79FC09B93b6F84CE0d4465A2AC27'),
'ACC', # no Adcoin in Coingecko. Got other ACC symbol token
'APH', # no Aphelion in Coingecko. Got other APH symbol token
'ARCH', # no ARCH in Coingecko. Got other ARCH symbol token
'BET-2', # no BetaCoin in Coingecko. Got other BET symbol token
'CCN-2', # no CannaCoin in Coingecko. Got other CCN symbol token
'CHAT', # no ChatCoin in Coingecko. Got other CHAT symbol token
'CMT-2', # no Comet in Coingecko. Got other CMT symbol token
'CRC-2', # no CrownCoin in Coingecko. Got other CRC symbol token
'CYC', # no ConspiracyCoin in Coingecko. Got other CYC symbol token
'EDR-2', # no E-Dinar coin in Coingecko. Got other EDR symbol token
'FLAP', # no FlappyCoin coin in Coingecko. Got other FLAP symbol token
'HC-2', # no Harvest Masternode Coin in Coingecko. Got other HC symbol token
'KEY-3', # no KeyCoin Coin in Coingecko. Got other KEY symbol token
'MUSIC', # Music in coingecko is nftmusic and not our MUSIC
'NAUT', # Token suggestion doesn't match token in db
'OCC', # no Octoin Coin in Coingecko. Got other OCC symbol token
'SPA', # no SpainCoin Coin in Coingecko. Got other SPA symbol token
'WEB-2', # no Webchain in Coingecko. Got other WEB symbol token
'WOLF', # no Insanity Coin in Coingecko. Got other WOLF symbol token
'XAI', # Token suggestion doesn't match token in db
'XPB', # no Pebble Coin in Coingecko. Got other XPB symbol token
'XNS', # no Insolar in Coingecko. Got other XNS symbol token
'PIGGY', # Coingecko listed another asset PIGGY that is not Piggy Coin
# coingecko listed CAR that is not our token CarBlock.io
ethaddress_to_identifier('0x4D9e23a3842fE7Eb7682B9725cF6c507C424A41B'),
# coingecko listed newb farm with symbol NEWB that is not our newb
ethaddress_to_identifier('0x5A63Eb358a751b76e58325eadD86c2473fC40e87'),
# coingecko has BBC that is not tradove
ethaddress_to_identifier('0xe7D3e4413E29ae35B0893140F4500965c74365e5'),
# MNT is Meownaut in coingecko and not media network token
ethaddress_to_identifier('0xA9877b1e05D035899131DBd1e403825166D09f92'),
# Project quantum in coingecko but we have Qubitica
ethaddress_to_identifier('0xCb5ea3c190d8f82DEADF7ce5Af855dDbf33e3962'),
# We have Cashbery Coin that is not listed in the coingecko list
'CBC-2',
)
for asset_data in GlobalDBHandler().get_all_asset_data(mapping=False):
identifier = asset_data.identifier
if identifier in DELISTED_ASSETS:
# delisted assets won't be in the mapping
continue
if asset_data.asset_type == AssetType.FIAT:
continue
found = True
coingecko_str = asset_data.coingecko
have_id = True
if coingecko_str is not None or coingecko_str != '':
have_id = False
found = False
for entry in all_coins:
if coingecko_str == entry['id']:
found = True
break
suggestions = []
if not found:
for entry in all_coins:
if entry['symbol'].upper() == asset_data.symbol.upper():
suggestions.append((entry['id'], entry['name'], entry['symbol']))
continue
if entry['name'].upper() == asset_data.symbol.upper():
suggestions.append((entry['id'], entry['name'], entry['symbol']))
continue
if have_id is False and (len(suggestions) == 0 or identifier in symbol_checked_exceptions):
continue # no coingecko identifier and no suggestion or is in known exception
msg = f'Asset {identifier} with symbol {asset_data.symbol} coingecko mapping does not exist.' # noqa: E501
if len(suggestions) != 0:
for s in suggestions:
msg += f'\nSuggestion: id:{s[0]} name:{s[1]} symbol:{s[2]}'
if not found:
test_warnings.warn(UserWarning(msg))
|
def test_coingecko_identifiers_are_reachable():
"""
Test that all assets have a coingecko entry and that all the identifiers exist in coingecko
"""
coingecko = Coingecko()
all_coins = coingecko.all_coins()
# If coingecko identifier is missing test is trying to suggest possible assets.
symbol_checked_exceptions = ( # This is the list of already checked assets
# only 300 in coingecko is spartan coin: https://www.coingecko.com/en/coins/spartan
ethaddress_to_identifier('0xaEc98A708810414878c3BCDF46Aad31dEd4a4557'),
        # no arcade city in coingecko. Got other ARC symbol tokens
ethaddress_to_identifier('0xAc709FcB44a43c35F0DA4e3163b117A17F3770f5'),
# no avalon in coingecko. Got travalala.com
ethaddress_to_identifier('0xeD247980396B10169BB1d36f6e278eD16700a60f'),
# no Bionic in coingecko. Got Bnoincoin
ethaddress_to_identifier('0xEf51c9377FeB29856E61625cAf9390bD0B67eA18'),
# no Bitair in coingecko. Got other BTCA symbol tokens
ethaddress_to_identifier('0x02725836ebF3eCDb1cDf1c7b02FcbBfaa2736AF8'),
# no Bither in coingecko. Got other BTR symbol tokens
ethaddress_to_identifier('0xcbf15FB8246F679F9Df0135881CB29a3746f734b'),
# no Content and Ad Network in coingecko. Got other CAN symbol tokens
ethaddress_to_identifier('0x5f3789907b35DCe5605b00C0bE0a7eCDBFa8A841'),
# no DICE money in coingecko. Got other CET symbol tokens
ethaddress_to_identifier('0xF660cA1e228e7BE1fA8B4f5583145E31147FB577'),
# no Cyberfi in coingecko. Got other CFI symbol tokens
ethaddress_to_identifier('0x12FEF5e57bF45873Cd9B62E9DBd7BFb99e32D73e'),
# The DAO is not in coingecko. Got other DAO symbol tokens
ethaddress_to_identifier('0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413'),
# no Earth Token in coingecko. Got other EARTH symbol token and in BSC
ethaddress_to_identifier('0x900b4449236a7bb26b286601dD14d2bDe7a6aC6c'),
# no iDice in coingecko. Got other ICE symbol token
ethaddress_to_identifier('0x5a84969bb663fb64F6d015DcF9F622Aedc796750'),
# no InvestFeed token in coingecko. Got other IFT symbol token
ethaddress_to_identifier('0x7654915A1b82D6D2D0AFc37c52Af556eA8983c7E'),
# no Invacio token in coingecko. Got other INV symbol token
ethaddress_to_identifier('0xEcE83617Db208Ad255Ad4f45Daf81E25137535bb'),
# no Live Start token in coingecko. Got other LIVE symbol token
ethaddress_to_identifier('0x24A77c1F17C547105E14813e517be06b0040aa76'),
# no Musiconomi in coingecko. Got other MCI symbol token
ethaddress_to_identifier('0x138A8752093F4f9a79AaeDF48d4B9248fab93c9C'),
# no Remicoin in coingecko. Got other RMC symbol token
ethaddress_to_identifier('0x7Dc4f41294697a7903C4027f6Ac528C5d14cd7eB'),
# no Sola token in coingecko. Got other SOL symbol token
ethaddress_to_identifier('0x1F54638b7737193FFd86c19Ec51907A7c41755D8'),
# no Bitcoin card token in coingecko. Got other VD symbol token
ethaddress_to_identifier('0x9a9bB9b4b11BF8eccff84B58a6CCCCD4058A7f0D'),
# no Venus Energy token in coingecko. Got other VENUS symbol token
ethaddress_to_identifier('0xEbeD4fF9fe34413db8fC8294556BBD1528a4DAca'),
# no WinToken in coingecko. Got other WIN symbol token
ethaddress_to_identifier('0xBfaA8cF522136C6FAfC1D53Fe4b85b4603c765b8'),
# no Snowball in coingecko. Got other SNBL symbol token
ethaddress_to_identifier('0x198A87b3114143913d4229Fb0f6D4BCb44aa8AFF'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xFD25676Fc2c4421778B18Ec7Ab86E7C5701DF187'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xcca0c9c383076649604eE31b20248BC04FdF61cA'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xAef38fBFBF932D1AeF3B808Bc8fBd8Cd8E1f8BC5'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x662aBcAd0b7f345AB7FfB1b1fbb9Df7894f18e66'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x497bAEF294c11a5f0f5Bea3f2AdB3073DB448B56'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xAbdf147870235FcFC34153828c769A70B3FAe01F'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0x4DF47B4969B2911C966506E3592c41389493953b'),
# Token suggestion doesn't match token in db
ethaddress_to_identifier('0xB563300A3BAc79FC09B93b6F84CE0d4465A2AC27'),
'ACC', # no Adcoin in Coingecko. Got other ACC symbol token
'APH', # no Aphelion in Coingecko. Got other APH symbol token
'ARCH', # no ARCH in Coingecko. Got other ARCH symbol token
'BET-2', # no BetaCoin in Coingecko. Got other BET symbol token
'CCN-2', # no CannaCoin in Coingecko. Got other CCN symbol token
'CHAT', # no ChatCoin in Coingecko. Got other CHAT symbol token
'CMT-2', # no Comet in Coingecko. Got other CMT symbol token
'CRC-2', # no CrownCoin in Coingecko. Got other CRC symbol token
'CYC', # no ConspiracyCoin in Coingecko. Got other CYC symbol token
'EDR-2', # no E-Dinar coin in Coingecko. Got other EDR symbol token
'FLAP', # no FlappyCoin coin in Coingecko. Got other FLAP symbol token
'HC-2', # no Harvest Masternode Coin in Coingecko. Got other HC symbol token
'KEY-3', # no KeyCoin Coin in Coingecko. Got other KEY symbol token
'MUSIC', # Music in coingecko is nftmusic and not our MUSIC
'NAUT', # Token suggestion doesn't match token in db
'OCC', # no Octoin Coin in Coingecko. Got other OCC symbol token
'SPA', # no SpainCoin Coin in Coingecko. Got other SPA symbol token
'WEB-2', # no Webchain in Coingecko. Got other WEB symbol token
'WOLF', # no Insanity Coin in Coingecko. Got other WOLF symbol token
'XAI', # Token suggestion doesn't match token in db
'XPB', # no Pebble Coin in Coingecko. Got other XPB symbol token
'XNS', # no Insolar in Coingecko. Got other XNS symbol token
'PIGGY', # Coingecko listed another asset PIGGY that is not Piggy Coin
# coingecko listed CAR that is not our token CarBlock.io
ethaddress_to_identifier('0x4D9e23a3842fE7Eb7682B9725cF6c507C424A41B'),
# coingecko listed newb farm with symbol NEWB that is not our newb
ethaddress_to_identifier('0x5A63Eb358a751b76e58325eadD86c2473fC40e87'),
# coingecko has BBC that is not tradove
ethaddress_to_identifier('0xe7D3e4413E29ae35B0893140F4500965c74365e5'),
# MNT is Meownaut in coingecko and not media network token
ethaddress_to_identifier('0xA9877b1e05D035899131DBd1e403825166D09f92'),
# Project quantum in coingecko but we have Qubitica
ethaddress_to_identifier('0xCb5ea3c190d8f82DEADF7ce5Af855dDbf33e3962'),
# We have Cashbery Coin for symbol CBC that is not listed in the coingecko list
'CBC-2',
)
for asset_data in GlobalDBHandler().get_all_asset_data(mapping=False):
identifier = asset_data.identifier
if identifier in DELISTED_ASSETS:
# delisted assets won't be in the mapping
continue
if asset_data.asset_type == AssetType.FIAT:
continue
found = True
coingecko_str = asset_data.coingecko
have_id = True
if coingecko_str is not None or coingecko_str != '':
have_id = False
found = False
for entry in all_coins:
if coingecko_str == entry['id']:
found = True
break
suggestions = []
if not found:
for entry in all_coins:
if entry['symbol'].upper() == asset_data.symbol.upper():
suggestions.append((entry['id'], entry['name'], entry['symbol']))
continue
if entry['name'].upper() == asset_data.symbol.upper():
suggestions.append((entry['id'], entry['name'], entry['symbol']))
continue
if have_id is False and (len(suggestions) == 0 or identifier in symbol_checked_exceptions):
continue # no coingecko identifier and no suggestion or is in known exception
msg = f'Asset {identifier} with symbol {asset_data.symbol} coingecko mapping does not exist.' # noqa: E501
if len(suggestions) != 0:
for s in suggestions:
msg += f'\nSuggestion: id:{s[0]} name:{s[1]} symbol:{s[2]}'
if not found:
test_warnings.warn(UserWarning(msg))
|
33,276 |
def _transcribe(item, parent=None, editRate=24, masterMobs=None):
result = None
metadata = {}
    # First let's grab some standard properties that are present on
# many types of AAF objects...
metadata["Name"] = _get_name(item)
metadata["ClassName"] = _get_class_name(item)
if isinstance(item, aaf2.components.Component):
metadata["Length"] = item.length
if isinstance(item, aaf2.core.AAFObject):
for prop in item.properties():
if hasattr(prop, 'name') and hasattr(prop, 'value'):
key = str(prop.name)
value = prop.value
metadata[key] = _transcribe_property(value)
# Now we will use the item's class to determine which OTIO type
# to transcribe into. Note that the order of this if/elif/... chain
# is important, because the class hierarchy of AAF objects is more
# complex than OTIO.
if isinstance(item, aaf2.content.ContentStorage):
result = otio.schema.SerializableCollection()
# Gather all the Master Mobs, so we can find them later by MobID
# when we parse the SourceClips in the composition
if masterMobs is None:
masterMobs = {}
for mob in item.mastermobs():
child = _transcribe(mob, parent=item)
if child is not None:
mobID = child.metadata.get("AAF", {}).get("MobID")
masterMobs[mobID] = child
for mob in item.compositionmobs():
child = _transcribe(mob, parent=item, masterMobs=masterMobs)
_add_child(result, child, mob)
elif isinstance(item, aaf2.mobs.Mob):
result = otio.schema.Timeline()
for slot in item.slots:
child = _transcribe(slot, parent=item, masterMobs=masterMobs)
_add_child(result.tracks, child, slot)
elif isinstance(item, aaf2.components.SourceClip):
result = otio.schema.Clip()
# Evidently the last mob is the one with the timecode
mobs = _find_timecode_mobs(item)
# Get the Timecode start and length values
timecode_info = _extract_timecode_info(mobs[-1]) if mobs else None
length = item.length
startTime = int(metadata.get("StartTime", "0"))
if timecode_info:
timecode_start, timecode_length = timecode_info
startTime += timecode_start
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(startTime, editRate),
otio.opentime.RationalTime(length, editRate)
)
mobID = metadata.get("SourceID")
if masterMobs and mobID:
masterMob = masterMobs.get(mobID)
if masterMob:
media = otio.schema.MissingReference()
if timecode_info:
media.available_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(timecode_start, editRate),
otio.opentime.RationalTime(timecode_length, editRate)
)
# copy the metadata from the master into the media_reference
media.metadata["AAF"] = masterMob.metadata.get("AAF", {})
result.media_reference = media
elif isinstance(item, aaf2.components.Transition):
result = otio.schema.Transition()
# Does AAF support anything else?
result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve
# Extract value and time attributes of both ControlPoints used for
# creating AAF Transition objects
varying_value = None
for param in item.getvalue('OperationGroup').parameters:
if isinstance(param, aaf2.misc.VaryingValue):
varying_value = param
break
if varying_value:
for control_point in varying_value.getvalue('PointList'):
value = control_point.value
time = control_point.time
metadata.setdefault('PointList', []).append({'Value': value,
'Time': time})
in_offset = int(metadata.get("CutPoint", "0"))
out_offset = item.length - in_offset
result.in_offset = otio.opentime.RationalTime(in_offset, editRate)
result.out_offset = otio.opentime.RationalTime(out_offset, editRate)
elif isinstance(item, aaf2.components.Filler):
result = otio.schema.Gap()
length = item.length
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(0, editRate),
otio.opentime.RationalTime(length, editRate)
)
elif isinstance(item, aaf2.components.NestedScope):
# TODO: Is this the right class?
result = otio.schema.Stack()
for slot in item.slots:
child = _transcribe(slot, parent=item, masterMobs=masterMobs)
_add_child(result, child, slot)
elif isinstance(item, aaf2.components.Sequence):
result = otio.schema.Track()
for component in item.components:
child = _transcribe(component, parent=item, masterMobs=masterMobs)
_add_child(result, child, component)
elif isinstance(item, aaf2.components.OperationGroup):
result = _transcribe_operation_group(
item, metadata, editRate, masterMobs
)
elif isinstance(item, aaf2.mobslots.TimelineMobSlot):
result = otio.schema.Track()
child = _transcribe(item.segment, parent=item, masterMobs=masterMobs)
_add_child(result, child, item.segment)
elif isinstance(item, aaf2.mobslots.MobSlot):
result = otio.schema.Track()
child = _transcribe(item.segment, parent=item, masterMobs=masterMobs)
_add_child(result, child, item.segment)
elif isinstance(item, aaf2.components.Timecode):
pass
elif isinstance(item, aaf2.components.Pulldown):
pass
elif isinstance(item, aaf2.components.EdgeCode):
pass
elif isinstance(item, aaf2.components.ScopeReference):
# TODO: is this like FILLER?
result = otio.schema.Gap()
length = item.length
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(0, editRate),
otio.opentime.RationalTime(length, editRate)
)
elif isinstance(item, aaf2.components.DescriptiveMarker):
# Markers come in on their own separate Track.
# TODO: We should consolidate them onto the same track(s) as the clips
# result = otio.schema.Marker()
pass
elif isinstance(item, aaf2.components.Selector):
# If you mute a clip in media composer, it becomes one of these in the
# AAF.
result = _transcribe(
item.getvalue("Selected"),
parent=item, masterMobs=masterMobs
)
alternates = [
_transcribe(alt, parent=item, masterMobs=masterMobs)
for alt in item.getvalue("Alternates")
]
        # muted case -- if there is only one item it's muted, otherwise it's
        # a multi-cam thing
if alternates and len(alternates) == 1:
metadata['muted_clip'] = True
result.name = str(alternates[0].name) + "_MUTED"
metadata['alternates'] = alternates
# @TODO: There are a bunch of other AAF object types that we will
# likely need to add support for. I'm leaving this code here to help
# future efforts to extract the useful information out of these.
# elif isinstance(item, aaf.storage.File):
# self.extendChildItems([item.header])
# elif isinstance(item, aaf.storage.Header):
# self.extendChildItems([item.storage()])
# self.extendChildItems([item.dictionary()])
# elif isinstance(item, aaf.dictionary.Dictionary):
# l = []
# l.append(DummyItem(list(item.class_defs()), 'ClassDefs'))
# l.append(DummyItem(list(item.codec_defs()), 'CodecDefs'))
# l.append(DummyItem(list(item.container_defs()), 'ContainerDefs'))
# l.append(DummyItem(list(item.data_defs()), 'DataDefs'))
# l.append(DummyItem(list(item.interpolation_defs()),
# 'InterpolationDefs'))
# l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs'))
# l.append(DummyItem(list(item.operation_defs()), 'OperationDefs'))
# l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs'))
# l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs'))
# l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs'))
# l.append(DummyItem(list(item.type_defs()), 'TypeDefs'))
# self.extendChildItems(l)
#
# elif isinstance(item, pyaaf.AxSelector):
# self.extendChildItems(list(item.EnumAlternateSegments()))
#
# elif isinstance(item, pyaaf.AxScopeReference):
# #print item, item.GetRelativeScope(),item.GetRelativeSlot()
# pass
#
# elif isinstance(item, pyaaf.AxEssenceGroup):
# segments = []
#
# for i in xrange(item.CountChoices()):
# choice = item.GetChoiceAt(i)
# segments.append(choice)
# self.extendChildItems(segments)
#
# elif isinstance(item, pyaaf.AxProperty):
# self.properties['Value'] = str(item.GetValue())
elif isinstance(item, Iterable):
result = otio.schema.SerializableCollection()
for child in item:
result.append(
_transcribe(
child,
parent=item,
masterMobs=masterMobs
)
)
else:
# For everything else, we just ignore it.
# To see what is being ignored, turn on the debug flag
if debug:
print("SKIPPING: {}: {} -- {}".format(type(item), item, result))
# Did we get anything? If not, we're done
if result is None:
return None
# Okay, now we've turned the AAF thing into an OTIO result
# There's a bit more we can do before we're ready to return the result.
# If we didn't get a name yet, use the one we have in metadata
if result.name is None:
# TODO: Some AAFs contain non-utf8 names?
# This works in Python 2.7, but not 3.5:
# result.name = metadata["Name"].encode('utf8', 'replace')
result.name = str(metadata["Name"])
# Attach the AAF metadata
if not result.metadata:
result.metadata = {}
result.metadata["AAF"] = metadata
# Double check that we got the length we expected
if isinstance(result, otio.core.Item):
length = metadata.get("Length")
if (
length and
result.source_range is not None and
result.source_range.duration.value != length
):
raise otio.exceptions.OTIOError(
"Wrong duration? {} should be {} in {}".format(
result.source_range.duration.value,
length,
result
)
)
# Did we find a Track?
if isinstance(result, otio.schema.Track):
# Try to figure out the kind of Track it is
if hasattr(item, 'media_kind'):
media_kind = str(item.media_kind)
result.metadata["AAF"]["MediaKind"] = media_kind
if media_kind == "Picture":
result.kind = otio.schema.TrackKind.Video
elif media_kind in ("SoundMasterTrack", "Sound"):
result.kind = otio.schema.TrackKind.Audio
else:
# Timecode, Edgecode, others?
result.kind = None
# Done!
return result
|
def _transcribe(item, parent=None, editRate=24, masterMobs=None):
result = None
metadata = {}
    # First let's grab some standard properties that are present on
# many types of AAF objects...
metadata["Name"] = _get_name(item)
metadata["ClassName"] = _get_class_name(item)
if isinstance(item, aaf2.components.Component):
metadata["Length"] = item.length
if isinstance(item, aaf2.core.AAFObject):
for prop in item.properties():
if hasattr(prop, 'name') and hasattr(prop, 'value'):
key = str(prop.name)
value = prop.value
metadata[key] = _transcribe_property(value)
# Now we will use the item's class to determine which OTIO type
# to transcribe into. Note that the order of this if/elif/... chain
# is important, because the class hierarchy of AAF objects is more
# complex than OTIO.
if isinstance(item, aaf2.content.ContentStorage):
result = otio.schema.SerializableCollection()
# Gather all the Master Mobs, so we can find them later by MobID
# when we parse the SourceClips in the composition
if masterMobs is None:
masterMobs = {}
for mob in item.mastermobs():
child = _transcribe(mob, parent=item)
if child is not None:
mobID = child.metadata.get("AAF", {}).get("MobID")
masterMobs[mobID] = child
for mob in item.compositionmobs():
child = _transcribe(mob, parent=item, masterMobs=masterMobs)
_add_child(result, child, mob)
elif isinstance(item, aaf2.mobs.Mob):
result = otio.schema.Timeline()
for slot in item.slots:
child = _transcribe(slot, parent=item, masterMobs=masterMobs)
_add_child(result.tracks, child, slot)
elif isinstance(item, aaf2.components.SourceClip):
result = otio.schema.Clip()
# Evidently the last mob is the one with the timecode
mobs = _find_timecode_mobs(item)
# Get the Timecode start and length values
timecode_info = _extract_timecode_info(mobs[-1]) if mobs else None
length = item.length
startTime = int(metadata.get("StartTime", "0"))
if timecode_info:
timecode_start, timecode_length = timecode_info
startTime += timecode_start
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(startTime, editRate),
otio.opentime.RationalTime(length, editRate)
)
mobID = metadata.get("SourceID")
if masterMobs and mobID:
masterMob = masterMobs.get(mobID)
if masterMob:
media = otio.schema.MissingReference()
if timecode_info:
media.available_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(timecode_start, editRate),
otio.opentime.RationalTime(timecode_length, editRate)
)
# copy the metadata from the master into the media_reference
media.metadata["AAF"] = masterMob.metadata.get("AAF", {})
result.media_reference = media
elif isinstance(item, aaf2.components.Transition):
result = otio.schema.Transition()
# Does AAF support anything else?
result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve
# Extract value and time attributes of both ControlPoints used for
# creating AAF Transition objects
varying_value = None
for param in item.getvalue('OperationGroup').parameters:
if isinstance(param, aaf2.misc.VaryingValue):
varying_value = param
break
if varying_value is None:
for control_point in varying_value.getvalue('PointList'):
value = control_point.value
time = control_point.time
metadata.setdefault('PointList', []).append({'Value': value,
'Time': time})
in_offset = int(metadata.get("CutPoint", "0"))
out_offset = item.length - in_offset
result.in_offset = otio.opentime.RationalTime(in_offset, editRate)
result.out_offset = otio.opentime.RationalTime(out_offset, editRate)
elif isinstance(item, aaf2.components.Filler):
result = otio.schema.Gap()
length = item.length
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(0, editRate),
otio.opentime.RationalTime(length, editRate)
)
elif isinstance(item, aaf2.components.NestedScope):
# TODO: Is this the right class?
result = otio.schema.Stack()
for slot in item.slots:
child = _transcribe(slot, parent=item, masterMobs=masterMobs)
_add_child(result, child, slot)
elif isinstance(item, aaf2.components.Sequence):
result = otio.schema.Track()
for component in item.components:
child = _transcribe(component, parent=item, masterMobs=masterMobs)
_add_child(result, child, component)
elif isinstance(item, aaf2.components.OperationGroup):
result = _transcribe_operation_group(
item, metadata, editRate, masterMobs
)
elif isinstance(item, aaf2.mobslots.TimelineMobSlot):
result = otio.schema.Track()
child = _transcribe(item.segment, parent=item, masterMobs=masterMobs)
_add_child(result, child, item.segment)
elif isinstance(item, aaf2.mobslots.MobSlot):
result = otio.schema.Track()
child = _transcribe(item.segment, parent=item, masterMobs=masterMobs)
_add_child(result, child, item.segment)
elif isinstance(item, aaf2.components.Timecode):
pass
elif isinstance(item, aaf2.components.Pulldown):
pass
elif isinstance(item, aaf2.components.EdgeCode):
pass
elif isinstance(item, aaf2.components.ScopeReference):
# TODO: is this like FILLER?
result = otio.schema.Gap()
length = item.length
result.source_range = otio.opentime.TimeRange(
otio.opentime.RationalTime(0, editRate),
otio.opentime.RationalTime(length, editRate)
)
elif isinstance(item, aaf2.components.DescriptiveMarker):
# Markers come in on their own separate Track.
# TODO: We should consolidate them onto the same track(s) as the clips
# result = otio.schema.Marker()
pass
elif isinstance(item, aaf2.components.Selector):
# If you mute a clip in media composer, it becomes one of these in the
# AAF.
result = _transcribe(
item.getvalue("Selected"),
parent=item, masterMobs=masterMobs
)
alternates = [
_transcribe(alt, parent=item, masterMobs=masterMobs)
for alt in item.getvalue("Alternates")
]
        # muted case -- if there is only one item it's muted, otherwise it's
        # a multi-cam thing
if alternates and len(alternates) == 1:
metadata['muted_clip'] = True
result.name = str(alternates[0].name) + "_MUTED"
metadata['alternates'] = alternates
# @TODO: There are a bunch of other AAF object types that we will
# likely need to add support for. I'm leaving this code here to help
# future efforts to extract the useful information out of these.
# elif isinstance(item, aaf.storage.File):
# self.extendChildItems([item.header])
# elif isinstance(item, aaf.storage.Header):
# self.extendChildItems([item.storage()])
# self.extendChildItems([item.dictionary()])
# elif isinstance(item, aaf.dictionary.Dictionary):
# l = []
# l.append(DummyItem(list(item.class_defs()), 'ClassDefs'))
# l.append(DummyItem(list(item.codec_defs()), 'CodecDefs'))
# l.append(DummyItem(list(item.container_defs()), 'ContainerDefs'))
# l.append(DummyItem(list(item.data_defs()), 'DataDefs'))
# l.append(DummyItem(list(item.interpolation_defs()),
# 'InterpolationDefs'))
# l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs'))
# l.append(DummyItem(list(item.operation_defs()), 'OperationDefs'))
# l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs'))
# l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs'))
# l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs'))
# l.append(DummyItem(list(item.type_defs()), 'TypeDefs'))
# self.extendChildItems(l)
#
# elif isinstance(item, pyaaf.AxSelector):
# self.extendChildItems(list(item.EnumAlternateSegments()))
#
# elif isinstance(item, pyaaf.AxScopeReference):
# #print item, item.GetRelativeScope(),item.GetRelativeSlot()
# pass
#
# elif isinstance(item, pyaaf.AxEssenceGroup):
# segments = []
#
# for i in xrange(item.CountChoices()):
# choice = item.GetChoiceAt(i)
# segments.append(choice)
# self.extendChildItems(segments)
#
# elif isinstance(item, pyaaf.AxProperty):
# self.properties['Value'] = str(item.GetValue())
elif isinstance(item, Iterable):
result = otio.schema.SerializableCollection()
for child in item:
result.append(
_transcribe(
child,
parent=item,
masterMobs=masterMobs
)
)
else:
# For everything else, we just ignore it.
# To see what is being ignored, turn on the debug flag
if debug:
print("SKIPPING: {}: {} -- {}".format(type(item), item, result))
# Did we get anything? If not, we're done
if result is None:
return None
# Okay, now we've turned the AAF thing into an OTIO result
# There's a bit more we can do before we're ready to return the result.
# If we didn't get a name yet, use the one we have in metadata
if result.name is None:
# TODO: Some AAFs contain non-utf8 names?
# This works in Python 2.7, but not 3.5:
# result.name = metadata["Name"].encode('utf8', 'replace')
result.name = str(metadata["Name"])
# Attach the AAF metadata
if not result.metadata:
result.metadata = {}
result.metadata["AAF"] = metadata
# Double check that we got the length we expected
if isinstance(result, otio.core.Item):
length = metadata.get("Length")
if (
length and
result.source_range is not None and
result.source_range.duration.value != length
):
raise otio.exceptions.OTIOError(
"Wrong duration? {} should be {} in {}".format(
result.source_range.duration.value,
length,
result
)
)
# Did we find a Track?
if isinstance(result, otio.schema.Track):
# Try to figure out the kind of Track it is
if hasattr(item, 'media_kind'):
media_kind = str(item.media_kind)
result.metadata["AAF"]["MediaKind"] = media_kind
if media_kind == "Picture":
result.kind = otio.schema.TrackKind.Video
elif media_kind in ("SoundMasterTrack", "Sound"):
result.kind = otio.schema.TrackKind.Audio
else:
# Timecode, Edgecode, others?
result.kind = None
# Done!
return result
|
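A minimal sketch, assuming a 24 fps edit rate and invented frame values, of the opentimelineio calls the SourceClip branch above relies on, showing how a start/length pair at an edit rate becomes a Clip.source_range:

import opentimelineio as otio

edit_rate = 24  # assumed edit rate for this sketch
clip = otio.schema.Clip(name="example_clip")  # hypothetical clip name
clip.source_range = otio.opentime.TimeRange(
    start_time=otio.opentime.RationalTime(86400, edit_rate),  # e.g. a timecode-adjusted start frame
    duration=otio.opentime.RationalTime(48, edit_rate),       # 48 frames, i.e. 2 seconds at 24 fps
)
print(clip.source_range.duration.value)  # 48.0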
31,551 |
def search(post_to_warroom: bool = True) -> Tuple[dict, Any]:
"""
    Will search in MISP.
    Returns
        dict: Object with results returned to Demisto.
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'event_id',
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'last',
'eventid',
'uuid',
'to_ids',
'enforceWarninglist'
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# search function 'enforceWarninglist' parameter gets 0 or 1 instead of bool.
if 'enforceWarninglist' in args:
args['enforceWarninglist'] = 1 if d_args.get('enforceWarninglist') in ('true', '1', 1) else 0
# build MISP complex filter
if 'tags' in args:
args['tags'] = build_misp_complex_filter(args['tags'])
response = MISP.search(**args)
if response:
response_for_context = build_context(response)
# Prepare MD. getting all keys and values if exists
args_for_md = {key: value for key, value in args.items() if value}
if post_to_warroom:
md = tableToMarkdown('Results in MISP for search:', args_for_md)
md_event = response_for_context[0]
md += f'Total of {len(response_for_context)} events found\n'
event_highlights = {
'Info': md_event.get('Info'),
'Timestamp': convert_timestamp(md_event.get('Timestamp')),
'Analysis': ANALYSIS_WORDS[md_event.get('Analysis')],
'Threat Level ID': THREAT_LEVELS_WORDS[md_event.get('ThreatLevelID')],
'Event Creator Email': md_event.get('EventCreatorEmail'),
'Attributes': json.dumps(md_event.get('Attribute'), indent=4),
'Related Events': md_event.get('RelatedEvent')
}
md += tableToMarkdown(f'Event ID: {md_event.get("ID")}', event_highlights)
if md_event.get('Galaxy'):
md += tableToMarkdown('Galaxy:', md_event.get('Galaxy'))
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No events found in MISP for {args}")
return {}, {}
|
def search(post_to_warroom: bool = True) -> Tuple[dict, Any]:
"""
    Will search in MISP.
    Returns
        dict: Object with results returned to Demisto.
"""
d_args = demisto.args()
# List of all applicable search arguments
search_args = [
'event_id',
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'last',
'eventid',
'uuid',
'to_ids',
'enforceWarninglist',
]
args = dict()
# Create dict to pass into the search
for arg in search_args:
if arg in d_args:
args[arg] = d_args[arg]
# Replacing keys and values from Demisto to Misp's keys
if 'type' in args:
args['type_attribute'] = d_args.pop('type')
# search function 'to_ids' parameter gets 0 or 1 instead of bool.
if 'to_ids' in args:
args['to_ids'] = 1 if d_args.get('to_ids') in ('true', '1', 1) else 0
# search function 'enforceWarninglist' parameter gets 0 or 1 instead of bool.
if 'enforceWarninglist' in args:
args['enforceWarninglist'] = 1 if d_args.get('enforceWarninglist') in ('true', '1', 1) else 0
# build MISP complex filter
if 'tags' in args:
args['tags'] = build_misp_complex_filter(args['tags'])
response = MISP.search(**args)
if response:
response_for_context = build_context(response)
# Prepare MD. getting all keys and values if exists
args_for_md = {key: value for key, value in args.items() if value}
if post_to_warroom:
md = tableToMarkdown('Results in MISP for search:', args_for_md)
md_event = response_for_context[0]
md += f'Total of {len(response_for_context)} events found\n'
event_highlights = {
'Info': md_event.get('Info'),
'Timestamp': convert_timestamp(md_event.get('Timestamp')),
'Analysis': ANALYSIS_WORDS[md_event.get('Analysis')],
'Threat Level ID': THREAT_LEVELS_WORDS[md_event.get('ThreatLevelID')],
'Event Creator Email': md_event.get('EventCreatorEmail'),
'Attributes': json.dumps(md_event.get('Attribute'), indent=4),
'Related Events': md_event.get('RelatedEvent')
}
md += tableToMarkdown(f'Event ID: {md_event.get("ID")}', event_highlights)
if md_event.get('Galaxy'):
md += tableToMarkdown('Galaxy:', md_event.get('Galaxy'))
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
MISP_PATH: response_for_context
}
})
return response_for_context, response
else:
demisto.results(f"No events found in MISP for {args}")
return {}, {}
|
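Both copies of search() coerce Demisto's string flags into the 0/1 integers that PyMISP's search() expects; a standalone sketch of that normalization (the helper name is mine, not part of the integration):

def to_misp_flag(value):
    """Map Demisto-style boolean arguments onto the 0/1 integers MISP search expects."""
    return 1 if value in ('true', 'True', '1', 1, True) else 0

assert to_misp_flag('true') == 1
assert to_misp_flag('false') == 0
assert to_misp_flag(None) == 0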
3,484 |
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
title=recording['title'],
track_id=recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
# supplementary tags provided by plugins
extra_trackdatas = plugins.send('extracting_trackdata', info=recording)
for extra_trackdata in extra_trackdatas:
for key in extra_trackdata:
info[key] = extra_trackdata[key]
info.decode()
return info
|
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
title=recording['title'],
track_id=recording['id'],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=u'MusicBrainz',
data_url=track_url(recording['id']),
)
if recording.get('artist-credit'):
# Get the artist names.
info.artist, info.artist_sort, info.artist_credit = \
_flatten_artist_credit(recording['artist-credit'])
# Get the ID and sort name of the first artist.
artist = recording['artist-credit'][0]['artist']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
# Supplementary fields provided by plugins.
extra_trackdatas = plugins.send('extracting_trackdata', info=recording)
for extra_trackdata in extra_trackdatas:
for key in extra_trackdata:
info[key] = extra_trackdata[key]
info.decode()
return info
|
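A hedged usage sketch of track_info with a minimal MusicBrainz-style recording dict (all values here are invented, and the surrounding beets MusicBrainz module context is assumed); it mainly shows that 'length' arrives in milliseconds and that the positional counters are 1-based:

recording = {
    'id': 'hypothetical-recording-mbid',
    'title': 'Example Track',
    'length': '215000',  # milliseconds in the MusicBrainz response
}
info = track_info(recording, index=3, medium=1, medium_index=3, medium_total=12)
# info.length is 215.0 (seconds); index, medium, medium_index and medium_total are 1-based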
30,563 |
def splunk_job_status(service):
job = service.job(demisto.args()['sid'])
status = job.state.content['dispatchState']
entry_context = {
'SID': demisto.args()['sid'],
'Status': status
}
context = {'Splunk.JobStatus(val.ID && val.ID === obj.ID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": 1,
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
def splunk_job_status(service):
job = service.job(demisto.args()['sid'])
status = job.state.content['dispatchState']
entry_context = {
'SID': demisto.args()['sid'],
'Status': status
}
context = {'Splunk.JobStatus(val.SID && val.ID === obj.SID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": 1,
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
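splunk_job_status builds its war-room output with tableToMarkdown; a rough plain-Python sketch (a simplification, not the real helper) of what that kind of two-column table looks like for the entry context above:

def dict_to_markdown_table(title, data):
    """Render a dict as a small Markdown table, roughly like tableToMarkdown output."""
    lines = ['### ' + title,
             '|' + '|'.join(data.keys()) + '|',
             '|' + '|'.join('---' for _ in data) + '|',
             '|' + '|'.join(str(v) for v in data.values()) + '|']
    return '\n'.join(lines)

print(dict_to_markdown_table('Splunk Job Status', {'SID': '1234.56', 'Status': 'DONE'}))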
28,581 |
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
        The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
        `animation` = ``False`` it defaults to a maximum of 5 samples and will set jitter to `0.7`
        unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
        Create an animation of one posterior/prior predictive sample per frame.
        Defaults to ``False``. Only works with the matplotlib backend.
        To run animations inside a notebook you have to use the `nbAgg` matplotlib backend.
        Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
        default matplotlib backend with `%matplotlib inline` or `%matplotlib auto`.
        If you switch back and forth between matplotlib backends, you may need to run the cell
        with the animation twice.
        If you experience problems rendering the animation try setting
        ``animation_kwargs({'blit': False})`` or changing the matplotlib backend (e.g. to TkAgg).
        If you run the animation from a script write `ax, ani = az.plot_ppc(.)`.
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
    across multiple plots. We will now modify the dimension `obs_id` to
    indicate the name of the county where the measurement was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind = kde`` and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
        The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
        `animation` = ``False`` it defaults to a maximum of 5 samples and will set jitter to `0.7`
        unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
        Create an animation of one posterior/prior predictive sample per frame.
        Defaults to ``False``. Only works with the matplotlib backend.
        To run animations inside a notebook you have to use the `nbAgg` matplotlib backend.
        Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
        default matplotlib backend with `%matplotlib inline` or `%matplotlib auto`.
        If you switch back and forth between matplotlib backends, you may need to run the cell
        with the animation twice.
        If you experience problems rendering the animation try setting
        ``animation_kwargs({'blit': False})`` or changing the matplotlib backend (e.g. to TkAgg).
        If you run the animation from a script write `ax, ani = az.plot_ppc(.)`.
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
    across multiple plots. We will now modify the dimension `obs_id` to
    indicate the name of the county where the measurement was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
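A small sketch of how num_pp_samples and random_seed interact in the body above: the pooled chain by draw samples are indexed and subsampled without replacement (the shape numbers here are invented):

import numpy as np

chains, draws = 4, 500            # assumed posterior predictive shape
total_pp_samples = chains * draws
np.random.seed(7)                 # mirrors random_seed=7
pp_sample_ix = np.random.choice(total_pp_samples, size=30, replace=False)
print(pp_sample_ix.shape)         # (30,)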
31,589 |
def get_rules(client_obj, args: Dict[str, str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Returns context data and raw response for gcb-list-rules command.
:type client_obj: Client
:param client_obj: client object which is used to get response from api
:type args: Dict[str, str]
:param args: it contain arguments of gcb-list-rules command
:rtype: Tuple[Dict[str, Any], Dict[str, Any]]
:return: ec, json_data: Context data and raw response for the fetched rules
"""
page_size = args.get('page_size', 100)
validate_page_size(page_size)
page_token = args.get('page_token', '')
if int(page_size) > 1000:
raise ValueError('Page size should be in the range from 1 to 1000.')
live_rule = args.get('live_rule', '').lower()
if live_rule and live_rule != 'true' and live_rule != 'false':
raise ValueError('Live rule should be true or false.')
request_url = '{}/detect/rules?pageSize={}'.format(BACKSTORY_API_V2_URL, page_size)
# Append parameters if specified
if page_token:
request_url += '&page_token={}'.format(page_token)
# get list of rules from Chronicle Backstory
json_data = validate_response(client_obj, request_url)
if live_rule:
if live_rule == 'true':
list_live_rule = [rule for rule in json_data.get('rules', []) if rule.get('liveRuleEnabled')]
else:
list_live_rule = [rule for rule in json_data.get('rules', []) if not rule.get('liveRuleEnabled')]
json_data = {
'rules': list_live_rule
}
raw_resp = deepcopy(json_data)
parsed_ec, token_ec = get_context_for_rules(json_data)
ec: Dict[str, Any] = {
CHRONICLE_OUTPUT_PATHS['Rules']: parsed_ec
}
if token_ec:
ec.update({CHRONICLE_OUTPUT_PATHS['Token']: token_ec})
return ec, raw_resp
|
def get_rules(client_obj, args: Dict[str, str]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Returns context data and raw response for gcb-list-rules command.
:type client_obj: Client
:param client_obj: client object which is used to get response from api
:type args: Dict[str, str]
:param args: it contain arguments of gcb-list-rules command
:rtype: Tuple[Dict[str, Any], Dict[str, Any]]
:return: ec, json_data: Context data and raw response for the fetched rules
"""
page_size = args.get('page_size', 100)
validate_page_size(page_size)
page_token = args.get('page_token', '')
if int(page_size) > 1000:
raise ValueError('Page size should be in the range from 1 to 1000.')
live_rule = argToBoolean(args.get('live_rule', ''))
if live_rule and live_rule != 'true' and live_rule != 'false':
raise ValueError('Live rule should be true or false.')
request_url = '{}/detect/rules?pageSize={}'.format(BACKSTORY_API_V2_URL, page_size)
# Append parameters if specified
if page_token:
request_url += '&page_token={}'.format(page_token)
# get list of rules from Chronicle Backstory
json_data = validate_response(client_obj, request_url)
if live_rule:
if live_rule == 'true':
list_live_rule = [rule for rule in json_data.get('rules', []) if rule.get('liveRuleEnabled')]
else:
list_live_rule = [rule for rule in json_data.get('rules', []) if not rule.get('liveRuleEnabled')]
json_data = {
'rules': list_live_rule
}
raw_resp = deepcopy(json_data)
parsed_ec, token_ec = get_context_for_rules(json_data)
ec: Dict[str, Any] = {
CHRONICLE_OUTPUT_PATHS['Rules']: parsed_ec
}
if token_ec:
ec.update({CHRONICLE_OUTPUT_PATHS['Token']: token_ec})
return ec, raw_resp
|
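After the argToBoolean change, live_rule is a real boolean rather than the 'true'/'false' strings compared against further down; a minimal sketch (helper name and sample data are mine) of filtering the rules payload by its liveRuleEnabled flag with a boolean:

def filter_rules_by_live_state(rules, live_rule=None):
    """Keep only rules whose liveRuleEnabled flag matches the requested boolean."""
    if live_rule is None:
        return rules
    return [rule for rule in rules if bool(rule.get('liveRuleEnabled')) == live_rule]

rules = [{'ruleId': 'ru_1', 'liveRuleEnabled': True}, {'ruleId': 'ru_2'}]
assert [r['ruleId'] for r in filter_rules_by_live_state(rules, True)] == ['ru_1']
assert [r['ruleId'] for r in filter_rules_by_live_state(rules, False)] == ['ru_2']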
390 |
def test_posdef_symmetric3():
"""The test return 0 if the matrix has 0 eigenvalue.
Is this correct?
"""
data = np.array([[1.0, 1], [1, 1]], dtype=aesara.config.floatX)
assert posdef(data) == 0
|
def test_posdef_symmetric3():
"""The test returns 0 if the matrix has 0 eigenvalue.
Is this correct?
"""
data = np.array([[1.0, 1], [1, 1]], dtype=aesara.config.floatX)
assert posdef(data) == 0
|
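The matrix in this test is rank one, so it has a zero eigenvalue and is only positive semi-definite, which is why posdef is expected to return 0; a quick numpy check of its spectrum:

import numpy as np

data = np.array([[1.0, 1.0], [1.0, 1.0]])
print(np.linalg.eigvalsh(data))  # [0. 2.]: one zero eigenvalue, so not strictly positive definite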
50,526 |
def _generate_spiketrains(freq, length, trigger_events, injection_pos,
trigger_pre_size, trigger_post_size,
time_unit=1*pq.s):
"""
Generate two spiketrains from a homogeneous Poisson process with
injected coincidences.
"""
st1 = homogeneous_poisson_process(rate=freq,
t_start=(0*pq.s).rescale(time_unit),
t_stop=length.rescale(time_unit))
st2 = homogeneous_poisson_process(rate=freq,
t_start=(0*pq.s.rescale(time_unit)),
t_stop=length.rescale(time_unit))
# inject 10 coincidences within a 0.1s interval for each trial
injection = (np.linspace(0, 0.1, 10)*pq.s).rescale(time_unit)
all_injections = np.array([])
for i in trigger_events:
all_injections = np.concatenate(
(all_injections, (i+injection_pos)+injection), axis=0) * time_unit
st1 = st1.duplicate_with_new_data(
np.sort(np.concatenate((st1.times, all_injections)))*time_unit)
st2 = st2.duplicate_with_new_data(
np.sort(np.concatenate((st2.times, all_injections)))*time_unit)
# stack spiketrains by trial
st1_stacked = [st1.time_slice(
t_start=i - trigger_pre_size,
t_stop=i + trigger_post_size).time_shift(-i + trigger_pre_size)
for i in trigger_events]
st2_stacked = [st2.time_slice(
t_start=i - trigger_pre_size,
t_stop=i + trigger_post_size).time_shift(-i + trigger_pre_size)
for i in trigger_events]
spiketrains = np.stack((st1_stacked, st2_stacked), axis=1)
spiketrains = spiketrains.tolist()
return spiketrains, st1, st2
|
def _generate_spiketrains(freq, length, trigger_events, injection_pos,
trigger_pre_size, trigger_post_size,
time_unit=1*pq.s):
"""
Generate two spiketrains from a homogeneous Poisson process with
injected coincidences.
"""
st1 = StationaryPoissonProcess(rate=freq,
t_start=(0*pq.s).rescale(time_unit),
t_stop=length.rescale(time_unit)
).generate_spiketrain()
st2 = StationaryPoissonProcess(rate=freq,
t_start=(0*pq.s.rescale(time_unit)),
t_stop=length.rescale(time_unit)
).generate_spiketrain()
# inject 10 coincidences within a 0.1s interval for each trial
injection = (np.linspace(0, 0.1, 10)*pq.s).rescale(time_unit)
all_injections = np.array([])
for i in trigger_events:
all_injections = np.concatenate(
(all_injections, (i+injection_pos)+injection), axis=0) * time_unit
st1 = st1.duplicate_with_new_data(
np.sort(np.concatenate((st1.times, all_injections)))*time_unit)
st2 = st2.duplicate_with_new_data(
np.sort(np.concatenate((st2.times, all_injections)))*time_unit)
# stack spiketrains by trial
st1_stacked = [st1.time_slice(
t_start=i - trigger_pre_size,
t_stop=i + trigger_post_size).time_shift(-i + trigger_pre_size)
for i in trigger_events]
st2_stacked = [st2.time_slice(
t_start=i - trigger_pre_size,
t_stop=i + trigger_post_size).time_shift(-i + trigger_pre_size)
for i in trigger_events]
spiketrains = np.stack((st1_stacked, st2_stacked), axis=1)
spiketrains = spiketrains.tolist()
return spiketrains, st1, st2
|
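The modified helper swaps the homogeneous_poisson_process call (deprecated in recent elephant releases) for the class-based generator; a minimal sketch of that call, assuming an elephant version that exposes StationaryPoissonProcess in elephant.spike_train_generation:

import quantities as pq
from elephant.spike_train_generation import StationaryPoissonProcess  # assumed available

st = StationaryPoissonProcess(rate=5 * pq.Hz,
                              t_start=0 * pq.s,
                              t_stop=10 * pq.s).generate_spiketrain()
print(st.t_start, st.t_stop)  # spans 0 s to 10 s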
21,217 |
def generate_command_cache(bench_path='.'):
"""Caches all available commands (even custom apps) via Frappe
Default caching behaviour: generated the first time any command (for a specific bench directory)
"""
python = get_env_cmd('python', bench_path=bench_path)
sites_path = os.path.join(bench_path, 'sites')
if os.path.exists(bench_cache_file):
os.remove(bench_cache_file)
try:
command = "{0} -m frappe.utils.bench_helper get-frappe-commands".format(python)
logger.debug('generate_command_cache(\'%s\') executing: %s', bench_path, command)
output = get_cmd_output(command, cwd=sites_path)
with open(bench_cache_file, 'w') as f:
json.dump(eval(output), f)
return json.loads(output)
except subprocess.CalledProcessError as e:
logger.error('generate_command_cache(\'%s\') failed executing: %s', bench_path, command, exc_info=e)
if hasattr(e, "stderr"):
print(e.stderr.decode('utf-8'))
raise e
|
def generate_command_cache(bench_path='.'):
"""Caches all available commands (even custom apps) via Frappe
Default caching behaviour: generated the first time any command (for a specific bench directory)
"""
python = get_env_cmd('python', bench_path=bench_path)
sites_path = os.path.join(bench_path, 'sites')
if os.path.exists(bench_cache_file):
os.remove(bench_cache_file)
try:
command = "{0} -m frappe.utils.bench_helper get-frappe-commands".format(python)
logger.debug('generate_command_cache(\'%s\') executing: %s', bench_path, command)
output = get_cmd_output(command, cwd=sites_path)
with open(bench_cache_file, 'w') as f:
json.dump(eval(output), f)
return json.loads(output)
except subprocess.CalledProcessError as e:
logger.error("generate_command_cache('%s') failed executing: %s", bench_path, command, exc_info=e)
if hasattr(e, "stderr"):
print(e.stderr.decode('utf-8'))
raise e
|
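Since the function already parses the helper's output with json.loads, the eval() round-trip before caching is redundant; a small sketch (function and file names are mine, not bench's actual constants) of caching via json alone:

import json

def cache_frappe_commands(output, cache_file='bench-commands.json'):
    """Parse the JSON printed by bench_helper and write it to a cache file."""
    commands = json.loads(output)  # validates and parses in one step, no eval() needed
    with open(cache_file, 'w') as f:
        json.dump(commands, f)
    return commands

commands = cache_frappe_commands('["migrate", "backup"]')
print(commands)  # ['migrate', 'backup']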
27,452 |
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
    # If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if "license" in license.lower() and "unlicense" not in license.lower():
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
    if re.match(r"^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
        except Exception:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
                if (req == str(language) or req.startswith(str(language))) and not req.startswith(str(language) + '-')
]
filtered_run_reqs = [
req
for req in run_reqs
                if (req == str(language) or req.startswith(str(language))) and not req.startswith(str(language) + '-')
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = yaml.load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
return lints, hints
|
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function) , we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if "license" in license.lower() and "unlicense" not in license.lower():
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", "")
if license_file == "" and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req == (str(language) or req.startswith(str(language))) and not req.startswith(str(language)+'-')
]
filtered_run_reqs = [
req
for req in run_reqs
if req == str(language) or req.startswith(str(language)+" ")
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = yaml.load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:50])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
return lints, hints
|
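The functional change in this lintify pair is the run-requirements filter in check 23. Below is a small standalone sketch of the two predicates; the helper names old_predicate/new_predicate are illustrative and not part of conda-smithy. In the original expression, (str(language) or req.startswith(...)) short-circuits to the non-empty string, so the test degenerates to an exact-name match and pinned requirements such as "python >=3.6" are never caught; the modified code fixes this for the run requirements while the host filter keeps the old form.

# Illustrative only: the expressions inside these helpers are copied from the
# original and modified filters, the function names themselves are hypothetical.
def old_predicate(req, language):
    # (language or ...) is always the truthy string `language`, so this reduces
    # to `req == language and not req.startswith(language + '-')`.
    return req == (str(language) or req.startswith(str(language))) and not req.startswith(str(language) + '-')

def new_predicate(req, language):
    # Matches the bare name or a space-separated pin such as "python >=3.6".
    return req == str(language) or req.startswith(str(language) + " ")

reqs = ["python", "python >=3.6", "python-dateutil", "r-base 4.1"]
print([r for r in reqs if old_predicate(r, "python")])  # ['python'] -- pins are missed
print([r for r in reqs if new_predicate(r, "python")])  # ['python', 'python >=3.6']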
6,920 |
def get_users_for_mentions():
return frappe.get_all('User',
fields=['name as id', 'full_name as value'],
filters={
'name': ['not in', ('Administrator', 'Guest')],
'allowed_in_mentions': True,
'user_type': 'System User',
'enabled': 1
})
|
def get_users_for_mentions():
return frappe.get_all('User',
fields=['name as id', 'full_name as value'],
filters={
'name': ['not in', ('Administrator', 'Guest')],
'allowed_in_mentions': True,
'user_type': 'System User',
'enabled': True,
})
|
45,511 |
def test_posting_env_config_return_400_when_slack_project_config_does_not_exists(
admin_client, environment, environment_api_key
):
# Given
url = reverse(
"api-v1:environments:integrations-slack-list",
args=[environment_api_key],
)
# When
response = admin_client.post(
url,
data=json.dumps({"channel_id": "test_id", "enabled": True}),
content_type="application/json",
)
# Then
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "Slack api token not found" in response.json()[0]
|
def test_posting_env_config_return_400_when_slack_project_config_does_not_exist(
admin_client, environment, environment_api_key
):
# Given
url = reverse(
"api-v1:environments:integrations-slack-list",
args=[environment_api_key],
)
# When
response = admin_client.post(
url,
data=json.dumps({"channel_id": "test_id", "enabled": True}),
content_type="application/json",
)
# Then
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "Slack api token not found" in response.json()[0]
|
19,991 |
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss)
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background, either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
def find_color_card(img, threshold='adaptgauss', threshvalue=125, blurry=False, background='dark'):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Inputs:
img = Input RGB image data containing a color card.
threshold = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
thresh_value = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background, either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param img: numpy.ndarray
:param threshold: str
:param threshvalue: int
:param blurry: bool
:param background: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = img.shape
totalpx = float(height * width)
# Minimum and maximum square size based upon 12 MP image
minarea = 1000. / 12000000. * totalpx
maxarea = 8000000. / 12000000. * totalpx
# Create gray image for further processing
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold == "otsu":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold == "normal":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold == "adaptgauss":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Threshold ' + str(threshold) + ' is not "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
edges = skimage.feature.canny(threshold)
edges.dtype = 'uint8'
# Compute contours to find the squares of the card
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and minarea < marea[index] < maxarea:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.15 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
mwhratio.append(wh[0] / wh[1])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) == 4 or 5:
msquarecoords.append(approx)
else: # It's not square
msquare.append(0)
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
'Area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(locarea)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card
# isolate area measurements
filtered_area = df['Area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers
# Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Extract the starting coordinate
start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['X'].max() - df['X'].min()) / 3
spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
spacingx_long = (df['X'].max() - df['X'].min()) / 5
spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
return df, start_coord, spacing
|
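A short usage sketch for the find_color_card() shown above. It assumes the function lives in its original module (apparently PlantCV, given create_color_card_mask and fatal_error) where cv2, np and fatal_error are already imported, that an OpenCV 3.x-style three-value cv2.findContours is available, and that "card_photo.jpg" is a placeholder path rather than a file from the source.

import cv2

img = cv2.imread("card_photo.jpg")  # BGR image containing a 4x6 color card
df, start_coord, spacing = find_color_card(img, threshold="adaptgauss", background="dark")
print("top-left chip at", start_coord, "chip spacing:", spacing)
print(df[["X", "Y", "width", "height"]].head())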
53,976 |
def nae3sat(variables: typing.Union[int, typing.Sequence[dimod.typing.Variable]],
rho: float = 2.1,
*,
seed: typing.Union[None, int, np.random.Generator] = None,
replace: bool = True,
) -> BinaryQuadraticModel:
"""Generator for Not-All-Equal 3-SAT (NAE3SAT) Binary Quadratic Models.
NAE3SAT_ is an NP-complete problem class that consists in satisfying a number of conjunctive
clauses that involve three variables (or variable negations). The variables in each clause
should be not-all-equal, i.e. all solutions except ``(+1, +1, +1)`` or
``(-1, -1, -1)`` are valid for each clause.
.. _NAE3SAT: https://en.wikipedia.org/wiki/Not-all-equal_3-satisfiability
Args:
num_variables: The number of variables in the problem.
rho: The clause-to-variable ratio.
seed: Passed to :func:`numpy.random.default_rng()`, which is used
to generate the clauses and the negations.
replace: If true, then clauses are randomly sampled from the space
of all possible clauses. This can result in the same three variables
being present in multiple clauses.
As the number of variables grows the probability of this happening
shrinks rapidly and therefore it is often better to allow sampling
with replacement for performance.
Returns:
A binary quadratic model with spin variables.
"""
if isinstance(variables, collections.abc.Sequence):
num_variables = len(variables)
labels = variables
elif variables < 0:
raise ValueError("variables must be a sequence or a positive integer")
else:
num_variables = variables
labels = None
if rho < 0:
raise ValueError("rho must be positive")
num_clauses = round(rho * num_variables)
bqm = BinaryQuadraticModel(num_variables, Vartype.SPIN)
if replace:
bqm.add_quadratic_from(_iter_interactions(num_variables, num_clauses, seed))
else:
bqm.add_quadratic_from(_iter_interactions_without_replacement(num_variables, num_clauses, seed))
if labels:
bqm.relabel_variables(dict(enumerate(labels)))
return bqm
|
def nae3sat(variables: typing.Union[int, typing.Sequence[dimod.typing.Variable]],
rho: float = 2.1,
*,
seed: typing.Union[None, int, np.random.Generator] = None,
replace: bool = True,
) -> BinaryQuadraticModel:
"""Generator for Not-All-Equal 3-SAT (NAE3SAT) Binary Quadratic Models.
NAE3SAT_ is an NP-complete problem class that consists in satisfying a number of conjunctive
clauses that involve three variables (or variable negations). The variables in each clause
should be not-all-equal, i.e. all solutions except ``(+1, +1, +1)`` or
``(-1, -1, -1)`` are valid for each clause.
.. _NAE3SAT: https://en.wikipedia.org/wiki/Not-all-equal_3-satisfiability
Args:
num_variables: The number of variables in the problem.
rho: The clause-to-variable ratio.
seed: Passed to :func:`numpy.random.default_rng()`, which is used
to generate the clauses and the negations.
replace: If true, then clauses are randomly sampled from the space
of all possible clauses. This can result in the same three variables
being present in multiple clauses.
As the number of variables grows the probability of this happening
shrinks rapidly and therefore it is often better to allow sampling
with replacement for performance.
Returns:
A binary quadratic model with spin variables.
"""
if isinstance(variables, collections.abc.Sequence):
num_variables = len(variables)
labels = variables
elif variables < 3:
raise ValueError("variables must be a sequence or a positive integer of at least size 3")
else:
num_variables = variables
labels = None
if rho < 0:
raise ValueError("rho must be positive")
num_clauses = round(rho * num_variables)
bqm = BinaryQuadraticModel(num_variables, Vartype.SPIN)
if replace:
bqm.add_quadratic_from(_iter_interactions(num_variables, num_clauses, seed))
else:
bqm.add_quadratic_from(_iter_interactions_without_replacement(num_variables, num_clauses, seed))
if labels:
bqm.relabel_variables(dict(enumerate(labels)))
return bqm
|
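A minimal usage sketch for the nae3sat() generator defined above, assuming it is importable from its module; the brute-force sampling step uses dimod's ExactSolver purely for illustration.

import dimod

bqm = nae3sat(6, rho=2.1, seed=42)  # 6 spin variables, round(2.1 * 6) = 13 clauses
print(bqm.num_variables, bqm.num_interactions)

best = dimod.ExactSolver().sample(bqm).first  # exhaustive search, fine for 6 variables
print(best.sample, best.energy)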
27,990 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='folder',
help="The analysis result folder(s) containing "
"analysis results and fixits which should be "
"applied.")
parser.add_argument('-l', '--list',
action='store_true',
default=argparse.SUPPRESS,
help="List the available automatic fixes.")
parser.add_argument('--checker-name',
nargs='*',
help='Filter results by checker names. The checker '
'name can contain multiple * quantifiers which '
'matches any number of characters (zero or '
'more). So for example"*DeadStores" will '
'match "deadcode.DeadStores".')
parser.add_argument('--file',
metavar='FILE_PATH',
nargs='*',
help='Filter results by file path. The file path can '
'contain multiple * quantifiers which matches '
'any number of characters (zero or more). So if '
'you have /a/x.cpp and /a/y.cpp then "/a/*.cpp" '
'selects both.')
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main)
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('input',
type=str,
nargs='+',
metavar='folder',
help="The analysis result folder(s) containing "
"analysis results and fixits which should be "
"applied.")
parser.add_argument('-l', '--list',
action='store_true',
default=argparse.SUPPRESS,
help="List the available automatic fixes.")
parser.add_argument('--checker-name',
nargs='*',
help='Filter results by checker names. The checker '
'name can contain multiple * quantifiers which '
'matches any number of characters (zero or '
'more). So for example "*DeadStores" will '
'match "deadcode.DeadStores".')
parser.add_argument('--file',
metavar='FILE_PATH',
nargs='*',
help='Filter results by file path. The file path can '
'contain multiple * quantifiers which matches '
'any number of characters (zero or more). So if '
'you have /a/x.cpp and /a/y.cpp then "/a/*.cpp" '
'selects both.')
logger.add_verbose_arguments(parser)
parser.set_defaults(func=main)
|
3,808 |
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True,
strict=False):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
if strict:
cond = x.shape == y.shape
else:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(all='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
|
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True,
*, strict=False):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
if strict:
cond = x.shape == y.shape
else:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
# If one is datetime64 and the other timedelta64 there is no point
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(all='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
|
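Both versions are private helpers in numpy.testing; the only change is that strict becomes keyword-only. The sketch below mirrors how the public wrappers pass a comparison callable in, and is not a supported API on its own.

import operator
import numpy as np

x = np.array([1.0, 2.0, np.nan])
y = np.array([1.0, 2.0, np.nan])

# Passes: NaNs must sit at the same positions (equal_nan=True) and the
# remaining values compare equal element-wise.
assert_array_compare(operator.__eq__, x, y, header="Arrays are not equal")

# With the modified signature, strict must be passed by keyword (it already could be).
assert_array_compare(operator.__eq__, x, y, header="Arrays are not equal", strict=False)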
31,271 |
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
sys.exit(0)
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
sys.exit(0)
for entry in data:
for results, outputs in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, outputs)
except Exception as error:
return_error(str(error), error)
|
def main():
try:
indicator = demisto.args()['indicator']
resp = demisto.executeCommand("getIndicator", {'value': indicator})
if isError(resp) or not resp:
demisto.results(resp)
sys.exit(0)
data = resp[0].get("Contents")
if not data:
demisto.results("No results.")
return
for entry in data:
for results, outputs in iterate_indicator_entry(indicator, entry):
return_results(results)
appendContext(DbotScoreKey, outputs)
except Exception as error:
return_error(str(error), error)
|
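The change replaces sys.exit(0) with a plain return for the empty-data branch. A standalone illustration (no Demisto imports) of why the surrounding except Exception never interfered either way: sys.exit raises SystemExit, which derives from BaseException rather than Exception.

import sys

def exit_style():
    try:
        sys.exit(0)  # raises SystemExit, a BaseException subclass
    except Exception:
        print("caught")  # never runs: SystemExit is not an Exception

def return_style():
    try:
        return  # same control flow, no exception machinery involved
    except Exception:
        print("caught")

return_style()
try:
    exit_style()
except SystemExit:
    print("SystemExit propagated past `except Exception`")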
46,292 |
def is_diagonal(matrix, tol=1e-8):
"""Determine whether affine is a diagonal matrix.
Parameters
----------
matrix : 2-D array
The matrix to test.
tol : float, optional
Consider any entries with magnitude < `tol` as 0.
Returns
-------
is_diag : bool
Boolean indicating whether affine is diagonal.
"""
if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix must be square")
non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)]
if tol == 0:
return np.count_nonzero(non_diag) == 0
else:
return np.max(np.abs(non_diag)) <= tol
|
def is_diagonal(matrix, tol=1e-8):
"""Determine whether a square matrix is diagonal up to some tolerance.
Parameters
----------
matrix : 2-D array
The matrix to test.
tol : float, optional
Consider any entries with magnitude < `tol` as 0.
Returns
-------
is_diag : bool
Boolean indicating whether affine is diagonal.
"""
if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
raise ValueError("matrix must be square")
non_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)]
if tol == 0:
return np.count_nonzero(non_diag) == 0
else:
return np.max(np.abs(non_diag)) <= tol
|
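A quick usage sketch for is_diagonal(), assuming NumPy and the function above are in scope.

import numpy as np

A = np.diag([1.0, 2.0, 3.0])
B = A.copy()
B[0, 1] = 1e-12  # tiny off-diagonal noise

print(is_diagonal(A))         # True
print(is_diagonal(B))         # True  (1e-12 is below the default tol of 1e-8)
print(is_diagonal(B, tol=0))  # False (exact check)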
50,372 |
def _send_email_to_user(request, user, msg, *, email=None, allow_unverified=False):
# If we were not given a specific email object, then we'll default to using
# the User's primary email address.
if email is None:
email = user.primary_email
# If we were not able to locate an email address for this user, then we will just
# have to skip sending email to them. If we have an email for them, then we will
# check to see if it is verified, if it is not then we will also skip sending email
# to them **UNLESS** we've been told to allow unverified emails.
if email is None or not (email.verified or allow_unverified):
return
# We should only store/display IP address of an 'email sent' event if the user
# who triggered the email event is the one who receives the email. Else display
# 'Redacted' to prevent user privacy concerns
user_email = request.db.query(Email).filter(Email.email == email.email).one()
ip_address = (
request.remote_addr if user_email.user_id == request.user.id else "Redacted"
)
request.task(send_email).delay(
_compute_recipient(user, email.email), attr.asdict(msg), user.id, ip_address
)
|
def _send_email_to_user(request, user, msg, *, email=None, allow_unverified=False):
# If we were not given a specific email object, then we'll default to using
# the User's primary email address.
if email is None:
email = user.primary_email
# If we were not able to locate an email address for this user, then we will just
# have to skip sending email to them. If we have an email for them, then we will
# check to see if it is verified, if it is not then we will also skip sending email
# to them **UNLESS** we've been told to allow unverified emails.
if email is None or not (email.verified or allow_unverified):
return
# We should only store/display IP address of an 'email sent' event if the user
# who triggered the email event is the one who receives the email. Else display
# 'Redacted' to prevent user privacy concerns
ip_address = (
request.remote_addr if email == request.user.primary_email else "Redacted"
)
request.task(send_email).delay(
_compute_recipient(user, email.email), attr.asdict(msg), user.id, ip_address
)
|
40,709 |
def _check_output_types(output: Any):
y_pred, y = output
if y_pred.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError("Input y_pred dtype should be float 16, 32 or 64, but given {}".format(y_pred.dtype))
if y.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError("Input y dtype should be float 16, 32 or 64, but given {}".format(y.dtype))
|
def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]):
y_pred, y = output
if y_pred.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError("Input y_pred dtype should be float 16, 32 or 64, but given {}".format(y_pred.dtype))
if y.dtype not in (torch.float16, torch.float32, torch.float64):
raise TypeError("Input y dtype should be float 16, 32 or 64, but given {}".format(y.dtype))
|
41,905 |
def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if update_category_axes.get(x_param, False):
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
1,249 |
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {str(e)}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {str(e)}")
row += [_err()]
return row
|
def proc_file(f, opts):
verbose(1, f"Loading {f}")
row = [f"@l{f}"]
try:
vol = nib.load(f)
h = vol.header
except Exception as e:
row += ['failed']
verbose(2, f"Failed to gather information -- {str(e)}")
return row
row += [str(safe_get(h, 'data_dtype')),
f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]",
f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"]
# Slope
if hasattr(h, 'has_data_slope') and \
(h.has_data_slope or h.has_data_intercept) and \
not h.get_slope_inter() in [(1.0, 0.0), (None, None)]:
row += ['@l*%.3g+%.3g' % h.get_slope_inter()]
else:
row += ['']
if hasattr(h, 'extensions') and len(h.extensions):
row += ['@l#exts: %d' % len(h.extensions)]
else:
row += ['']
if opts.header_fields:
# signals "all fields"
if opts.header_fields == 'all':
# TODO: might vary across file types, thus prior sensing
# would be needed
header_fields = h.keys()
else:
header_fields = opts.header_fields.split(',')
for f in header_fields:
if not f: # skip empty
continue
try:
row += [str(h[f])]
except (KeyError, ValueError):
row += [_err()]
try:
if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and
(h.get_qform() != h.get_sform()).any()):
row += ['sform']
else:
row += ['']
except Exception as e:
verbose(2, f"Failed to obtain qform or sform -- {str(e)}")
if isinstance(h, nib.AnalyzeHeader):
row += ['']
else:
row += [_err()]
if opts.stats or opts.counts:
# We are doomed to load data
try:
d = np.asarray(vol.dataobj)
if not opts.stats_zeros:
d = d[np.nonzero(d)]
else:
# at least flatten it -- functionality below doesn't
# depend on the original shape, so let's use a flat view
d = d.reshape(-1)
if opts.stats:
# just # of elements
row += ["@l[%d]" % np.prod(d.shape)]
# stats
row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-']
if opts.counts:
items, inv = np.unique(d, return_inverse=True)
if len(items) > 1000 and not opts.all_counts:
counts = _err("%d uniques. Use --all-counts" % len(items))
else:
freq = np.bincount(inv)
counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq))
row += ["@l" + counts]
except IOError as e:
verbose(2, f"Failed to obtain stats/counts -- {e}")
row += [_err()]
return row
|
57,510 |
def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = True,
whole: bool = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], classmethod]:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the
whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
:param allow_reuse: whether to track and raise an error if another validator refers to the decorated function
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. " # noqa: Q000
"E.g. usage should be `@validator('<field_name>', ...)`"
)
elif not all([isinstance(field, str) for field in fields]):
raise ConfigError(
"validator fields should be passed as separate string args. Do not pass multiple fields as a list, etc. "
"E.g. usage should be `@validator('<field_name_1>', '<field_name_2>', ...)` "
"NOT `@validator(['<field_name_1>', '<field_name_2>', ...], ...)`"
)
if whole is not None:
warnings.warn(
'The "whole" keyword argument is deprecated, use "each_item" (inverse meaning, default False) instead',
DeprecationWarning,
)
assert each_item is False, '"each_item" and "whole" conflict, remove "whole"'
each_item = not whole
def dec(f: AnyCallable) -> classmethod:
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls,
VALIDATOR_CONFIG_KEY,
(
fields,
Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields),
),
)
return f_cls
return dec
|
def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = True,
whole: bool = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], classmethod]:
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the
whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
:param allow_reuse: whether to track and raise an error if another validator refers to the decorated function
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError(
"validators should be used with fields and keyword arguments, not bare. " # noqa: Q000
"E.g. usage should be `@validator('<field_name>', ...)`"
)
elif not all([isinstance(field, str) for field in fields]):
raise ConfigError(
"validator fields should be passed as separate string args. "
"E.g. usage should be `@validator('<field_name_1>', '<field_name_2>', ...)`"
)
if whole is not None:
warnings.warn(
'The "whole" keyword argument is deprecated, use "each_item" (inverse meaning, default False) instead',
DeprecationWarning,
)
assert each_item is False, '"each_item" and "whole" conflict, remove "whole"'
each_item = not whole
def dec(f: AnyCallable) -> classmethod:
f_cls = _prepare_validator(f, allow_reuse)
setattr(
f_cls,
VALIDATOR_CONFIG_KEY,
(
fields,
Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields),
),
)
return f_cls
return dec
|
30,389 |
def decrypt_email_body(client: Client, args: Dict):
""" Decrypt the message
Args:
client: Client
args: Dict
"""
encrypt_message = demisto.getFilePath(args.get('encrypt_message'))
# Load private key and cert.
client.smime.load_key(client.private_key_file, client.public_key_file)
# Load the encrypted data.
p7, data = SMIME.smime_load_pkcs7(encrypt_message['path'])
# Decrypt p7.
out = client.smime.decrypt(p7).decode('utf-8')
entry_context = {
'SMIME': {
'Message': out,
}
}
human_readable = f'The encrypted message is: \n{out}'
return human_readable, entry_context
|
def decrypt_email_body(client: Client, args: Dict):
""" Decrypt the message
Args:
client: Client
args: Dict
"""
encrypt_message = demisto.getFilePath(args.get('encrypt_message'))
# Load private key and cert.
client.smime.load_key(client.private_key_file, client.public_key_file)
# Load the encrypted data.
p7, data = SMIME.smime_load_pkcs7(encrypt_message['path'])
# Decrypt p7.
out = client.smime.decrypt(p7).decode('utf-8')
entry_context = {
'SMIME.Decrypted': {
'Message': out,
}
}
human_readable = f'The encrypted message is: \n{out}'
return human_readable, entry_context
|
12,066 |
def test_no_candidates(pip_conf, runner):
with open("requirements", "w") as req_in:
req_in.write("small-fake-a==>0.3b1,<0.3b1")
out = runner.invoke(cli, ["-n", "requirements"])
assert out.exit_code == 2
assert "Skipped pre-versions:" in out.stderr
|
def test_no_candidates(pip_conf, runner):
with open("requirements", "w") as req_in:
req_in.write("small-fake-a>0.3b1,<0.3b1")
out = runner.invoke(cli, ["-n", "requirements"])
assert out.exit_code == 2
assert "Skipped pre-versions:" in out.stderr
|
49,070 |
def test_ContinuousRV():
pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
# X and Y should be equivalent
X = ContinuousRV(x, pdf, check=True)
Y = Normal('y', 0, 1)
assert variance(X) == variance(Y)
assert P(X > 0) == P(Y > 0)
Z = ContinuousRV(z, exp(-z), set=Interval(0, oo))
assert Z.pspace.domain.set == Interval(0, oo)
assert E(Z) == 1
assert P(Z > 5) == exp(-5)
raises(ValueError, lambda: ContinuousRV(z, exp(-z), set=Interval(0, 10), check=True))
# the correct pdf for Gamma(k, theta) but the integral in `check`
# integrates to something equivalent to 1 and not to 1 exactly
_x, k, theta = symbols("x k theta", positive=True)
pdf = 1/(gamma(k)*theta**k)*_x**(k-1)*exp(-_x/theta)
X = ContinuousRV(_x, pdf, set=Interval(0, oo))
Y = Gamma('y', k, theta)
assert (E(X) - E(Y)).simplify() == 0
assert (variance(X) - variance(Y)).simplify() == 0
|
def test_ContinuousRV():
pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
# X and Y should be equivalent
X = ContinuousRV(x, pdf, check=True)
Y = Normal('y', 0, 1)
assert variance(X) == variance(Y)
k = 2
n = 10
samples = dict(scipy=[], numpy=[], pymc3=[])
for lib in samples.keys():
for i in range(2):
samples[lib].append(np.array(list(sample(Z, numsamples=n, library=lib, seed=0))))
for lib, samps in samples.items():
assert (
all(
[(samps[i] == samps[i + 1]).all() for i in range(k - 1)]
),
"Library " + lib + " samples differ despite seeding."
)
for lib in samples.keys():
s0 = np.array(list(sample(Z, numsamples=n, library=lib, seed=0)))
s1 = np.array(list(sample(Z, numsamples=n, library=lib, seed=1)))
assert not (s0 == s1).all()
Z = ContinuousRV(z, exp(-z), set=Interval(0, oo))
assert Z.pspace.domain.set == Interval(0, oo)
assert E(Z) == 1
assert P(Z > 5) == exp(-5)
raises(ValueError, lambda: ContinuousRV(z, exp(-z), set=Interval(0, 10), check=True))
# the correct pdf for Gamma(k, theta) but the integral in `check`
# integrates to something equivalent to 1 and not to 1 exactly
_x, k, theta = symbols("x k theta", positive=True)
pdf = 1/(gamma(k)*theta**k)*_x**(k-1)*exp(-_x/theta)
X = ContinuousRV(_x, pdf, set=Interval(0, oo))
Y = Gamma('y', k, theta)
assert (E(X) - E(Y)).simplify() == 0
assert (variance(X) - variance(Y)).simplify() == 0
|
32,005 |
def get_impacted_resources(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the impacted resources list.
Implement the command 'prisma-cloud-compute-vulnerabilities-impacted-resources-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-vulnerabilities-impacted-resources-list command arguments
Returns:
CommandResults: command-results object.
"""
limit, offset = parse_limit_and_offset_values(limit=args.pop("limit", "50"), offset=args.pop("offset", "0"))
cves = argToList(arg=args.get("cve", []))
impacted_images, impacted_hosts, context_output = [], [], []
for cve in cves:
if cve_impacted_resources := client.get_impacted_resources(cve=cve):
if "riskTree" in cve_impacted_resources and cve_impacted_resources.get("riskTree") is not None:
cve_impacted_resources["riskTree"] = dict(
filter_api_response(
api_response=list(cve_impacted_resources.get("riskTree", {}).items()), # type: ignore
limit=limit,
offset=offset
)
)
for image_details in cve_impacted_resources.get("riskTree", {}).values():
for image in image_details:
image_table_details = {
"Image": image.get("image"),
"Container": image.get("container"),
"Host": image.get("host"),
"Namespace": image.get("namespace")
}
if image_table_details not in impacted_images:
impacted_images.append(image_table_details)
if "hosts" in cve_impacted_resources:
cve_impacted_resources["hosts"] = filter_api_response(
api_response=cve_impacted_resources.get("hosts"), limit=limit, offset=offset
)
for host in cve_impacted_resources.get("hosts", []):
host_table_details = {"Hostname": host}
if host_table_details not in impacted_hosts:
impacted_hosts.append(host_table_details)
context_output.append(cve_impacted_resources)
if context_output:
impacted_images_table = tableToMarkdown(
name="Impacted Images",
t=impacted_images,
headers=["Image", "Container", "Host", "Namespace"],
removeNull=True
)
impacted_hosts_table = tableToMarkdown(
name="Impacted Hosts",
t=impacted_hosts,
headers=["Hostname"],
removeNull=True
)
table = impacted_images_table + impacted_hosts_table
else:
context_output, table = [], "No results found"
return CommandResults(
outputs_prefix="PrismaCloudCompute.VulnerabilitiesImpactedResource",
outputs_key_field="_id",
outputs=context_output if context_output else None,
readable_output=table,
)
|
def get_impacted_resources(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the impacted resources list.
Implement the command 'prisma-cloud-compute-vulnerabilities-impacted-resources-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-vulnerabilities-impacted-resources-list command arguments
Returns:
CommandResults: command-results object.
"""
limit, offset = parse_limit_and_offset_values(limit=args.pop("limit", "50"), offset=args.pop("offset", "0"))
cves = argToList(arg=args.get("cve", []))
impacted_images, impacted_hosts, context_output = [], [], []
for cve in cves:
if cve_impacted_resources := client.get_impacted_resources(cve=cve):
if "riskTree" in cve_impacted_resources and cve_impacted_resources.get("riskTree") is not None:
cve_impacted_resources["riskTree"] = dict(
filter_api_response(
api_response=list(cve_impacted_resources.get("riskTree", {}).items()), # type: ignore
limit=limit,
offset=offset
)
)
for image_details in cve_impacted_resources.get("riskTree", {}).values():
for image in image_details:
image_table_details = {
"Image": image.get("image"),
"Container": image.get("container"),
"Host": image.get("host"),
"Namespace": image.get("namespace")
}
if image_table_details not in impacted_images:
impacted_images.append(image_table_details)
if "hosts" in cve_impacted_resources:
cve_impacted_resources["hosts"] = filter_api_response(
api_response=cve_impacted_resources.get("hosts"), limit=limit, offset=offset
)
for host in cve_impacted_resources.get("hosts", []):
host_table_details = {"Hostname": host}
if host_table_details not in impacted_hosts:
impacted_hosts.append(host_table_details)
context_output.append(cve_impacted_resources)
if context_output:
impacted_images_table = tableToMarkdown(
name="Impacted Images",
t=impacted_images,
headers=["Image", "Container", "Host", "Namespace"],
removeNull=True
)
impacted_hosts_table = tableToMarkdown(
name="Impacted Hosts",
t=impacted_hosts,
headers=["Hostname"],
removeNull=True
)
table = impacted_images_table + impacted_hosts_table
else:
context_output, table = [], "No results found."
return CommandResults(
outputs_prefix="PrismaCloudCompute.VulnerabilitiesImpactedResource",
outputs_key_field="_id",
outputs=context_output if context_output else None,
readable_output=table,
)
|
12,206 |
def list_all_known_prefixes():
all_env_paths = set()
# If the user is an admin, load environments from all user home directories
if is_admin():
if on_win:
home_dir_dir = dirname(expand('~'))
search_dirs = tuple(entry.path for entry in scandir(home_dir_dir))
else:
from pwd import getpwall
search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
else:
search_dirs = (expand('~'),)
for home_dir in search_dirs:
environments_txt_file = get_user_environments_txt_file(home_dir)
if isfile(environments_txt_file):
try:
# When the user is an admin, some environments.txt files might
# not be readable (if on network file system for example)
all_env_paths.update(_clean_environments_txt(environments_txt_file))
except PermissionError:
continue
# in case environments.txt files aren't complete, also add all known conda environments in
# all envs_dirs
envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
all_env_paths.update(path for path in (
entry.path for envs_dir in envs_dirs for entry in scandir(envs_dir)
) if path not in all_env_paths and is_conda_environment(path))
all_env_paths.add(context.root_prefix)
return sorted(all_env_paths)
|
def list_all_known_prefixes():
all_env_paths = set()
# If the user is an admin, load environments from all user home directories
if is_admin():
if on_win:
home_dir_dir = dirname(expand('~'))
search_dirs = tuple(entry.path for entry in scandir(home_dir_dir))
else:
from pwd import getpwall
search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
else:
search_dirs = (expand('~'),)
for home_dir in search_dirs:
environments_txt_file = get_user_environments_txt_file(home_dir)
if isfile(environments_txt_file):
try:
# When the user is an admin, some environments.txt files might
# not be readable (if on network file system for example)
all_env_paths.update(_clean_environments_txt(environments_txt_file))
except PermissionError:
log.warning(f"Unable to access {environments_txt_file}")
# in case environments.txt files aren't complete, also add all known conda environments in
# all envs_dirs
envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
all_env_paths.update(path for path in (
entry.path for envs_dir in envs_dirs for entry in scandir(envs_dir)
) if path not in all_env_paths and is_conda_environment(path))
all_env_paths.add(context.root_prefix)
return sorted(all_env_paths)
|
30,020 |
def rk4(derivs, y0, t):
"""
Integrate 1-D or N-D system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
Example::
>>> ### 2D system
>>> def derivs(x):
... d1 = x[0] + 2*x[1]
... d2 = -3*x[0] + 4*x[1]
... return (d1, d2)
>>> dt = 0.0005
>>> t = arange(0.0, 2.0, dt)
>>> y0 = (1,2)
>>> yout = rk4(derivs6, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
This would then require re-adding the time variable to the signature of derivs.
Args:
derivs: the derivative of the system and has the signature ``dy = derivs(yi)``
y0: initial state vector
t: sample times
Returns:
yout: Runge-Kutta approximation of the ODE
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
this = t[i]
dt = t[i + 1] - this
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0))
k2 = np.asarray(derivs(y0 + dt2 * k1))
k3 = np.asarray(derivs(y0 + dt2 * k2))
k4 = np.asarray(derivs(y0 + dt * k3))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
# We only care about the final timestep and we cleave off action value which will be zero
return yout[-1][:4]
|
def rk4(derivs, y0, t):
"""
Integrate 1-D or N-D system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
Example:
>>> ### 2D system
>>> def derivs(x):
... d1 = x[0] + 2*x[1]
... d2 = -3*x[0] + 4*x[1]
... return (d1, d2)
>>> dt = 0.0005
>>> t = arange(0.0, 2.0, dt)
>>> y0 = (1,2)
>>> yout = rk4(derivs6, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
This would then require re-adding the time variable to the signature of derivs.
Args:
derivs: the derivative of the system and has the signature ``dy = derivs(yi)``
y0: initial state vector
t: sample times
Returns:
yout: Runge-Kutta approximation of the ODE
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
this = t[i]
dt = t[i + 1] - this
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0))
k2 = np.asarray(derivs(y0 + dt2 * k1))
k3 = np.asarray(derivs(y0 + dt2 * k2))
k4 = np.asarray(derivs(y0 + dt * k3))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
# We only care about the final timestep and we cleave off action value which will be zero
return yout[-1][:4]
|
2,441 |
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol=0.0,
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
However care must taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).If None,the random number
generator is the RandomState instance used by np.random.
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
has one connected component. If there graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
https://doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
elif eigen_solver not in ("arpack", "lobpcg", "amg"):
raise ValueError(
"Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if (
eigen_solver == "arpack"
or eigen_solver != "lobpcg"
and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=eigen_tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
# Shift the Laplacian so its diagononal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension and create initial
# approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(
laplacian, X, tol=1e-5, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
|
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol=0.0,
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
However care must taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`). If None,the random number
generator is the RandomState instance used by np.random.
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
has one connected component. If there graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
https://doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
elif eigen_solver not in ("arpack", "lobpcg", "amg"):
raise ValueError(
"Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if (
eigen_solver == "arpack"
or eigen_solver != "lobpcg"
and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=eigen_tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
# Shift the Laplacian so its diagononal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension and create initial
# approximation X to eigenvectors
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(
laplacian, X, tol=1e-5, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
|
10,512 |
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
regexp=dict(type='str', aliases=['regex']),
literal=dict(type='str'),
line=dict(type='str', aliases=['value']),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
backrefs=dict(type='bool', default=False),
create=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
firstmatch=dict(type='bool', default=False),
validate=dict(type='str'),
),
mutually_exclusive=[
['insertbefore', 'insertafter'], ['regexp', 'literal'], ['backrefs', 'literal']],
add_file_common_args=True,
supports_check_mode=True,
)
params = module.params
create = params['create']
backup = params['backup']
backrefs = params['backrefs']
path = params['path']
firstmatch = params['firstmatch']
regexp = params['regexp']
literal = params['literal']
line = params['line']
if regexp == '' or literal == '':
module.warn(
"The regular expression is an empty string, which will match every line in the file. "
"This may have unintended consequences, such as replacing the last line in the file rather than appending. "
"If this is desired, use '^' to match every line in the file and avoid this warning.")
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.isdir(b_path):
module.fail_json(rc=256, msg='Path %s is a directory !' % path)
if params['state'] == 'present':
if backrefs and regexp is None:
module.fail_json(msg='regexp is required with backrefs=true')
if line is None:
module.fail_json(msg='line is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
present(module, path, regexp, literal, line,
ins_aft, ins_bef, create, backup, backrefs, firstmatch)
else:
if regexp is None and literal is None and line is None:
module.fail_json(msg='one of line, literal or regexp is required with state=absent')
absent(module, path, regexp, literal, line, backup)
|
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
regexp=dict(type='str', aliases=['regex']),
literal=dict(type='str'),
line=dict(type='str', aliases=['value']),
insertafter=dict(type='str'),
insertbefore=dict(type='str'),
backrefs=dict(type='bool', default=False),
create=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
firstmatch=dict(type='bool', default=False),
validate=dict(type='str'),
),
mutually_exclusive=[
['insertbefore', 'insertafter'], ['regexp', 'literal'], ['backrefs', 'literal']],
add_file_common_args=True,
supports_check_mode=True,
)
params = module.params
create = params['create']
backup = params['backup']
backrefs = params['backrefs']
path = params['path']
firstmatch = params['firstmatch']
regexp = params['regexp']
literal = params['literal']
line = params['line']
if regexp == '' or literal == '':
module.warn(
"The regular expression is an empty string, which will match every line in the file. "
"This may have unintended consequences, such as replacing the last line in the file rather than appending. "
"If this is desired, use '^' to match every line in the file and avoid this warning.")
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.isdir(b_path):
module.fail_json(rc=256, msg='Path %s is a directory !' % path)
if params['state'] == 'present':
if backrefs and regexp is None:
module.fail_json(msg='regexp is required with backrefs=true')
if line is None:
module.fail_json(msg='line is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
present(module, path, regexp, literal, line,
ins_aft, ins_bef, create, backup, backrefs, firstmatch)
else:
if (regexp, literal, line) == (None, None, None):
module.fail_json(msg='one of line, literal or regexp is required with state=absent')
absent(module, path, regexp, literal, line, backup)
|