id (int64, values 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
1,187 | def _parameterization_dir(param,maxlen):
"""
Returns the directory name for the given parameterization string as follows:
- If the parameterization is longer than maxlen characters, then
return the SHA-1 hex digest.
- Otherwise, return the parameterization unchanged.
"""
if len(param) > maxlen:
return sha1(param.encode()).hexdigest()
return param
| def _parameterization_dir(param, maxlen):
"""
Returns the directory name for the given parameterization string as follows:
- If the parameterization is longer than maxlen characters, then
return the SHA-1 hex digest.
- Otherwise, return the parameterization unchanged.
"""
if len(param) > maxlen:
return sha1(param.encode()).hexdigest()
return param
|
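The `_parameterization_dir` pair above maps long parameterization strings to a fixed-length SHA-1 digest and returns short ones unchanged. A minimal standalone sketch, assuming `sha1` is `hashlib.sha1` (the import is not shown in the excerpt):

```python
from hashlib import sha1  # assumed import; the excerpt does not show where sha1 comes from

def _parameterization_dir(param, maxlen):
    """Return param unchanged if short enough, else its SHA-1 hex digest."""
    if len(param) > maxlen:
        return sha1(param.encode()).hexdigest()
    return param

print(_parameterization_dir("alpha=1", 32))   # "alpha=1" (short enough, returned as-is)
print(_parameterization_dir("x" * 100, 32))   # 40-character SHA-1 hex digest
```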
58,042 | def install_software(topology: Topology, version: str,
device_filter_string: str = None, sync: bool = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
:param sync: If provided, runs the download synchronously - make sure 'execution-timeout' is increased.
"""
if sync == "false":
sync = False
result: InstallSoftwareCommandResult = UniversalCommand.install_software(topology, version,
device_filter_str=device_filter_string,
sync=sync)
return result
| def install_software(topology: Topology, version: str,
device_filter_string: str = None, sync: bool = False) -> InstallSoftwareCommandResult:
"""
Install the given software version onto the device. Download the software first with
pan-os-platform-download-software
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only install to specific devices or serial numbers
:param version: software version to upgrade to, ex. 9.1.2
:param sync: If provided, runs the download synchronously - make sure 'execution-timeout' is increased.
"""
if sync == "false":
sync = False
return UniversalCommand.install_software(topology, version,
device_filter_str=device_filter_string,
sync=_sync)
|
30,409 | def check_base_branch(pr_num):
print_color('Starting to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error("You cannot merge into master when creating an external PR.")
sys.exit(1)
else:
print_color('Base branch of PR num {} is not master - Great!'.format(pr_num), LOG_COLORS.GREEN)
sys.exit(0)
| def check_base_branch(pr_num):
print_color(f'Fetching the base branch of pull request #{pr_num}', LOG_COLORS.NATIVE)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error("You cannot merge into master when creating an external PR.")
sys.exit(1)
else:
print_color('Base branch of PR num {} is not master - Great!'.format(pr_num), LOG_COLORS.GREEN)
sys.exit(0)
|
13,893 | def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
configure_logging(logger, logging.DEBUG if options.verbose else logging.INFO)
if cli_options.version:
logger.info(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warning(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
options.starting_dir = os.path.abspath(os.getcwd())
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter() for f in options.exclude_dirs]
options.exclude = [f.build_filter() for f in options.exclude]
options.filter = [f.build_filter() for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter() for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter() for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.debug(f'Filters for {name}: ({len(filters)})')
for f in filters:
logger.debug(f'- {f}')
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options)
else:
collect_coverage_from_gcov(covdata, options)
logger.debug(f"Gathered coveraged data for {len(covdata)} files")
# Print reports
error_occurred = print_reports(covdata, options)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch)
| def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
configure_logging(logger, logging.DEBUG if options.verbose else logging.INFO)
if cli_options.version:
logger.info(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warning(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
options.starting_dir = os.path.abspath(os.getcwd())
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter() for f in options.exclude_dirs]
options.exclude = [f.build_filter() for f in options.exclude]
options.filter = [f.build_filter() for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter() for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter() for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.debug(f'Filters for {name}: ({len(filters)})')
for f in filters:
logger.debug(f" - {f}")
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options)
else:
collect_coverage_from_gcov(covdata, options)
logger.debug(f"Gathered coveraged data for {len(covdata)} files")
# Print reports
error_occurred = print_reports(covdata, options)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch)
|
32,331 | def saas_security_get_events_command(client: Client, args: Dict) -> Union[str, CommandResults]:
"""
Fetches events from the saas-security queue and return them to the war-room.
in case should_push_events is set to True, they will be also sent to XSIAM.
"""
limit = arg_to_number(args.get('limit')) or 100
validate_limit(limit=limit)
should_push_events = argToBoolean(args.get('should_push_events'))
if events := fetch_events(client=client, max_fetch=limit):
if should_push_events:
send_events_to_xsiam(events=events, vendor='PaloAltoNetworks', product='SaasSecurity')
return CommandResults(
readable_output=tableToMarkdown(
'SaaS Security Logs',
events,
headers=['log_type', 'item_type', 'item_name', 'timestamp', 'serial'],
headerTransform=underscoreToCamelCase,
removeNull=True
),
raw_response=events,
outputs=events,
outputs_key_field=['timestamp', 'log_type, item_name, item_type'],
outputs_prefix='SaasSecurity.Event'
)
return 'No events were found.'
| def saas_security_get_events_command(client: Client, args: Dict) -> Union[str, CommandResults]:
"""
Fetches events from the saas-security queue and return them to the war-room.
in case should_push_events is set to True, they will be also sent to XSIAM.
"""
limit = arg_to_number(args.get('limit')) or 100
validate_limit(limit)
should_push_events = argToBoolean(args.get('should_push_events'))
if events := fetch_events(client=client, max_fetch=limit):
if should_push_events:
send_events_to_xsiam(events=events, vendor='PaloAltoNetworks', product='SaasSecurity')
return CommandResults(
readable_output=tableToMarkdown(
'SaaS Security Logs',
events,
headers=['log_type', 'item_type', 'item_name', 'timestamp', 'serial'],
headerTransform=underscoreToCamelCase,
removeNull=True
),
raw_response=events,
outputs=events,
outputs_key_field=['timestamp', 'log_type, item_name, item_type'],
outputs_prefix='SaasSecurity.Event'
)
return 'No events were found.'
|
4,131 | def approx_pi(n: cython.int=10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
| def approx_pi(n: cython.int=10000000):
val: float = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
|
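The `approx_pi` pair above accumulates the Basel partial sum, sum over k of 1/k^2, which converges to pi^2/6, so pi is recovered as sqrt(6 * sum). A plain-Python sketch of the same computation, assuming `recip_square(k)` returns 1/k**2 as its name suggests:

```python
def approx_pi_py(n: int = 10_000_000) -> float:
    # Basel sum: 1/1^2 + 1/2^2 + ... + 1/n^2 approaches pi^2 / 6
    val = 0.0
    for k in range(1, n + 1):
        val += 1.0 / (k * k)      # stands in for recip_square(k) in the excerpt
    return (6 * val) ** 0.5

print(approx_pi_py(1_000_000))    # ~3.1415917; the error shrinks as n grows
```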
56,838 | def _get_account_confirmation_template_params(commcare_user, message_token, url_name):
url = absolute_reverse(url_name, args=[commcare_user.domain, message_token])
name = ""
if commcare_user.first_name or commcare_user.last_name:
name = " ".join([commcare_user.first_name, commcare_user.last_name])
return {
'name': name,
'domain': commcare_user.domain,
'username': commcare_user.raw_username,
'url': url,
'url_prefix': get_static_url_prefix(),
'hq_name': commcare_hq_names()['commcare_hq_names']['COMMCARE_HQ_NAME']
}
| def _get_account_confirmation_template_params(commcare_user, message_token, url_name):
url = absolute_reverse(url_name, args=[commcare_user.domain, message_token])
return {
'name': commcare_user.full_name,
'domain': commcare_user.domain,
'username': commcare_user.raw_username,
'url': url,
'url_prefix': get_static_url_prefix(),
'hq_name': commcare_hq_names()['commcare_hq_names']['COMMCARE_HQ_NAME']
}
|
46,398 | def which(
command: str, *, return_bool: bool = False, raise_error: bool = False, raise_msg: str = None, env: str = None
) -> Union[bool, None, str]:
r"""Test to see if a command is available.
Returns
-------
str or None
By default, returns command path if command found or `None` if not.
Environment is $PATH or `os.pathsep`-separated `env`, less any None values.
bool
When `return_bool=True`, returns whether or not found.
Raises
------
ModuleNotFoundError
When `raises_error=True` and command not found. Raises generic message plus any `raise_msg`.
"""
if env is None:
lenv = {"PATH": os.environ.get("PATH", "") + os.pathsep + os.path.dirname(sys.executable)}
else:
lenv = {"PATH": os.pathsep.join([os.path.abspath(x) for x in env.split(os.pathsep) if x != ""])}
lenv = {k: v for k, v in lenv.items() if v is not None}
ans = shutil.which(command, mode=os.F_OK | os.X_OK, path=lenv["PATH"])
if raise_error and ans is None:
raise ModuleNotFoundError(
f"Command '{command}' not found in envvar PATH.{' ' + raise_msg if raise_msg else ''}"
)
if return_bool:
return bool(ans)
else:
return ans
| def which(
command: str, *, return_bool: bool = False, raise_error: bool = False, raise_msg: str = None, env: str = None
) -> Union[bool, None, str]:
r"""Test to see if a command is available.
Returns
-------
str or None
By default, returns command path if command found or `None` if not.
Environment is $PATH or `os.pathsep`-separated `env`, less any None values.
bool
When `return_bool=True`, returns whether or not found.
Raises
------
ModuleNotFoundError
When `raises_error=True` and command not found. Raises generic message plus any `raise_msg`.
"""
if env is None:
lenv = {"PATH": os.pathsep + os.environ.get("PATH", "") + os.pathsep + os.path.dirname(sys.executable)}
else:
lenv = {"PATH": os.pathsep.join([os.path.abspath(x) for x in env.split(os.pathsep) if x != ""])}
lenv = {k: v for k, v in lenv.items() if v is not None}
ans = shutil.which(command, mode=os.F_OK | os.X_OK, path=lenv["PATH"])
if raise_error and ans is None:
raise ModuleNotFoundError(
f"Command '{command}' not found in envvar PATH.{' ' + raise_msg if raise_msg else ''}"
)
if return_bool:
return bool(ans)
else:
return ans
|
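In its default branch, the `which` helper above is a thin wrapper around `shutil.which` with a search path built from `$PATH` plus the directory of the running interpreter. A hedged sketch of just that lookup:

```python
import os
import shutil
import sys

# Same search path the default branch of `which` assembles:
# $PATH plus the directory containing the current interpreter.
search_path = os.environ.get("PATH", "") + os.pathsep + os.path.dirname(sys.executable)
print(shutil.which("python3", mode=os.F_OK | os.X_OK, path=search_path))
# -> a full path such as "/usr/bin/python3", or None if the command is not found
```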
7,132 | def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
testing.assert_equal(guess_spatial_dimensions(im1), 2)
testing.assert_equal(guess_spatial_dimensions(im2), 3)
testing.assert_equal(guess_spatial_dimensions(im3), None)
testing.assert_equal(guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
| def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
testing.assert_equal(guess_spatial_dimensions(im1), 2)
testing.assert_equal(guess_spatial_dimensions(im2), 3)
testing.assert_equal(guess_spatial_dimensions(im3), None)
testing.assert_equal(_guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
|
41,719 | def test_ignite_pruning_handler():
# type: () -> None
def update(engine, batch):
# type: (Engine, Iterable) -> None
pass
trainer = Engine(update)
# The pruner is activated.
study = optuna.create_study(pruner=DeterministicPruner(True))
trial = create_running_trial(study, 1.0)
handler = optuna.integration.IgnitePruningHandler(trial, 'accuracy', trainer)
with patch.object(trainer, 'state', epoch=Mock(return_value=1), metrics={'accuracy': 1}):
with pytest.raises(optuna.structs.TrialPruned):
handler(trainer)
# # The pruner is not activated.
study = optuna.create_study(pruner=DeterministicPruner(False))
trial = create_running_trial(study, 1.0)
handler = optuna.integration.IgnitePruningHandler(trial, 'accuracy', trainer)
with patch.object(trainer, 'state', epoch=Mock(return_value=1), metrics={'accuracy': 1}):
handler(trainer)
| def test_ignite_pruning_handler():
# type: () -> None
def update(engine, batch):
# type: (Engine, Iterable) -> None
pass
trainer = Engine(update)
# The pruner is activated.
study = optuna.create_study(pruner=DeterministicPruner(True))
trial = create_running_trial(study, 1.0)
handler = optuna.integration.IgnitePruningHandler(trial, 'accuracy', trainer)
with patch.object(trainer, 'state', epoch=Mock(return_value=1), metrics={'accuracy': 1}):
with pytest.raises(optuna.structs.TrialPruned):
handler(trainer)
# The pruner is not activated.
study = optuna.create_study(pruner=DeterministicPruner(False))
trial = create_running_trial(study, 1.0)
handler = optuna.integration.IgnitePruningHandler(trial, 'accuracy', trainer)
with patch.object(trainer, 'state', epoch=Mock(return_value=1), metrics={'accuracy': 1}):
handler(trainer)
|
38,877 | def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = deepcopy(archive.config)
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
# split files
evaluation_data_path_list = args.input_file.split(":")
if args.output_file is not None:
output_file_list = args.output_file.split(":")
assert len(output_file_list) == len(
evaluation_data_path_list
), "number of output path must be equal number of dataset "
if args.predictions_output_file is not None:
predictions_output_file_list = args.predictions_output_file.split(":")
assert len(predictions_output_file_list) == len(
evaluation_data_path_list
), "number of predictions_output_file path must be equal number of dataset "
# output file
output_file_path = None
predictions_output_file_path = None
for index in range(len(evaluation_data_path_list)):
config = deepcopy(archive.config)
evaluation_data_path = evaluation_data_path_list[index]
if args.output_file is not None:
output_file_path = output_file_list[index]
if args.predictions_output_file is not None:
predictions_output_file_path = predictions_output_file_list[index]
logger.info("Reading evaluation data from %s", evaluation_data_path)
data_loader_params = config.get("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.get("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(
params=data_loader_params, reader=dataset_reader, data_path=evaluation_data_path
)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=data_loader.iter_instances())
model.extend_embedder_vocab(embedding_sources)
data_loader.index_with(model.vocab)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=output_file_path,
predictions_output_file=predictions_output_file_path,
)
logger.info("Finished evaluating.")
return metrics
| def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
common_logging.FILE_FRIENDLY_LOGGING = args.file_friendly_logging
# Disable some of the more verbose logging statements
logging.getLogger("allennlp.common.params").disabled = True
logging.getLogger("allennlp.nn.initializers").disabled = True
logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
# Load from archive
archive = load_archive(
args.archive_file,
weights_file=args.weights_file,
cuda_device=args.cuda_device,
overrides=args.overrides,
)
config = deepcopy(archive.config)
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
dataset_reader = archive.validation_dataset_reader
# split files
evaluation_data_path_list = args.input_file.split(":")
if args.output_file is not None:
output_file_list = args.output_file.split(":")
assert len(output_file_list) == len(
evaluation_data_path_list
), "number of output path must be equal number of dataset "
if args.predictions_output_file is not None:
predictions_output_file_list = args.predictions_output_file.split(":")
assert len(predictions_output_file_list) == len(
evaluation_data_path_list
), "The number of `predictions_output_file` paths must be equal to the number of datasets being evaluated. "
# output file
output_file_path = None
predictions_output_file_path = None
for index in range(len(evaluation_data_path_list)):
config = deepcopy(archive.config)
evaluation_data_path = evaluation_data_path_list[index]
if args.output_file is not None:
output_file_path = output_file_list[index]
if args.predictions_output_file is not None:
predictions_output_file_path = predictions_output_file_list[index]
logger.info("Reading evaluation data from %s", evaluation_data_path)
data_loader_params = config.get("validation_data_loader", None)
if data_loader_params is None:
data_loader_params = config.get("data_loader")
if args.batch_size:
data_loader_params["batch_size"] = args.batch_size
data_loader = DataLoader.from_params(
params=data_loader_params, reader=dataset_reader, data_path=evaluation_data_path
)
embedding_sources = (
json.loads(args.embedding_sources_mapping) if args.embedding_sources_mapping else {}
)
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(instances=data_loader.iter_instances())
model.extend_embedder_vocab(embedding_sources)
data_loader.index_with(model.vocab)
metrics = evaluate(
model,
data_loader,
args.cuda_device,
args.batch_weight_key,
output_file=output_file_path,
predictions_output_file=predictions_output_file_path,
)
logger.info("Finished evaluating.")
return metrics
|
7,545 | def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings -- for ``os.path`` intelligent path joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
| def get_pkg_data_path(*path, package=None):
"""Make path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str, optional, keyword only
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, *path)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return path
|
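The `get_pkg_data_path` docstring above describes `os.path`-style joining of the `*path` pieces. A hedged sketch of the simplest branch (no `package` given and the caller is not inside a package), where the pieces are passed straight to `os.path.join`; the file names here are purely illustrative:

```python
import os

# When no package context applies, get_pkg_data_path just joins the path pieces.
print(os.path.join("data", "coordinates", "sites.json"))
# -> "data/coordinates/sites.json" on POSIX (backslash-separated on Windows)
```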
58,117 | def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
| def generate_dbotscore(response: Dict) -> List:
"""Creates CommandResult object based on the contents of 'response' argument
and provides DBotScore objects.
Parameters
----------
response : dict
Object returned by ANYRUN API call in 'get_report' function.
Returns
-------
List
A list of CommandResults objects.
"""
data = response.get('data', {})
analysis = data.get('analysis', {})
main_object = analysis.get('content', {}).get('mainObject', {})
submission_type = main_object.get('type')
submission_type = 'hash' if submission_type in {'file', 'download'} else submission_type
threat_text = analysis.get('scores', {}).get('verdict', {}).get('threatLevelText', '').casefold()
reputation_map = {
"shared": Common.DBotScore.NONE,
"unknown": Common.DBotScore.NONE,
"whitelisted": Common.DBotScore.GOOD,
"malicious": Common.DBotScore.BAD,
"suspicious": Common.DBotScore.SUSPICIOUS
}
returned_data = []
main_entity = None
main_entity_type = None
# Add the hash or URL first
if submission_type == 'hash':
hashes = main_object.get('hashes', {})
info = main_object.get('info', {})
file_type = info.get('file')
exif = info.get('exif', {})
main_entity = hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5')
main_entity_type = FeedIndicatorType.File
dbot_score = Common.DBotScore(
indicator=hashes.get('sha256') or hashes.get('sha1') or hashes.get('md5'),
indicator_type=DBotScoreType.FILE,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE,
reliability=params.get('reliability')
)
returned_data.append(CommandResults(
indicator=Common.File(
dbot_score=dbot_score,
md5=hashes.get('md5'),
sha1=hashes.get('sha1'),
sha256=hashes.get('sha256'),
file_type=file_type,
associated_file_names=exif.get('OriginalFileName')
)
))
else:
main_entity = main_object.get('url')
main_entity_type = FeedIndicatorType.URL
url_outputs = {
'Data': main_object.get('url')
}
dbot_score = Common.DBotScore(
indicator=main_object.get('url'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=THREAT_TEXT_TO_DBOTSCORE.get(threat_text) or Common.DBotScore.NONE
)
if dbot_score.score >= 2:
url_outputs['Malicious'] = {
'Vendor': 'ANYRUN',
'Description': threat_text
}
returned_data.append(CommandResults(
outputs_prefix='URL',
outputs_key_field=['Data'],
outputs=url_outputs,
indicator=Common.URL(
url=main_object.get('url'),
dbot_score=dbot_score,
)
))
# Check if network information is available in the report
if 'network' in data:
network_data = data.get('network')
# Then add all the network-related indicators - 'connections'
if 'connections' in network_data:
connections = network_data.get('connections')
for current_connection in connections:
reputation = current_connection.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_connection.get('IP'),
indicator_type=DBotScoreType.IP,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_connection.get('IP'),
entity_b_type=FeedIndicatorType.IP,
brand="ANYRUN"
)]
ip_indicator = Common.IP(
ip=current_connection.get('IP'),
asn=current_connection.get('ASN'),
port=current_connection.get('Port'),
geo_country=current_connection.get('Country'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_connection.get('IP') not in [
x.indicator.ip for x in returned_data if isinstance(x.indicator, Common.IP)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_connection.get('IP')}",
[{
"Description": f"This IP was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=ip_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'dnsRequests'
if 'dnsRequests' in network_data:
for current_dnsRequests in network_data.get('dnsRequests'):
reputation = current_dnsRequests.get('Reputation')
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_dnsRequests.get('Domain'),
indicator_type=DBotScoreType.DOMAIN,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_dnsRequests.get('Domain'),
entity_b_type=FeedIndicatorType.Domain,
brand="ANYRUN"
)]
if "IP" in current_dnsRequests:
for ip in current_dnsRequests.get('IP', []):
relationships.append(
EntityRelationship(
name=EntityRelationship.Relationships.RESOLVES_TO,
entity_a=current_dnsRequests.get('Domain'),
entity_a_type=FeedIndicatorType.Domain,
entity_b=ip,
entity_b_type=FeedIndicatorType.IP
)
)
domain_ip_dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name="ANYRUN",
score=Common.DBotScore.NONE
)
domain_ip_indicator = Common.IP(
ip=ip,
dbot_score=domain_ip_dbot_score
)
returned_data.append(CommandResults(
indicator=domain_ip_indicator,
readable_output=tableToMarkdown(
f"{ip}",
[{
"Description": f"This IP was resovled from {current_dnsRequests.get('Domain')}"
}]
)
))
domain_indicator = Common.Domain(
domain=current_dnsRequests.get('Domain'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_dnsRequests.get('Domain') not in [
x.indicator.domain for x in returned_data if isinstance(x.indicator, Common.Domain)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_dnsRequests.get('Domain')}",
[{
"Description": f"This domain was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=domain_indicator,
relationships=relationships
))
# Then add all the network-related indicators - 'httpRequests'
if 'httpRequests' in network_data:
for current_httpRequests in network_data.get('httpRequests'):
reputation = current_httpRequests['Reputation']
if reputation in reputation_map.keys():
current_dbot_score = Common.DBotScore(
indicator=current_httpRequests.get('URL'),
indicator_type=DBotScoreType.URL,
integration_name='ANYRUN',
score=reputation_map[reputation]
)
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.COMMUNICATED_WITH,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=current_httpRequests.get('URL'),
entity_b_type=FeedIndicatorType.URL,
brand="ANYRUN"
)]
url_indicator = Common.URL(
url=current_httpRequests.get('URL'),
geo_country=current_httpRequests.get('Country'),
port=current_httpRequests.get('Port'),
dbot_score=current_dbot_score,
relationships=relationships
)
if current_httpRequests.get('URL') not in [
x.indicator.url for x in returned_data if isinstance(x.indicator, Common.URL)
]:
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{current_httpRequests.get('URL')}",
[{
"Description": f"This URL was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=url_indicator,
relationships=relationships
))
if 'mitre' in data:
mitre_data = data.get('mitre')
for item in mitre_data:
relationships = [EntityRelationship(
name=EntityRelationship.Relationships.RELATED_TO,
entity_a=main_entity,
entity_a_type=main_entity_type,
entity_b=item.get('name'),
entity_b_type='Attack Pattern'
)]
attack_indicator = Common.AttackPattern(
stix_id=None,
value=item.get('name'),
mitre_id=item.get('id')
)
returned_data.append(CommandResults(
readable_output=tableToMarkdown(
f"{item.get('name')}",
[{
"Description": f"This Attack Pattern was observed after detonation of {main_entity} in ANYRUN"
}]
),
indicator=attack_indicator,
relationships=relationships
))
return returned_data
|
34,508 | def _write_core_yaml(
training_data_path: Path, output_path: Path, source_path: Path
) -> None:
from rasa.core.training.story_reader.yaml_story_reader import KEY_ACTIVE_LOOP
reader = MarkdownStoryReader()
writer = YAMLStoryWriter()
loop = asyncio.get_event_loop()
steps = loop.run_until_complete(reader.read_from_file(training_data_path))
if YAMLStoryWriter.stories_contain_loops(steps):
print_warning(
f"Training data file '{source_path}' contains forms. "
f"'form' key will be converted to '{KEY_ACTIVE_LOOP}' key. "
f"Please note that in order for these stories to work you still "
f"need the 'FormPolicy' to be active. However the 'FormPolicy' is "
f"deprecated, please consider switching to the new 'RulePolicy', "
f"you can find the documentation here: {DOCS_URL_RULES}."
)
writer.dump(output_path, steps)
print_success(f"Converted Core file: '{source_path}' >> '{output_path}'.")
| def _write_core_yaml(
training_data_path: Path, output_path: Path, source_path: Path
) -> None:
from rasa.core.training.story_reader.yaml_story_reader import KEY_ACTIVE_LOOP
reader = MarkdownStoryReader()
writer = YAMLStoryWriter()
loop = asyncio.get_event_loop()
steps = loop.run_until_complete(reader.read_from_file(training_data_path))
if YAMLStoryWriter.stories_contain_loops(steps):
print_warning(
f"Training data file '{source_path}' contains forms. "
f"Any 'form' events will be converted to '{KEY_ACTIVE_LOOP}' events. "
f"Please note that in order for these stories to work you still "
f"need the 'FormPolicy' to be active. However the 'FormPolicy' is "
f"deprecated, please consider switching to the new 'RulePolicy', "
f"you can find the documentation here: {DOCS_URL_RULES}."
)
writer.dump(output_path, steps)
print_success(f"Converted Core file: '{source_path}' >> '{output_path}'.")
|
51,414 | def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages,
OpenGL textures, etc.
Returns the ARGB array (unsigned byte) and a boolean indicating whether
there is alpha channel data. This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a
lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
data numpy array of int/float types. If
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
be set to the length of the lookup table, or 255 if no lookup table is provided.
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Lookup tables can be built using ColorMap or GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the *actual* order
is BGRA).
============== ==================================================================================
"""
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = np.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
levels = levels.astype(np.float)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0]
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# awkward, but fastest numpy native nan evaluation
#
nanMask = None
if data.ndim == 2 and data.dtype.kind == 'f' and np.isnan(data.min()):
nanMask = np.isnan(data)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal = np.nextafter(maxVal, 2*maxVal)
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
newData[...,i] = rescaleData(data[...,i], scale / rng, minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal = np.nextafter(maxVal, 2*maxVal)
data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
if data.dtype is not np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
# apply nan mask through alpha channel
if nanMask is not None:
alpha = True
imgData[nanMask, 3] = 0
profile()
return imgData, alpha
| def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages,
OpenGL textures, etc.
Returns the ARGB array (unsigned byte) and a boolean indicating whether
there is alpha channel data. This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a
lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
data numpy array of int/float types. If
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
be set to the length of the lookup table, or 255 if no lookup table is provided.
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Lookup tables can be built using ColorMap or GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the *actual* order
is BGRA).
============== ==================================================================================
"""
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = np.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
levels = levels.astype(np.float)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0]
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# awkward, but fastest numpy native nan evaluation
#
nanMask = None
if data.dtype.kind == 'f' and np.isnan(data.min()):
nanMask = np.isnan(data)
if data.ndim > 2:
nanMask = np.any(nanMask, axis=-1)
nanMask = np.isnan(data)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal = np.nextafter(maxVal, 2*maxVal)
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
newData[...,i] = rescaleData(data[...,i], scale / rng, minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal = np.nextafter(maxVal, 2*maxVal)
data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
if data.dtype is not np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
# apply nan mask through alpha channel
if nanMask is not None:
alpha = True
imgData[nanMask, 3] = 0
profile()
return imgData, alpha
|
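The level-handling branch in the record above maps data from [minVal, maxVal] onto [0, scale] before any lookup table is applied. Below is a minimal numpy sketch of that rescaling step, assuming a single (min, max) pair and a hypothetical rescale() helper standing in for the rescaleData call above; it illustrates the scaling idea, not the library's implementation.

import numpy as np

def rescale(data, scale, min_val, max_val, dtype=np.ubyte):
    # map [min_val, max_val] -> [0, scale], then clip and cast (sketch of the rescaleData call above)
    rng = max_val - min_val
    rng = 1 if rng == 0 else rng
    out = (data.astype(float) - min_val) * (scale / rng)
    return np.clip(out, 0, scale).astype(dtype)

data = np.array([[0.0, 0.5], [1.0, 2.0]])
print(rescale(data, 255, 0.0, 1.0))  # values above max_val saturate at 255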
12,304 | def photon_scattering_amplitude(evolver, c_ops, tlist, indices, psi, psit):
"""
Compute the scattering amplitude for a system emitting into multiple
waveguides.
Parameters
----------
evolver : :class:Propagator
Propagator
c_ops : list
list of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`
tlist : array_like
Times at which emissions can happen.
indices : list-like
List of (list of emission times indices) for each waveguide.
psi : Qobj
State at the start of the evolution
psit : Qobj
State at the end of the evolution.
"""
# Extract the full list of taus
taus = []
for i, tau_wg in enumerate(indices):
for t_idx in tau_wg:
taus.append((tlist[t_idx], i))
taus.sort(key=lambda tup: tup[0]) # sort taus by time
tq = tlist[0]
# Compute Prod Ueff(tq, tq-1)
for tau in taus:
tprev = tq
tq, q = tau
psi = c_ops[q] * evolver(tq, tprev) * psi
psi = evolver(tlist[-1], tq) * psi
return psit.overlap(psi)
| def photon_scattering_amplitude(propagator, c_ops, tlist, indices, psi, psit):
"""
Compute the scattering amplitude for a system emitting into multiple
waveguides.
Parameters
----------
evolver : :class:Propagator
Propagator
c_ops : list
list of collapse operators for each waveguide; these are assumed to
include spontaneous decay rates, e.g.
:math:`\\sigma = \\sqrt \\gamma \\cdot a`
tlist : array_like
Times at which emissions can happen.
indices : list-like
List of (list of emission times indices) for each waveguide.
psi : Qobj
State at the start of the evolution
psit : Qobj
State at the end of the evolution.
"""
# Extract the full list of taus
taus = []
for i, tau_wg in enumerate(indices):
for t_idx in tau_wg:
taus.append((tlist[t_idx], i))
taus.sort(key=lambda tup: tup[0]) # sort taus by time
tq = tlist[0]
# Compute Prod Ueff(tq, tq-1)
for tau in taus:
tprev = tq
tq, q = tau
psi = c_ops[q] * evolver(tq, tprev) * psi
psi = evolver(tlist[-1], tq) * psi
return psit.overlap(psi)
|
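The record above builds the amplitude by alternating segments of free evolution with collapse-operator applications at the sorted emission times, then taking the overlap with the final state. The toy numpy sketch below mirrors that chaining pattern with made-up 2x2 matrices in place of QuTiP propagators and operators; it only illustrates the ordering of operations.

import numpy as np

def toy_amplitude(U, c_op, psi0, psit, taus):
    # taus: sorted emission times; a fixed matrix U stands in for the propagator U(t, t_prev)
    psi = psi0
    for _ in taus:
        psi = c_op @ (U @ psi)   # propagate to the emission time, then apply the collapse operator
    psi = U @ psi                # propagate from the last emission time to the final time
    return np.vdot(psit, psi)    # overlap <psit|psi>

U = np.array([[0.8, 0.6], [-0.6, 0.8]])     # placeholder "propagator"
c_op = np.array([[0.0, 1.0], [0.0, 0.0]])   # placeholder collapse operator
psi0 = np.array([0.0, 1.0])
psit = np.array([1.0, 0.0])
print(toy_amplitude(U, c_op, psi0, psit, taus=[0.5]))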
46,960 | def check_task(task: str) -> Tuple[Dict, Any]:
"""
Checks an incoming task string, to validate it's correct and return the
default Pipeline and Model classes, and default models if they exist.
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- :obj:`"feature-extraction"`
- :obj:`"sentiment-analysis"`
- :obj:`"ner"`
- :obj:`"question-answering"`
- :obj:`"fill-mask"`
- :obj:`"summarization"`
- :obj:`"translation_xx_to_yy"`
- :obj:`"translation"`
- :obj:`"text-generation"`
- :obj:`"conversation"`
Returns:
(task_defaults:obj:`dict`, task_options: (:obj:`tuple`, None))
The actual dictionary required to initialize the pipeline and some
extra task options for parametrized tasks like "translation_XX_to_YY"
"""
if task in SUPPORTED_TASKS:
targeted_task = SUPPORTED_TASKS[task]
return targeted_task, None
if task.startswith("translation"):
tokens = task.split("_")
if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
targeted_task = SUPPORTED_TASKS["translation"]
return targeted_task, (tokens[1], tokens[3])
raise KeyError("Invalid translation task {}, use 'translation_XX_to_YY' format".format(task))
raise KeyError(
"Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()) + ["translation_XX_to_YY"])
)
| def check_task(task: str) -> Tuple[Dict, Any]:
"""
Checks an incoming task string, to validate it's correct and return the
default Pipeline and Model classes, and default models if they exist.
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- :obj:`"feature-extraction"`
- :obj:`"sentiment-analysis"`
- :obj:`"ner"`
- :obj:`"question-answering"`
- :obj:`"fill-mask"`
- :obj:`"summarization"`
- :obj:`"translation_xx_to_yy"`
- :obj:`"translation"`
- :obj:`"text-generation"`
- :obj:`"conversational"`
Returns:
(task_defaults:obj:`dict`, task_options: (:obj:`tuple`, None))
The actual dictionary required to initialize the pipeline and some
extra task options for parametrized tasks like "translation_XX_to_YY"
"""
if task in SUPPORTED_TASKS:
targeted_task = SUPPORTED_TASKS[task]
return targeted_task, None
if task.startswith("translation"):
tokens = task.split("_")
if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
targeted_task = SUPPORTED_TASKS["translation"]
return targeted_task, (tokens[1], tokens[3])
raise KeyError("Invalid translation task {}, use 'translation_XX_to_YY' format".format(task))
raise KeyError(
"Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys()) + ["translation_XX_to_YY"])
)
|
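The translation branch in check_task splits a name such as "translation_en_to_de" into four tokens and returns the two language codes alongside the task defaults. A self-contained sketch of that parsing, with a dummy task table in place of transformers' SUPPORTED_TASKS:

SUPPORTED = {"translation": {"impl": "TranslationPipeline"}}  # dummy table, not the real registry

def parse_task(task):
    if task in SUPPORTED:
        return SUPPORTED[task], None
    if task.startswith("translation"):
        tokens = task.split("_")
        if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to":
            return SUPPORTED["translation"], (tokens[1], tokens[3])
        raise KeyError("Invalid translation task {}, use 'translation_XX_to_YY' format".format(task))
    raise KeyError("Unknown task {}".format(task))

print(parse_task("translation_en_to_de"))  # ({'impl': 'TranslationPipeline'}, ('en', 'de'))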
508 | def ucr_has_field(ucr: ReportConfiguration, field):
for column in ucr.report_columns:
if column.column_id == field:
return True
return False
| def ucr_has_field(ucr: ReportConfiguration, field):
return any(c.column_id == field for c in ucr.report_columns)
|
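The rewrite in the record above replaces an explicit loop with any() over a generator expression; both stop at the first matching column id and return False when nothing matches. A minimal equivalence check with simple stand-in objects for report columns:

from types import SimpleNamespace

columns = [SimpleNamespace(column_id="dob"), SimpleNamespace(column_id="name")]

def has_field_loop(cols, field):
    for c in cols:
        if c.column_id == field:
            return True
    return False

def has_field_any(cols, field):
    return any(c.column_id == field for c in cols)

assert has_field_loop(columns, "name") and has_field_any(columns, "name")
assert not has_field_loop(columns, "age") and not has_field_any(columns, "age")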
30,096 | def test_save_signatures_to_location_1_stdout():
# save to sigfile
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
output_capture = io.StringIO()
with contextlib.redirect_stdout(output_capture):
with sourmash_args.SaveSignaturesToLocation("-") as save_sig:
save_sig.add(ss2)
save_sig.add(ss47)
output = output_capture.getvalue()
saved = list(sourmash.signature.load_signatures(output))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
| def test_save_signatures_to_location_1_stdout():
# save to stdout
sig2 = utils.get_test_data('2.fa.sig')
ss2 = sourmash.load_one_signature(sig2, ksize=31)
sig47 = utils.get_test_data('47.fa.sig')
ss47 = sourmash.load_one_signature(sig47, ksize=31)
output_capture = io.StringIO()
with contextlib.redirect_stdout(output_capture):
with sourmash_args.SaveSignaturesToLocation("-") as save_sig:
save_sig.add(ss2)
save_sig.add(ss47)
output = output_capture.getvalue()
saved = list(sourmash.signature.load_signatures(output))
assert ss2 in saved
assert ss47 in saved
assert len(saved) == 2
|
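The test above checks saving to "-" by redirecting sys.stdout into a StringIO buffer and then re-loading whatever was written. A stripped-down sketch of that capture pattern, with an ordinary print() standing in for the sourmash signature writer:

import contextlib
import io

buffer = io.StringIO()
with contextlib.redirect_stdout(buffer):
    print("signature payload")   # stands in for save_sig.add(...)
captured = buffer.getvalue()
assert captured == "signature payload\n"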
51,073 | def _setup_entities(hass: HomeAssistant, entry: ConfigEntry, device_ids: list):
"""Set up Tuya Fan."""
device_manager = hass.data[DOMAIN][entry.entry_id][TUYA_DEVICE_MANAGER]
entities = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
entities.append(TuyaHaFan(device, device_manager))
hass.data[DOMAIN][entry.entry_id][TUYA_HA_DEVICES].add(device_id)
return entities
| def _setup_entities(
hass: HomeAssistant, entry: ConfigEntry, device_ids: list[str]
) -> list[Entity]:
"""Set up Tuya Fan."""
device_manager = hass.data[DOMAIN][entry.entry_id][TUYA_DEVICE_MANAGER]
entities = []
for device_id in device_ids:
device = device_manager.device_map[device_id]
if device is None:
continue
entities.append(TuyaHaFan(device, device_manager))
hass.data[DOMAIN][entry.entry_id][TUYA_HA_DEVICES].add(device_id)
return entities
|
57,490 | def test_get_caller_module_name_error():
with mock.patch('inspect.getmodule', return_value=None):
with pytest.raises(LookupError, match='Could not find caller module'):
get_caller_module_name()
| def test_get_caller_module_name_error(mocker):
mocker.patch('inspect.getmodule', return_value=None)
with pytest.raises(LookupError, match='Could not find caller module'):
get_caller_module_name()
|
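The modified test swaps a mock.patch context manager for pytest-mock's mocker fixture; both substitute inspect.getmodule for the duration of the test. The sketch below shows the context-manager form against a small stand-in function (no pytest needed to see the patching effect); the helper is hypothetical, not the project's get_caller_module_name.

import inspect
from unittest import mock

def caller_module_name():
    module = inspect.getmodule(inspect.stack()[1][0])
    if module is None:
        raise LookupError("Could not find caller module")
    return module.__name__

with mock.patch("inspect.getmodule", return_value=None):
    try:
        caller_module_name()
    except LookupError as err:
        print("raised:", err)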
27,738 | def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).resolve()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).resolve()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
| def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).absolute()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).resolve()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
|
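The only change in the determine_setup pair is Path(inifile).resolve() becoming Path(inifile).absolute(). Both yield an absolute path, but resolve() also follows symlinks and collapses ".." components, while absolute() simply prefixes the current working directory. A short illustration (printed paths depend on where it is run):

from pathlib import Path

p = Path("subdir/../setup.cfg")
print(p.absolute())  # e.g. /tmp/work/subdir/../setup.cfg  (".." kept, no filesystem access)
print(p.resolve())   # e.g. /tmp/work/setup.cfg            (".." and symlinks collapsed)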
749 | def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None):
Q = compute_unscaled_posterior_precision(design_matrix, regularization_matrix)
unscaled_posterior_covariance = covariance_from_precision(Q)
# TODO: evaluate whether using the explicit inverse leads to numerical instability
pseudoInv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix)
# pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2))
degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv)
return unscaled_posterior_covariance, pseudoInv, degrees_of_freedom
| def get_data_independent_estimation_quantities(design_matrix, regularization_matrix=None):
Q = compute_unscaled_posterior_precision(design_matrix, regularization_matrix)
unscaled_posterior_covariance = covariance_from_precision(Q)
# TODO: evaluate whether using the explicit inverse leads to numerical instability
pseudo_inv = np.einsum('...ij, ...kj->...ik', unscaled_posterior_covariance, design_matrix)
# pseudoInv = np.linalg.solve(S, np.swapaxes(design_matrix, -1, -2))
degrees_of_freedom = compute_degrees_of_freedom(design_matrix, pseudoInv)
return unscaled_posterior_covariance, pseudoInv, degrees_of_freedom
|
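The einsum expression in the record computes covariance @ design_matrix.T batch-wise: '...ij, ...kj->...ik' sums over the shared last axis j, which for 2-D inputs is exactly a matrix product with the second operand transposed. A quick numerical check of that equivalence:

import numpy as np

rng = np.random.default_rng(0)
cov = rng.normal(size=(3, 3))
design = rng.normal(size=(5, 3))

via_einsum = np.einsum('...ij, ...kj->...ik', cov, design)
via_matmul = cov @ design.T
assert np.allclose(via_einsum, via_matmul)
print(via_einsum.shape)  # (3, 5)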
30,053 | def validate_transaction_params(
transaction: TxParams, latest_block: BlockData, generated_gas_price: Wei
) -> TxParams:
# gas price strategy explicitly set:
if (
generated_gas_price is not None
and 'gasPrice' not in transaction
and 'maxPriorityFeePerGas' not in transaction
):
transaction = assoc(transaction, 'gasPrice', hex(generated_gas_price))
# legacy and 1559 tx variables used:
if "gasPrice" in transaction and (
"maxFeePerGas" in transaction or "maxPriorityFeePerGas" in transaction
):
raise TransactionTypeMismatch()
# 1559 - canonical tx:
elif 'maxFeePerGas' in transaction and 'maxPriorityFeePerGas' in transaction:
if int(str(transaction["maxFeePerGas"]), 16) < int(
str(transaction["maxPriorityFeePerGas"]), 16
):
raise InvalidTransaction("maxFeePerGas must be >= maxPriorityFeePerGas")
# 1559 - no max fee:
elif 'maxFeePerGas' not in transaction and 'maxPriorityFeePerGas' in transaction:
base_fee = latest_block['baseFeePerGas']
priority_fee = int(str(transaction['maxPriorityFeePerGas']), 16)
max_fee_per_gas = priority_fee + 2 * base_fee
transaction = assoc(transaction, 'maxFeePerGas', hex(max_fee_per_gas))
# 1559 - no priority fee:
elif 'maxFeePerGas' in transaction and 'maxPriorityFeePerGas' not in transaction:
raise InvalidTransaction(
"maxPriorityFeePerGas must be defined in a 1559 transaction."
)
# should be a fully formed (legacy or 1559) tx or no fee values were specified
return transaction
| def validate_transaction_params(
transaction: TxParams, latest_block: BlockData, generated_gas_price: Wei
) -> TxParams:
# gas price strategy explicitly set:
if (
generated_gas_price is not None
and 'gasPrice' not in transaction
and all(_ not in transaction for _ in ('maxFeePerGas', 'maxPriorityFeePerGas'))
):
transaction = assoc(transaction, 'gasPrice', hex(generated_gas_price))
# legacy and 1559 tx variables used:
if "gasPrice" in transaction and (
"maxFeePerGas" in transaction or "maxPriorityFeePerGas" in transaction
):
raise TransactionTypeMismatch()
# 1559 - canonical tx:
elif 'maxFeePerGas' in transaction and 'maxPriorityFeePerGas' in transaction:
if int(str(transaction["maxFeePerGas"]), 16) < int(
str(transaction["maxPriorityFeePerGas"]), 16
):
raise InvalidTransaction("maxFeePerGas must be >= maxPriorityFeePerGas")
# 1559 - no max fee:
elif 'maxFeePerGas' not in transaction and 'maxPriorityFeePerGas' in transaction:
base_fee = latest_block['baseFeePerGas']
priority_fee = int(str(transaction['maxPriorityFeePerGas']), 16)
max_fee_per_gas = priority_fee + 2 * base_fee
transaction = assoc(transaction, 'maxFeePerGas', hex(max_fee_per_gas))
# 1559 - no priority fee:
elif 'maxFeePerGas' in transaction and 'maxPriorityFeePerGas' not in transaction:
raise InvalidTransaction(
"maxPriorityFeePerGas must be defined in a 1559 transaction."
)
# should be a fully formed (legacy or 1559) tx or no fee values were specified
return transaction
|
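When a transaction carries only maxPriorityFeePerGas, the function above fills in maxFeePerGas as the priority fee plus twice the latest base fee, converting between hex strings and integers along the way. A small arithmetic sketch with made-up fee values:

base_fee = 30_000_000_000               # 30 gwei, hypothetical latest baseFeePerGas
max_priority_fee = hex(2_000_000_000)   # 2 gwei, stored as a hex string like the tx field

priority = int(str(max_priority_fee), 16)
max_fee_per_gas = priority + 2 * base_fee
print(hex(max_fee_per_gas))             # 0xe6f7cec00, i.e. 62 gwei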
31,318 | def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
| def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"/n DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
|
42,622 | def get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:
airdrops_dir = data_dir / 'airdrops'
airdrops_dir.mkdir(parents=True, exist_ok=True)
filename = airdrops_dir / f'{name}.csv'
if not filename.is_file():
# if not cached, get it from the gist
try:
request = requests.get(url=AIRDROPS[name][0], timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e
try:
content = request.content.decode('utf-8')
# Here 20900 is the size of the smallest CSV file we track
if not csv.Sniffer().has_header(content) or len(request.content) < 20900:
raise csv.Error
with open(filename, 'w') as f:
f.write(content)
except OSError as e:
raise WriteError(f'Failed to save {filename} to disk') from e
except csv.Error as e:
log.debug(f'airdrop file {filename} contains invalid data {content}')
raise InvalidData(f'File {filename} contains invalid information. Check logs.') from e
# Verify the CSV file
csvfile = open(filename, 'r')
iterator = csv.reader(csvfile)
next(iterator) # skip header
return iterator, csvfile
| def get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:
airdrops_dir = data_dir / 'airdrops'
airdrops_dir.mkdir(parents=True, exist_ok=True)
filename = airdrops_dir / f'{name}.csv'
if not filename.is_file():
# if not cached, get it from the gist
try:
request = requests.get(url=AIRDROPS[name][0], timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e
try:
content = request.content.decode('utf-8')
# Here 20900 is the size of the smallest CSV file we track
if not csv.Sniffer().has_header(content) or len(request.content) < 20900:
raise csv.Error
with open(filename, 'w') as f:
f.write(content)
except OSError as e:
raise WriteError(f'Failed to write {filename} to disk due to {str(e)}') from e
except csv.Error as e:
log.debug(f'airdrop file {filename} contains invalid data {content}')
raise InvalidData(f'File {filename} contains invalid information. Check logs.') from e
# Verify the CSV file
csvfile = open(filename, 'r')
iterator = csv.reader(csvfile)
next(iterator) # skip header
return iterator, csvfile
|
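The download check above combines csv.Sniffer().has_header() with a minimum byte count to decide whether the fetched text looks like a real airdrop CSV before caching it. A tiny sketch of that gate on an in-memory string, with 30 bytes standing in for the real 20900-byte floor:

import csv

content = "address,amount\n0xabc,12\n0xdef,34\n"

if csv.Sniffer().has_header(content) and len(content) >= 30:
    print("looks like a CSV with a header row")
else:
    print("rejecting download")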
35,971 | def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either of both kind of connected nodes can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, also all parent workflows are
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. The same disclaimer as forward_calcs applies here as well.
:param bool dry_run:
Do not delete, a dry run, with statistics printed according to verbosity levels.
:param bool force:
Do not ask for confirmation to delete nodes.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
if force:
pass
else:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('I have finished node deletion and I am starting folder deletion.')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('I have finished folder deletion. Deletion completed.')
| def delete_nodes(
pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False
):
"""
Delete nodes by a list of pks.
This command will delete not only the specified nodes, but also the ones that are
linked to these and should be also deleted in order to keep a consistent provenance
according to the rules explained in the concepts section of the documentation.
In summary:
1. If a DATA node is deleted, any process nodes linked to it will also be deleted.
2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as
well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes
(outputs) will be deleted by default but this can be disabled.
3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as
well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by
default, but deletion of either of both kind of connected nodes can be enabled.
These rules are 'recursive', so if a CALC node is deleted, then its output DATA
nodes will be deleted as well, and then any CALC node that may have those as
inputs, and so on.
:param pks: a list of the PKs of the nodes to delete
:param bool force: do not ask for confirmation to delete nodes.
:param int verbosity: 0 prints nothing,
1 prints just sums and total,
2 prints individual nodes.
:param bool create_forward:
This will delete all output data created by any deleted calculation.
:param bool call_calc_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. Note that when you delete a workflow, also all parent workflows are
deleted (recursively). Therefore, setting this flag to True may delete
calculations that are 'unrelated' to what has been chosen to be deleted, just
because they are connected at some point in the upwards provenance. Use with
care, and it is advisable to never combine it with force.
:param bool call_work_forward:
This will also delete all calculations called by any workflow that is going to
be deleted. The same disclaimer as forward_calcs applies here as well.
:param bool dry_run:
Do not delete, a dry run, with statistics printed according to verbosity levels.
:param bool force:
Do not ask for confirmation to delete nodes.
"""
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
from aiida.backends.utils import delete_nodes_and_connections
from aiida.common import exceptions
from aiida.common.links import LinkType
from aiida.orm import Node, QueryBuilder, load_node
starting_pks = []
for pk in pks:
try:
load_node(pk)
except exceptions.NotExistent:
echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk))
else:
starting_pks.append(pk)
# An empty set might be problematic for the queries done below.
if not starting_pks:
if verbosity:
echo.echo('Nothing to delete')
return
follow_upwards = []
follow_upwards.append(LinkType.CREATE.value)
follow_upwards.append(LinkType.RETURN.value)
follow_upwards.append(LinkType.CALL_CALC.value)
follow_upwards.append(LinkType.CALL_WORK.value)
follow_downwards = []
follow_downwards.append(LinkType.INPUT_CALC.value)
follow_downwards.append(LinkType.INPUT_WORK.value)
if create_forward:
follow_downwards.append(LinkType.CREATE.value)
if call_calc_forward:
follow_downwards.append(LinkType.CALL_CALC.value)
if call_work_forward:
follow_downwards.append(LinkType.CALL_WORK.value)
links_upwards = {'type': {'in': follow_upwards}}
links_downwards = {'type': {'in': follow_downwards}}
operational_set = set().union(set(starting_pks))
accumulator_set = set().union(set(starting_pks))
while operational_set:
new_pks_set = set()
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_downwards,
with_incoming='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
query_nodes = QueryBuilder()
query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources')
query_nodes.append(
Node,
filters={'id': {
'!in': accumulator_set
}},
edge_filters=links_upwards,
with_outgoing='sources',
project='id'
)
new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall()))
operational_set = new_pks_set.difference(accumulator_set)
accumulator_set = new_pks_set.union(accumulator_set)
pks_set_to_delete = accumulator_set
if verbosity > 0:
echo.echo(
'I {} delete {} node{}'.format(
'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else ''
)
)
if verbosity > 1:
builder = QueryBuilder().append(
Node, filters={'id': {
'in': pks_set_to_delete
}}, project=('uuid', 'id', 'node_type', 'label')
)
echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will'))
for uuid, pk, type_string, label in builder.iterall():
try:
short_type_string = type_string.split('.')[-2]
except IndexError:
short_type_string = type_string
echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label))
if dry_run:
if verbosity > 0:
echo.echo('\nThis was a dry run, exiting without deleting anything')
return
# Asking for user confirmation here
if force:
pass
else:
echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete)))
if not click.confirm('Shall I continue?'):
echo.echo('Exiting without deleting')
return
# Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later,
# so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders
repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access
if verbosity > 0:
echo.echo('I am starting node deletion.')
delete_nodes_and_connections(pks_set_to_delete)
if verbosity > 0:
echo.echo('I have finished node deletion and I am starting folder deletion.')
# If we are here, we managed to delete the entries from the DB.
# I can now delete the folders
for repository in repositories:
repository.erase(force=True)
if verbosity > 0:
echo.echo('Deletion completed.')
|
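The while loop in delete_nodes grows an accumulator set by repeatedly querying for nodes reachable through the selected link types until no new pks appear. The sketch below reproduces that fixed-point expansion over a plain adjacency dict, with string ids standing in for database nodes and QueryBuilder calls:

# toy provenance graph: node -> nodes that must be deleted together with it
graph = {
    "data_1": ["calc_1"],
    "calc_1": ["data_2"],
    "data_2": [],
    "calc_2": [],
}

def deletion_closure(start_nodes, graph):
    operational = set(start_nodes)
    accumulator = set(start_nodes)
    while operational:
        new_nodes = set()
        for node in operational:
            new_nodes.update(n for n in graph.get(node, []) if n not in accumulator)
        operational = new_nodes - accumulator
        accumulator |= new_nodes
    return accumulator

print(sorted(deletion_closure(["data_1"], graph)))  # ['calc_1', 'data_1', 'data_2']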
58,935 | def juggle_file(fn, dbc=None, layout=None):
env = os.environ.copy()
env["BASEDIR"] = BASEDIR
if dbc:
env["DBC_NAME"] = dbc
pj = os.getenv("PLOTJUGGLER_PATH", "plotjuggler")
if layout is not None:
subprocess.call(f'{pj} --plugin_folders {os.path.join(juggle_dir, "bin")} -d {fn} -l {layout}', shell=True, env=env, cwd=juggle_dir)
else:
subprocess.call(f'{pj} --plugin_folders {os.path.join(juggle_dir, "bin")} -d {fn}', shell=True, env=env, cwd=juggle_dir)
| def juggle_file(fn, dbc=None, layout=None):
env = os.environ.copy()
env["BASEDIR"] = BASEDIR
if dbc:
env["DBC_NAME"] = dbc
pj = os.getenv("PLOTJUGGLER_PATH", "plotjuggler")
extra_args = ""
if layout is not None:
extra_args += f'-l {layout}'
subprocess.call(f'{pj} --plugin_folders {os.path.join(juggle_dir, "bin")} -d {fn} {extra_args}', shell=True, env=env, cwd=juggle_dir)
|
57,392 | def get_dependency_tree(step_inputs: List[CWLObjectType]) -> Dict[str, List[str]]:
"""Get the dependency tree in the form of adjacency list."""
adjacency = {} # adjacency list of the dependency tree
for step_input in step_inputs:
vertex_in = get_step_id(step_input["source"])
vertex_out = get_step_id(step_input["id"])
if vertex_in not in adjacency:
adjacency[vertex_in] = [vertex_out]
elif vertex_out not in adjacency[vertex_in]:
adjacency[vertex_in].append(vertex_out)
if vertex_out not in adjacency:
adjacency[vertex_out] = []
return adjacency
| def get_dependency_tree(step_inputs: List[CWLObjectType]) -> Dict[str, List[str]]:
"""Get the dependency tree in the form of adjacency list."""
adjacency = {} # adjacency list of the dependency tree
for step_input in step_inputs:
vertex_in = get_step_id(cast(str, step_input["source"]))
vertex_out = get_step_id(cast(str, step_input["id"]))
if vertex_in not in adjacency:
adjacency[vertex_in] = [vertex_out]
elif vertex_out not in adjacency[vertex_in]:
adjacency[vertex_in].append(vertex_out)
if vertex_out not in adjacency:
adjacency[vertex_out] = []
return adjacency
|
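get_dependency_tree flattens a list of step inputs into an adjacency list keyed by source step, making sure that sink-only steps still receive an empty entry. A minimal sketch with plain dicts for the step inputs and a toy get_step_id:

def get_step_id(ref):
    return ref.split("/")[0]  # toy version: "step1/out" -> "step1"

def dependency_tree(step_inputs):
    adjacency = {}
    for step_input in step_inputs:
        vertex_in = get_step_id(step_input["source"])
        vertex_out = get_step_id(step_input["id"])
        adjacency.setdefault(vertex_in, [])
        if vertex_out not in adjacency[vertex_in]:
            adjacency[vertex_in].append(vertex_out)
        adjacency.setdefault(vertex_out, [])
    return adjacency

inputs = [{"source": "step1/out", "id": "step2/in"},
          {"source": "step1/out", "id": "step3/in"}]
print(dependency_tree(inputs))  # {'step1': ['step2', 'step3'], 'step2': [], 'step3': []}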
789 | def test_near_roi():
streamlines = [np.array([[0., 0., 0.9],
[1.9, 0., 0.],
[3, 2., 2.]]),
np.array([[0.1, 0., 0],
[0, 1., 1.],
[0, 2., 2.]]),
np.array([[2, 2, 2],
[3, 3, 3]])]
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 0, 0] = True
mask[1, 0, 0] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=1),
np.array([True, True, False]))
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask),
np.array([False, True, False]))
# test for handling of various forms of null streamlines
# including a streamline from previous test because near_roi / tol
# can't handle completely empty streamline collections
streamlinesNULL = [np.array([[0., 0., 0.9],
[1.9, 0., 0.],
[3, 2., 2.]]),
np.array([[],
[],
[]]).T,
np.array([]),
[]]
npt.assert_array_equal(near_roi(streamlinesNULL, np.eye(4), mask, tol=1),
np.array([True, False, False, False]))
npt.assert_array_equal(near_roi(streamlinesNULL, np.eye(4), mask),
np.array([False, False, False, False]))
# If there is an affine, we need to use it:
affine = np.eye(4)
affine[:, 3] = [-1, 100, -20, 1]
# Transform the streamlines:
x_streamlines = [sl + affine[:3, 3] for sl in streamlines[-3:]]
npt.assert_array_equal(near_roi(x_streamlines, affine, mask, tol=1),
np.array([True, True, False]))
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None),
np.array([False, True, False]))
# Test for use of the 'all' mode:
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None, mode='all'),
np.array([False, False, False]))
mask[0, 1, 1] = True
mask[0, 2, 2] = True
# Test for use of the 'all' mode, also testing that setting the tolerance
# to a very small number gets overridden:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=0.1,
mode='all'),
np.array([False, True, False]))
mask[2, 2, 2] = True
mask[3, 3, 3] = True
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None, mode='all'),
np.array([False, True, True]))
# Test for use of endpoints as selection criteria:
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 1, 1] = True
mask[3, 2, 2] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=0.87,
mode="either_end"),
np.array([True, False, False]))
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=0.87,
mode="both_end"),
np.array([False, False, False]))
mask[0, 0, 0] = True
mask[0, 2, 2] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask,
mode="both_end"),
np.array([False, True, False]))
# Test with a generator input:
def generate_sl(streamlines):
for sl in streamlines:
yield sl
npt.assert_array_equal(near_roi(generate_sl(streamlines), np.eye(4),
mask, mode="both_end"),
np.array([False, True, False]))
| def test_near_roi():
streamlines = [np.array([[0., 0., 0.9],
[1.9, 0., 0.],
[3, 2., 2.]]),
np.array([[0.1, 0., 0],
[0, 1., 1.],
[0, 2., 2.]]),
np.array([[2, 2, 2],
[3, 3, 3]])]
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 0, 0] = True
mask[1, 0, 0] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=1),
np.array([True, True, False]))
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask),
np.array([False, True, False]))
# test for handling of various forms of null streamlines
# including a streamline from previous test because near_roi / tol
# can't handle completely empty streamline collections
streamlines_null = [np.array([[0., 0., 0.9],
[1.9, 0., 0.],
[3, 2., 2.]]),
np.array([[],
[],
[]]).T,
np.array([]),
[]]
npt.assert_array_equal(near_roi(streamlinesNULL, np.eye(4), mask, tol=1),
np.array([True, False, False, False]))
npt.assert_array_equal(near_roi(streamlinesNULL, np.eye(4), mask),
np.array([False, False, False, False]))
# If there is an affine, we need to use it:
affine = np.eye(4)
affine[:, 3] = [-1, 100, -20, 1]
# Transform the streamlines:
x_streamlines = [sl + affine[:3, 3] for sl in streamlines[-3:]]
npt.assert_array_equal(near_roi(x_streamlines, affine, mask, tol=1),
np.array([True, True, False]))
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None),
np.array([False, True, False]))
# Test for use of the 'all' mode:
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None, mode='all'),
np.array([False, False, False]))
mask[0, 1, 1] = True
mask[0, 2, 2] = True
# Test for use of the 'all' mode, also testing that setting the tolerance
# to a very small number gets overridden:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=0.1,
mode='all'),
np.array([False, True, False]))
mask[2, 2, 2] = True
mask[3, 3, 3] = True
npt.assert_array_equal(near_roi(x_streamlines, affine, mask,
tol=None, mode='all'),
np.array([False, True, True]))
# Test for use of endpoints as selection criteria:
mask = np.zeros((4, 4, 4), dtype=bool)
mask[0, 1, 1] = True
mask[3, 2, 2] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=0.87,
mode="either_end"),
np.array([True, False, False]))
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask, tol=0.87,
mode="both_end"),
np.array([False, False, False]))
mask[0, 0, 0] = True
mask[0, 2, 2] = True
npt.assert_array_equal(near_roi(streamlines, np.eye(4), mask,
mode="both_end"),
np.array([False, True, False]))
# Test with a generator input:
def generate_sl(streamlines):
for sl in streamlines:
yield sl
npt.assert_array_equal(near_roi(generate_sl(streamlines), np.eye(4),
mask, mode="both_end"),
np.array([False, True, False]))
|
31,353 | def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
| def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'picus-mitigation-list': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
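The main() above routes every command through a long if/elif chain. A common alternative, shown here only as a hedged sketch with dummy handlers (it is not how this integration is written), is a dictionary dispatch that keeps each command name in exactly one place:

def get_access_token():
    return "token"

def vector_compare(token):
    return {"compared": True}

def peer_list(token):
    return ["peer-1", "peer-2"]

COMMANDS = {
    "Picus-Vector-Compare": vector_compare,
    "Picus-Peer-List": peer_list,
}

def dispatch(command):
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError(f"Unknown command {command}")
    return handler(get_access_token())

print(dispatch("Picus-Peer-List"))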
16,188 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
description = SENSOR_TYPES[sensor_type]
name = f"{info['name']}{description.name}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, description)
entities.append(entity)
add_entities(entities, True)
| def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
description = SENSOR_TYPES[sensor_type]
name = f"{info['name']}{description.name or ''}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, description)
entities.append(entity)
add_entities(entities, True)
|
40,499 | def run_stage(sessions, stage, tavern_box, test_block_config):
"""Run one stage from the test
Args:
sessions (dict): List of relevant 'session' objects used for this test
stage (dict): specification of stage to be run
tavern_box (box.Box): Box object containing format variables to be used
in test
test_block_config (dict): available variables for test
"""
name = stage["name"]
r = get_request_type(stage, test_block_config, sessions)
tavern_box.update(request_vars=r.request_vars)
expected = get_expected(stage, test_block_config, sessions)
delay(stage, "before", test_block_config["variables"])
logger.info("Running stage : %s", name)
response = r.run()
verifiers = get_verifiers(stage, test_block_config, sessions, expected)
for v in verifiers:
saved = v.verify(response)
test_block_config["variables"].update(saved)
tavern_box.pop("request_vars")
delay(stage, "after", test_block_config["variables"])
| def run_stage(sessions, stage, tavern_box, test_block_config):
"""Run one stage from the test
Args:
sessions (dict): Dictionary of relevant 'session' objects used for this test
stage (dict): specification of stage to be run
tavern_box (box.Box): Box object containing format variables to be used
in test
test_block_config (dict): available variables for test
"""
name = stage["name"]
r = get_request_type(stage, test_block_config, sessions)
tavern_box.update(request_vars=r.request_vars)
expected = get_expected(stage, test_block_config, sessions)
delay(stage, "before", test_block_config["variables"])
logger.info("Running stage : %s", name)
response = r.run()
verifiers = get_verifiers(stage, test_block_config, sessions, expected)
for v in verifiers:
saved = v.verify(response)
test_block_config["variables"].update(saved)
tavern_box.pop("request_vars")
delay(stage, "after", test_block_config["variables"])
|
5,717 | def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True, seed=None):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' 1-D observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If `minit` initialization string is
'matrix', or if a ndarray is given instead, it is
interpreted as initial cluster to use instead.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the iters parameter to
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', '++' and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'++': choose k observations accordingly to the kmeans++ method
(careful seeding)
'matrix': interpret the k parameter as a k by M (or length k
array for 1-D data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise an ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
ith observation is closest to.
See Also
--------
kmeans
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
Examples
--------
>>> from scipy.cluster.vq import kmeans2
>>> import matplotlib.pyplot as plt
Create z, an array with shape (100, 2) containing a mixture of samples
from three multivariate normal distributions.
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
>>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
>>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
>>> z = np.concatenate((a, b, c))
>>> rng.shuffle(z)
Compute three clusters.
>>> centroid, label = kmeans2(z, 3, minit='points')
>>> centroid
array([[ 2.22274463, -0.61666946], # may vary
[ 0.54069047, 5.86541444],
[ 6.73846769, 4.01991898]])
How many points are in each cluster?
>>> counts = np.bincount(label)
>>> counts
array([29, 51, 20]) # may vary
Plot the clusters.
>>> w0 = z[label == 0]
>>> w1 = z[label == 1]
>>> w2 = z[label == 2]
>>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
>>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
>>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
>>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
>>> plt.axis('equal')
>>> plt.legend(shadow=True)
>>> plt.show()
"""
if int(iter) < 1:
raise ValueError("Invalid iter (%s), "
"must be a positive integer." % iter)
try:
miss_meth = _valid_miss_meth[missing]
except KeyError as e:
raise ValueError("Unknown missing method %r" % (missing,)) from e
data = _asarray_validated(data, check_finite=check_finite)
if data.ndim == 1:
d = 1
elif data.ndim == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 is not supported.")
if data.size < 1:
raise ValueError("Empty input is not supported.")
# If k is not a single value, it should be compatible with data's shape
if minit == 'matrix' or not np.isscalar(k):
code_book = np.array(k, copy=True)
if data.ndim != code_book.ndim:
raise ValueError("k array doesn't match data rank")
nc = len(code_book)
if data.ndim > 1 and code_book.shape[1] != d:
raise ValueError("k array doesn't match data dimension")
else:
nc = int(k)
if nc < 1:
raise ValueError("Cannot ask kmeans2 for %d clusters"
" (k was %s)" % (nc, k))
elif nc != k:
warnings.warn("k was not an integer, was converted.")
try:
init_meth = _valid_init_meth[minit]
except KeyError as e:
raise ValueError("Unknown init method %r" % (minit,)) from e
else:
rng = check_random_state(seed)
code_book = init_meth(data, k, rng)
for i in range(iter):
# Compute the nearest neighbor for each obs using the current code book
label = vq(data, code_book)[0]
# Update the code book by computing centroids
new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
miss_meth()
# Set the empty clusters to their previous positions
new_code_book[~has_members] = code_book[~has_members]
code_book = new_code_book
return code_book, label
| def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
missing='warn', check_finite=True, *, seed=None):
"""
Classify a set of observations into k clusters using the k-means algorithm.
The algorithm attempts to minimize the Euclidean distance between
observations and centroids. Several initialization methods are
included.
Parameters
----------
data : ndarray
A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
'M' array of 'M' 1-D observations.
k : int or ndarray
The number of clusters to form as well as the number of
centroids to generate. If `minit` initialization string is
'matrix', or if a ndarray is given instead, it is
interpreted as initial cluster to use instead.
iter : int, optional
Number of iterations of the k-means algorithm to run. Note
that this differs in meaning from the iters parameter to
the kmeans function.
thresh : float, optional
(not used yet)
minit : str, optional
Method for initialization. Available methods are 'random',
'points', '++' and 'matrix':
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
'points': choose k observations (rows) at random from data for
the initial centroids.
'++': choose k observations accordingly to the kmeans++ method
(careful seeding)
'matrix': interpret the k parameter as a k by M (or length k
array for 1-D data) array of initial centroids.
missing : str, optional
Method to deal with empty clusters. Available methods are
'warn' and 'raise':
'warn': give a warning and continue.
'raise': raise an ClusterError and terminate the algorithm.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
Seed for initializing the pseudo-random number generator.
If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
The default is None.
Returns
-------
centroid : ndarray
A 'k' by 'N' array of centroids found at the last iteration of
k-means.
label : ndarray
label[i] is the code or index of the centroid the
ith observation is closest to.
See Also
--------
kmeans
References
----------
.. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
on Discrete Algorithms, 2007.
Examples
--------
>>> from scipy.cluster.vq import kmeans2
>>> import matplotlib.pyplot as plt
Create z, an array with shape (100, 2) containing a mixture of samples
from three multivariate normal distributions.
>>> rng = np.random.default_rng()
>>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
>>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
>>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
>>> z = np.concatenate((a, b, c))
>>> rng.shuffle(z)
Compute three clusters.
>>> centroid, label = kmeans2(z, 3, minit='points')
>>> centroid
array([[ 2.22274463, -0.61666946], # may vary
[ 0.54069047, 5.86541444],
[ 6.73846769, 4.01991898]])
How many points are in each cluster?
>>> counts = np.bincount(label)
>>> counts
array([29, 51, 20]) # may vary
Plot the clusters.
>>> w0 = z[label == 0]
>>> w1 = z[label == 1]
>>> w2 = z[label == 2]
>>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
>>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
>>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
>>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
>>> plt.axis('equal')
>>> plt.legend(shadow=True)
>>> plt.show()
"""
if int(iter) < 1:
raise ValueError("Invalid iter (%s), "
"must be a positive integer." % iter)
try:
miss_meth = _valid_miss_meth[missing]
except KeyError as e:
raise ValueError("Unknown missing method %r" % (missing,)) from e
data = _asarray_validated(data, check_finite=check_finite)
if data.ndim == 1:
d = 1
elif data.ndim == 2:
d = data.shape[1]
else:
raise ValueError("Input of rank > 2 is not supported.")
if data.size < 1:
raise ValueError("Empty input is not supported.")
# If k is not a single value, it should be compatible with data's shape
if minit == 'matrix' or not np.isscalar(k):
code_book = np.array(k, copy=True)
if data.ndim != code_book.ndim:
raise ValueError("k array doesn't match data rank")
nc = len(code_book)
if data.ndim > 1 and code_book.shape[1] != d:
raise ValueError("k array doesn't match data dimension")
else:
nc = int(k)
if nc < 1:
raise ValueError("Cannot ask kmeans2 for %d clusters"
" (k was %s)" % (nc, k))
elif nc != k:
warnings.warn("k was not an integer, was converted.")
try:
init_meth = _valid_init_meth[minit]
except KeyError as e:
raise ValueError("Unknown init method %r" % (minit,)) from e
else:
rng = check_random_state(seed)
code_book = init_meth(data, k, rng)
for i in range(iter):
# Compute the nearest neighbor for each obs using the current code book
label = vq(data, code_book)[0]
# Update the code book by computing centroids
new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
if not has_members.all():
miss_meth()
# Set the empty clusters to their previous positions
new_code_book[~has_members] = code_book[~has_members]
code_book = new_code_book
return code_book, label
|
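The kmeans2 entry above documents a 'matrix' initialization mode in which k is passed as an explicit array of starting centroids. A minimal usage sketch of that branch, assuming numpy and scipy are installed; the data and variable names are illustrative only:

import numpy as np
from scipy.cluster.vq import kmeans2

rng = np.random.default_rng(0)
# two well-separated blobs of 2-D observations
data = np.vstack([rng.normal(0.0, 1.0, size=(50, 2)),
                  rng.normal(5.0, 1.0, size=(50, 2))])
init = np.array([[0.0, 0.0], [5.0, 5.0]])   # explicit (k, N) code book
centroid, label = kmeans2(data, init, iter=10, minit='matrix')
print(centroid.shape)        # (2, 2)
print(np.bincount(label))    # roughly [50, 50]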
39,532 | def split_markdown_front_matter(lines: str) -> Tuple[str, str]:
r"""
Split text into markdown front matter and the markdown body
    Return ("", text) for text with non-existing front matter
>>> text='''---
... title: DUMMY-SECURITY-2019-001
... description: Incorrect access control.
... cves: [CVE-2042-1337]
... ---
... # Markdown starts here
... '''
>>> split_markdown_front_matter(text)
('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n')
"""
fmlines = []
mdlines = []
splitter = mdlines
lines = lines.replace("\r\n", "\n")
for index, line in enumerate(lines.split("\n")):
if index == 0 and line.strip().startswith("---"):
splitter = fmlines
elif line.strip().startswith("---"):
splitter = mdlines
else:
splitter.append(line)
return "\n".join(fmlines), "\n".join(mdlines)
| def split_markdown_front_matter(lines: str) -> Tuple[str, str]:
"""
Split text into markdown front matter and the markdown body
    Return ("", text) for text with non-existing front matter
>>> text='''---
... title: DUMMY-SECURITY-2019-001
... description: Incorrect access control.
... cves: [CVE-2042-1337]
... ---
... # Markdown starts here
... '''
>>> split_markdown_front_matter(text)
('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n')
"""
fmlines = []
mdlines = []
splitter = mdlines
lines = lines.replace("\r\n", "\n")
for index, line in enumerate(lines.split("\n")):
if index == 0 and line.strip().startswith("---"):
splitter = fmlines
elif line.strip().startswith("---"):
splitter = mdlines
else:
splitter.append(line)
return "\n".join(fmlines), "\n".join(mdlines)
|
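The only difference between the two columns of the entry above is the docstring prefix (r""" versus """), which turns the \n escapes in the doctest's expected output into real newlines and would likely break the doctest. A small, self-contained sketch of the same splitting logic, with illustrative names, for readers who want to try it outside the original module:

from typing import Tuple

def split_front_matter(text: str) -> Tuple[str, str]:
    # mirror of the row above: '---' fences delimit the YAML front matter
    fm, body = [], []
    target = body
    for index, line in enumerate(text.replace("\r\n", "\n").split("\n")):
        if index == 0 and line.strip().startswith("---"):
            target = fm        # opening fence on the first line
        elif line.strip().startswith("---"):
            target = body      # closing fence: switch back to the body
        else:
            target.append(line)
    return "\n".join(fm), "\n".join(body)

doc = "---\ntitle: DUMMY-SECURITY-2019-001\n---\n# Markdown starts here\n"
print(split_front_matter(doc))
# ('title: DUMMY-SECURITY-2019-001', '# Markdown starts here\n')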
45,704 | def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (vil.shape[1], vil.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("FFT: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the ARI(p,1) model: %d" % ar_order)
if type(ar_window_radius) == int:
print("ARI(p,1) window radius: %d" % ar_window_radius)
else:
print("ARI(p,1) window radius: none")
print("R(VIL) window radius: %d" % r_vil_window_radius)
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
allow_nonfinite_values=True,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
r_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
if rainrate is not None:
r_f_prev = r_vil_a * vil[-1, :] + r_vil_b
else:
r_f_prev = vil[-1, :]
extrap_kwargs["return_displacement"] = True
dp = None
t_nowcast = 0
t_prev = 0.0
for t in range(len(timesteps)):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
else:
subtimesteps = [t]
if len(subtimesteps) > 1 or t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate the ARI models for each cascade level
for i in range(n_cascade_levels):
vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i])
# recompose the cascade to obtain the forecast field
vil_dec_dict = {}
vil_dec_dict["cascade_levels"] = vil_dec[:, -1, :]
vil_dec_dict["domain"] = "spatial"
vil_dec_dict["normalized"] = False
vil_f = recomp_method(vil_dec_dict)
vil_f[~mask] = np.nan
if rainrate is not None:
# convert VIL to rain rate
r_f_new = r_vil_a * vil_f + r_vil_b
else:
r_f_new = vil_f
if apply_rainrate_mask:
r_f_new[rainrate_mask] = 0.0
r_f_new[r_f_new < 0.0] = 0.0
# advect the recomposed field to obtain the forecast for the current
# time step (or subtimesteps if non-integer time steps are given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
r_f_ip = (
1.0 - t_diff_prev_int
) * r_f_prev + t_diff_prev_int * r_f_new
else:
r_f_ip = r_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = dp
r_f_ep, dp = extrapolator(
r_f_ip,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
r_f.append(r_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = dp
_, dp = extrapolator(
None,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
t_prev = t + 1
r_f_prev = r_f_new
if nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if measure_time:
return np.stack(r_f), init_time, mainloop_time
else:
return np.stack(r_f)
| def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (vil.shape[1], vil.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("FFT: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the ARI(p,1) model: %d" % ar_order)
if type(ar_window_radius) == int:
print("ARI(p,1) window radius: %d" % ar_window_radius)
else:
print("ARI(p,1) window radius: none")
print("R(VIL) window radius: %d" % r_vil_window_radius)
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
allow_nonfinite_values=True,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
r_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
if rainrate is not None:
r_f_prev = r_vil_a * vil[-1, :] + r_vil_b
else:
r_f_prev = vil[-1, :]
extrap_kwargs["return_displacement"] = True
dp = None
t_nowcast = 0
t_prev = 0.0
for t in range(len(timesteps)):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
else:
subtimesteps = [t]
if subtimesteps and t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate the ARI models for each cascade level
for i in range(n_cascade_levels):
vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i])
# recompose the cascade to obtain the forecast field
vil_dec_dict = {}
vil_dec_dict["cascade_levels"] = vil_dec[:, -1, :]
vil_dec_dict["domain"] = "spatial"
vil_dec_dict["normalized"] = False
vil_f = recomp_method(vil_dec_dict)
vil_f[~mask] = np.nan
if rainrate is not None:
# convert VIL to rain rate
r_f_new = r_vil_a * vil_f + r_vil_b
else:
r_f_new = vil_f
if apply_rainrate_mask:
r_f_new[rainrate_mask] = 0.0
r_f_new[r_f_new < 0.0] = 0.0
# advect the recomposed field to obtain the forecast for the current
# time step (or subtimesteps if non-integer time steps are given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
r_f_ip = (
1.0 - t_diff_prev_int
) * r_f_prev + t_diff_prev_int * r_f_new
else:
r_f_ip = r_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = dp
r_f_ep, dp = extrapolator(
r_f_ip,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
r_f.append(r_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = dp
_, dp = extrapolator(
None,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
t_prev = t + 1
r_f_prev = r_f_new
if nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if measure_time:
return np.stack(r_f), init_time, mainloop_time
else:
return np.stack(r_f)
|
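The visible change in the forecast entry above is the condition that decides whether a nowcast message is printed for a given bin of output times: len(subtimesteps) > 1 or t > 0 becomes subtimesteps and t > 0. A small stand-alone comparison; the sample bins are made up for illustration and are not taken from pysteps:

cases = [
    (0, [0]),        # first bin: only the analysis time
    (1, []),         # empty bin: no requested output in this interval
    (2, [1.5]),      # one sub-timestep
    (3, [2.5, 3]),   # two sub-timesteps
]
for t, subtimesteps in cases:
    old = len(subtimesteps) > 1 or t > 0
    new = bool(subtimesteps) and t > 0
    print(t, subtimesteps, "old:", old, "new:", new)
# The two differ for empty bins at t > 0: the old test announces a time step
# even though nothing is appended, the new test does not.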
5,011 | def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
r"""
Horizontally pack boxes specified by their ``(width, xdescent)`` pair.
(For simplicity of the description, the terminology used here assumes a
horizontal layout, but the function works equally for a vertical layout.)
*xdescent* is analogous to the usual descent, but along the x-direction; it
is currently ignored.
There are three packing *mode*\s:
- 'fixed': The elements are packed tight to the left with a spacing of
*sep* in between. If *total* is *None* the returned total will be the
right edge of the last box. A non-*None* total will be passed unchecked
to the output. In particular this means that right edge of the last
box may be further to the right than the returned total.
- 'expand': Distribute the boxes with equal spacing so that the left edge
of the first box is at 0, and the right edge of the last box is at
*total*. The parameter *sep* is ignored in this mode. A total of *None*
is accepted and considered equal to 1. The total is returned unchanged
(except for the conversion *None* to 1). If the total is smaller than
the sum of the widths, the laid out boxes will overlap.
- 'equal': If *total* is given, the total space is divided in N equal
ranges and each box is left-aligned within its subspace.
Otherwise (*total* is *None*), *sep* must be provided and each box is
left-aligned in its subspace of width ``(max(widths) + sep)``. The
total width is then calculated to be ``N * (max(widths) + sep)``.
Parameters
----------
wd_list : list of (float, float)
(width, xdescent) of boxes to be packed.
total : float or None
Intended total length. *None* if not used.
sep : float
Spacing between boxes.
mode : {'fixed', 'expand', 'equal'}
The packing mode.
Returns
-------
total : float
The total width needed to accommodate the laid out boxes.
offsets : array of float
The left offsets of the boxes.
"""
w_list, d_list = zip(*wd_list) # d_list is currently not used.
_api.check_in_list(["fixed", "expand", "equal"], mode=mode)
if mode == "fixed":
offsets_ = np.cumsum([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
# This is a bit of a hack to avoid a TypeError when *total*
        # is None and used in conjunction with tight layout.
if total is None:
total = 1
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1)
else:
sep = 0
offsets_ = np.cumsum([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
if sep is None:
raise ValueError("total and sep cannot both be None when "
"using layout mode 'equal'")
total = (maxh + sep) * len(w_list)
else:
sep = total / len(w_list) - maxh
offsets = (maxh + sep) * np.arange(len(w_list))
return total, offsets
| def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
r"""
    Distribute boxes within a given *total* space.
(For simplicity of the description, the terminology used here assumes a
horizontal layout, but the function works equally for a vertical layout.)
*xdescent* is analogous to the usual descent, but along the x-direction; it
is currently ignored.
There are three packing *mode*\s:
- 'fixed': The elements are packed tight to the left with a spacing of
*sep* in between. If *total* is *None* the returned total will be the
right edge of the last box. A non-*None* total will be passed unchecked
to the output. In particular this means that right edge of the last
box may be further to the right than the returned total.
- 'expand': Distribute the boxes with equal spacing so that the left edge
of the first box is at 0, and the right edge of the last box is at
*total*. The parameter *sep* is ignored in this mode. A total of *None*
is accepted and considered equal to 1. The total is returned unchanged
(except for the conversion *None* to 1). If the total is smaller than
the sum of the widths, the laid out boxes will overlap.
- 'equal': If *total* is given, the total space is divided in N equal
ranges and each box is left-aligned within its subspace.
Otherwise (*total* is *None*), *sep* must be provided and each box is
left-aligned in its subspace of width ``(max(widths) + sep)``. The
total width is then calculated to be ``N * (max(widths) + sep)``.
Parameters
----------
wd_list : list of (float, float)
(width, xdescent) of boxes to be packed.
total : float or None
Intended total length. *None* if not used.
sep : float
Spacing between boxes.
mode : {'fixed', 'expand', 'equal'}
The packing mode.
Returns
-------
total : float
The total width needed to accommodate the laid out boxes.
offsets : array of float
The left offsets of the boxes.
"""
w_list, d_list = zip(*wd_list) # d_list is currently not used.
_api.check_in_list(["fixed", "expand", "equal"], mode=mode)
if mode == "fixed":
offsets_ = np.cumsum([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
# This is a bit of a hack to avoid a TypeError when *total*
        # is None and used in conjunction with tight layout.
if total is None:
total = 1
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1)
else:
sep = 0
offsets_ = np.cumsum([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
if sep is None:
raise ValueError("total and sep cannot both be None when "
"using layout mode 'equal'")
total = (maxh + sep) * len(w_list)
else:
sep = total / len(w_list) - maxh
offsets = (maxh + sep) * np.arange(len(w_list))
return total, offsets
|
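The docstring of the entry above describes three packing modes ('fixed', 'expand', 'equal'). The arithmetic behind the offsets can be reproduced with plain numpy; the widths, sep, and total below are made-up example values:

import numpy as np

widths, sep, total = [2.0, 1.0, 3.0], 0.5, 9.0

# 'fixed': tight packing with a gap of `sep` between boxes
edges = np.cumsum([0] + [w + sep for w in widths])
print("fixed  offsets:", edges[:-1], "needed total:", edges[-1] - sep)

# 'expand': spread the boxes so the last right edge lands exactly on `total`
gap = (total - sum(widths)) / (len(widths) - 1)
print("expand offsets:", np.cumsum([0] + [w + gap for w in widths])[:-1])

# 'equal': each box is left-aligned in a slot of width total / N
slot = total / len(widths)
print("equal  offsets:", slot * np.arange(len(widths)))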
28,599 | def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to ``ax.plot`` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
        raise ValueError(
            f"Plot type {kind} not recognized. Plot type must be in {valid_kinds}"
        )
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
| def plot_pair(
data,
group="posterior",
var_names: Optional[List[str]] = None,
filter_vars: Optional[str] = None,
coords=None,
marginals=False,
figsize=None,
textsize=None,
kind: Union[str, List[str]] = "scatter",
gridsize="auto",
contour: Optional[bool] = None,
plot_kwargs=None,
fill_last=False,
divergences=False,
colorbar=False,
labeller=None,
ax=None,
divergences_kwargs=None,
scatter_kwargs=None,
kde_kwargs=None,
hexbin_kwargs=None,
backend=None,
backend_kwargs=None,
marginal_kwargs=None,
point_estimate=None,
point_estimate_kwargs=None,
point_estimate_marker_kwargs=None,
reference_values=None,
reference_values_kwargs=None,
show=None,
):
"""
Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
Parameters
----------
data: obj
Any object that can be converted to an :class:`az.InferenceData` object
refer to documentation of :func:`az.convert_to_dataset` for details
group: str, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If ``None`` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
marginals: bool, optional
If True pairplot will include marginal distributions for every variable
figsize: figure size tuple
If None, size is (8 + numvars, 8 + numvars)
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
kind : str or List[str]
Type of plot to display (scatter, kde and/or hexbin)
gridsize: int or (int, int), optional
Only works for kind=hexbin.
The number of hexagons in the x-direction. The corresponding number of hexagons in the
y-direction is chosen such that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
in the x-direction and the y-direction.
contour : bool, optional, deprecated, Defaults to True.
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
**Note:** this default is implemented in the body of the code, not in argument processing.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
divergences: Boolean
If True divergences will be plotted in a different color, only if group is either 'prior'
or 'posterior'.
colorbar: bool
If True a colorbar will be included as part of the plot (Defaults to False).
Only works when kind=hexbin
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
divergences_kwargs: dicts, optional
Additional keywords passed to ``ax.scatter`` for divergences
scatter_kwargs:
Additional keywords passed to :meth:`matplotlib.axes.Axes.plot` when using scatter kind
kde_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_kde` when using kde kind
hexbin_kwargs: dict, optional
Additional keywords passed to ``ax.hexbin`` when using hexbin kind
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
marginal_kwargs: dict, optional
Additional keywords passed to :func:`az.plot_dist`, modifying the marginal distributions
plotted in the diagonal.
point_estimate: str, optional
Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
plotted using a scatter marker and vertical/horizontal lines.
point_estimate_kwargs: dict, optional
Additional keywords passed to ``ax.vline``, ``ax.hline`` (matplotlib) or ``ax.square``, ``Span`` (bokeh)
point_estimate_marker_kwargs: dict, optional
Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
reference_values: dict, optional
Reference values for the plotted variables. The Reference values will be plotted
using a scatter marker
reference_values_kwargs: dict, optional
Additional keywords passed to ``ax.plot`` or ``ax.circle`` in reference values plot
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
KDE Pair Plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> coords = {'school': ['Choate', 'Deerfield']}
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu', 'tau'],
>>> kind='kde',
>>> coords=coords,
>>> divergences=True,
>>> textsize=18)
Hexbin pair plot
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
>>> var_names=['theta', 'mu'],
>>> coords=coords,
>>> textsize=18,
>>> kind='hexbin')
Pair plot showing divergences and select variables with regular expressions
.. plot::
:context: close-figs
>>> az.plot_pair(centered,
... var_names=['^t', 'mu'],
... filter_vars="regex",
... coords=coords,
... divergences=True,
... textsize=18)
"""
valid_kinds = ["scatter", "kde", "hexbin"]
kind_boolean: Union[bool, List[bool]]
if isinstance(kind, str):
kind_boolean = kind in valid_kinds
else:
kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
if not np.all(kind_boolean):
        raise ValueError(
            f"Plot type {kind} not recognized. Plot type must be in {valid_kinds}"
        )
if fill_last or contour:
warnings.warn(
"fill_last and contour will be deprecated. Please use kde_kwargs",
UserWarning,
)
if plot_kwargs:
warnings.warn(
"plot_kwargs will be deprecated."
" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
UserWarning,
)
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
plotters = list(
xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
flat_var_names = [
labeller.make_label_vert(var_name, sel, isel) for var_name, sel, isel, _ in plotters
]
divergent_data = None
diverging_mask = None
# Assigning divergence group based on group param
if group == "posterior":
divergent_group = "sample_stats"
elif group == "prior":
divergent_group = "sample_stats_prior"
else:
divergences = False
# Get diverging draws and combine chains
if divergences:
if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
divergent_data = convert_to_dataset(data, group=divergent_group)
_, diverging_mask = xarray_to_ndarray(
divergent_data, var_names=("diverging",), combined=True
)
diverging_mask = np.squeeze(diverging_mask)
else:
divergences = False
warnings.warn(
"Divergences data not found, plotting without divergences. "
"Make sure the sample method provides divergences data and "
"that it is present in the `diverging` field of `sample_stats` "
"or `sample_stats_prior` or set divergences=False",
UserWarning,
)
if gridsize == "auto":
gridsize = int(dataset.dims["draw"] ** 0.35)
numvars = len(flat_var_names)
if numvars < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
pairplot_kwargs = dict(
ax=ax,
plotters=plotters,
numvars=numvars,
figsize=figsize,
textsize=textsize,
kind=kind,
scatter_kwargs=scatter_kwargs,
kde_kwargs=kde_kwargs,
hexbin_kwargs=hexbin_kwargs,
gridsize=gridsize,
colorbar=colorbar,
divergences=divergences,
diverging_mask=diverging_mask,
divergences_kwargs=divergences_kwargs,
flat_var_names=flat_var_names,
backend_kwargs=backend_kwargs,
marginal_kwargs=marginal_kwargs,
show=show,
marginals=marginals,
point_estimate=point_estimate,
point_estimate_kwargs=point_estimate_kwargs,
point_estimate_marker_kwargs=point_estimate_marker_kwargs,
reference_values=reference_values,
reference_values_kwargs=reference_values_kwargs,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_pair", "pairplot", backend)
ax = plot(**pairplot_kwargs)
return ax
|
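The two columns of the plot_pair entry differ only in the cross-reference used for scatter_kwargs in the docstring. A usage sketch mirroring the examples already shown there; it assumes arviz is installed and that the bundled 'centered_eight' example data can be loaded:

import arviz as az

centered = az.load_arviz_data("centered_eight")
ax = az.plot_pair(
    centered,
    var_names=["mu", "tau"],
    kind="kde",
    divergences=True,
    marginals=True,
)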
21,551 | def main(args, environ):
mode = args[1] if len(args) > 1 else "run"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
ownership = None
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
return run_generate_config(environ, ownership)
if mode == "migrate_config":
# generate a config based on environment vars.
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
return generate_config_from_template(
config_dir, config_path, environ, ownership
)
if mode != "run":
error("Unknown execution mode '%s'" % (mode,))
args = args[2:]
if "-m" not in args:
args = ["-m", synapse_worker] + args
jemallocpath = f"/usr/lib/{platform.machine()}-linux-gnu/libjemalloc.so.2"
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log(f"Could not find {jemallocpath}, will not use")
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
)
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
args += ["--config-path", config_path]
log("Starting synapse with args " + " ".join(args))
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve("/usr/local/bin/python", args, environ)
| def main(args, environ):
mode = args[1] if len(args) > 1 else "run"
desired_uid = int(environ.get("UID", "991"))
desired_gid = int(environ.get("GID", "991"))
synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
ownership = None
else:
ownership = "{}:{}".format(desired_uid, desired_gid)
if ownership is None:
log("Will not perform chmod/gosu as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
return run_generate_config(environ, ownership)
if mode == "migrate_config":
# generate a config based on environment vars.
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
return generate_config_from_template(
config_dir, config_path, environ, ownership
)
if mode != "run":
error("Unknown execution mode '%s'" % (mode,))
args = args[2:]
if "-m" not in args:
args = ["-m", synapse_worker] + args
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
if os.path.isfile(jemallocpath):
environ["LD_PRELOAD"] = jemallocpath
else:
log(f"Could not find {jemallocpath}, will not use")
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"
)
if not os.path.exists(config_path):
if "SYNAPSE_SERVER_NAME" in environ:
error(
"""\
Config file '%s' does not exist.
The synapse docker image no longer supports generating a config file on-the-fly
based on environment variables. You can migrate to a static config file by
running with 'migrate_config'. See the README for more details.
"""
% (config_path,)
)
error(
"Config file '%s' does not exist. You should either create a new "
"config file by running with the `generate` argument (and then edit "
"the resulting file before restarting) or specify the path to an "
"existing config file with the SYNAPSE_CONFIG_PATH variable."
% (config_path,)
)
args += ["--config-path", config_path]
log("Starting synapse with args " + " ".join(args))
args = ["python"] + args
if ownership is not None:
args = ["gosu", ownership] + args
os.execve("/usr/sbin/gosu", args, environ)
else:
os.execve("/usr/local/bin/python", args, environ)
|
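The only change in the entry above is how the jemalloc path is built (an f-string in the original, %-formatting in the modified version); both produce the same string, as the short sketch below checks:

import platform

machine = platform.machine()
assert (f"/usr/lib/{machine}-linux-gnu/libjemalloc.so.2"
        == "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (machine,))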
12,200 | def configure_parser_run(sub_parsers):
help = "Run an executable in a conda environment. [Experimental]"
descr = help + dedent("""
Use '--' (double dash) to separate CLI flags for 'conda run' from CLI flags sent to
the process being launched.
Example usage:
$ conda create -y -n my-python-2-env python=2
$ conda run -n my-python-2-env python --version
""")
epilog = dedent("""
""")
p = sub_parsers.add_parser(
'run',
description=descr,
help=help,
epilog=epilog,
)
add_parser_prefix(p)
p.add_argument(
"-v", "--verbose",
action=NullCountAction,
help="Use once for info, twice for debug, three times for trace.",
dest="verbosity",
default=NULL,
)
p.add_argument(
"--dev",
action=NullCountAction,
help="Sets `CONDA_EXE` to `python -m conda`, assuming the CWD contains "
"the root of conda development sources. This is mainly for use "
"during tests where we test new conda source against old Python "
"versions.",
dest="dev",
default=NULL,
)
p.add_argument(
"--debug-wrapper-scripts",
action=NullCountAction,
help="When this is set, where implemented, the shell wrapper scripts"
"will echo to stderr a lot of debugging information.",
dest="debug_wrapper_scripts",
default=NULL,
)
p.add_argument(
"--cwd",
help="Current working directory for command to run in. Defaults to cwd",
default=os.getcwd()
)
p.add_argument(
"--no-capture-output",
help="Don't capture stdout/stdout",
action=NullCountAction,
default=NULL,
)
p.add_argument(
'executable_call',
nargs=REMAINDER,
help="Executable name, with additional arguments to be passed to the executable "
"on invocation.",
)
p.set_defaults(func='.main_run.execute')
| def configure_parser_run(sub_parsers):
help = "Run an executable in a conda environment. [Experimental]"
descr = help + dedent("""
Use '--' (double dash) to separate CLI flags for 'conda run' from CLI flags sent to
the process being launched.
Example usage:
$ conda create -y -n my-python-2-env python=2
$ conda run -n my-python-2-env python --version
""")
epilog = dedent("""
""")
p = sub_parsers.add_parser(
'run',
description=descr,
help=help,
epilog=epilog,
)
add_parser_prefix(p)
p.add_argument(
"-v", "--verbose",
action=NullCountAction,
help="Use once for info, twice for debug, three times for trace.",
dest="verbosity",
default=NULL,
)
p.add_argument(
"--dev",
action=NullCountAction,
help="Sets `CONDA_EXE` to `python -m conda`, assuming the CWD contains "
"the root of conda development sources. This is mainly for use "
"during tests where we test new conda source against old Python "
"versions.",
dest="dev",
default=NULL,
)
p.add_argument(
"--debug-wrapper-scripts",
action=NullCountAction,
help="When this is set, where implemented, the shell wrapper scripts"
"will echo to stderr a lot of debugging information.",
dest="debug_wrapper_scripts",
default=NULL,
)
p.add_argument(
"--cwd",
help="Current working directory for command to run in. Defaults to cwd",
default=os.getcwd()
)
p.add_argument(
"--no-capture-output",
help="Don't capture stdout/stderr",
action=NullCountAction,
default=NULL,
)
p.add_argument(
'executable_call',
nargs=REMAINDER,
help="Executable name, with additional arguments to be passed to the executable "
"on invocation.",
)
p.set_defaults(func='.main_run.execute')
|
31,029 | def main() -> None:
try:
api_key = demisto.params().get('apikey')
verify = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(headers, verify, proxy)
if demisto.command() == 'CloudConvert-import':
return_results(import_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-convert':
return_results(convert_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-checkstatus':
return_results(check_status_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-export':
return_results(export_command(client, demisto.args()))
elif demisto.command() == 'test-module':
return_results(test_module(client))
except Exception as e:
err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \
('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e))
return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}')
| def main() -> None:
try:
api_key = demisto.params().get('apikey')
verify = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(headers, verify, proxy)
if demisto.command() == 'CloudConvert-import':
return_results(import_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-convert':
return_results(convert_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-checkstatus':
return_results(check_status_command(client, demisto.args()))
elif demisto.command() == 'CloudConvert-export':
return_results(export_command(client, demisto.args()))
elif demisto.command() == 'test-module':
return_results(test_module(client))
except Exception as e:
err_msg = 'Task id not found or expired' if 'No query results for model' in str(e) else \
('No more conversion minutes for today for this user' if 'Payment Required' in str(e) else str(e))
return_error(f'Failed to execute {demisto.command()} command. Error: {err_msg}', error=traceback.format_exc())
|
40,115 | def get_imphash(file_object):
'''
Generates and returns the md5 import hash of file_object.
The imports are sorted before the hex is generated so the order of imports
does not matter.
:param file_object: The FileObject of which the imphash shall be computed
'''
if _is_elf_file(file_object):
try:
with _suppress_stdout():
functions = normalize_lief_items(lief.parse(file_object.file_path).imported_functions)
return md5(','.join(sorted(functions)).encode()).hexdigest()
except Exception:
logging.error('Could not compute imphash for {}'.format(file_object.file_path), exc_info=True)
return None
| def get_imphash(file_object):
'''
Generates and returns the md5 hash of the imported functions of an ELF file represented by `file_object`.
The imports are sorted before the hex is generated so the order of imports
does not matter.
:param file_object: The FileObject of which the imphash shall be computed
'''
if _is_elf_file(file_object):
try:
with _suppress_stdout():
functions = normalize_lief_items(lief.parse(file_object.file_path).imported_functions)
return md5(','.join(sorted(functions)).encode()).hexdigest()
except Exception:
logging.error('Could not compute imphash for {}'.format(file_object.file_path), exc_info=True)
return None
|
20,491 | def remove_social_googleplus(env):
"""On v13, this field has been removed, but it was put on certain places on
the website, and they remain unaltered due to the noupdate=1 flag or being
a COW (created on write) view, so we directly remove that part from the
view if we find the exact HTML expected code that is on the definition.
This is done for avoiding side effects, and it means that if you have altered
somehow that part, you will need to remove it manually.
"""
for key, code in [
(
"website.footer_custom",
' <a t-if="website.social_googleplus" t-att-href="website.social_googleplus"'
' class="btn btn-sm btn-link" rel="publisher"><i class="fa fa-2x fa-google-plus-square"/></a>\n'
),
(
"website.footer_default",
' <a t-att-href="website.social_googleplus" t-if="website.social_googleplus" '
'rel="publisher"><i class="fa fa-google-plus-square"/></a>\n'
),
(
"website_blog.opt_blog_rc_follow_us",
' <a t-att-href="website.social_googleplus" t-if="website.social_googleplus" '
'aria-label="Google Plus" title="Google Plus"><i class="fa fa-google-plus-square"/></a>\n'
),
(
"website_mass_mailing.social_links",
' <t t-if="website.social_googleplus">\n'
' <a t-att-href="website.social_googleplus" style="margin-left:10px" '
'aria-label="Google Plus" title="Google Plus">\n'
' <span class="fa fa-google-plus"/>\n'
' </a>\n'
' </t>\n'
),
]:
views = env["ir.ui.view"].search([("key", "=", key)])
for view in views:
arch = view.arch.replace(code, "")
if arch != view.arch:
view.arch = arch
| def remove_social_googleplus(env):
"""On v13, this field has been removed, but it was put on certain places on
the website, and they remain unaltered due to the noupdate=1 flag or being
a COW (created on write) view, so we directly remove that part from the
view if we find the exact HTML expected code that is on the definition.
This is done for avoiding side effects, and it means that if you have altered
somehow that part, you will need to remove it manually.
"""
for key, code in [
(
"website.footer_custom",
' <a t-if="website.social_googleplus" t-att-href="website.social_googleplus"'
' class="btn btn-sm btn-link" rel="publisher"><i class="fa fa-2x fa-google-plus-square"/></a>\n'
),
(
"website.footer_default",
' <a t-att-href="website.social_googleplus" t-if="website.social_googleplus" '
'rel="publisher"><i class="fa fa-google-plus-square"/></a>\n'
),
(
"website_blog.opt_blog_rc_follow_us",
' <a t-att-href="website.social_googleplus" t-if="website.social_googleplus" '
'aria-label="Google Plus" title="Google Plus"><i class="fa fa-google-plus-square"/></a>\n'
),
(
"website_mass_mailing.social_links",
' <t t-if="website.social_googleplus">\n'
' <a t-att-href="website.social_googleplus" style="margin-left:10px" '
'aria-label="Google Plus" title="Google Plus">\n'
' <span class="fa fa-google-plus"/>\n'
' </a>\n'
' </t>\n'
),
]:
views = env["ir.ui.view"].search([("key", "=", key)])
for view in views:
arch = view.arch.replace(code, "")
if arch != view.arch:
view.arch = arch
|
6,495 | def add_data(employee_map, att_map, holiday_map, filters, default_holiday_list, leave_list=None):
record = []
emp_att_map = {}
for emp in employee_map:
emp_det = employee_map.get(emp)
if not emp_det or emp not in att_map:
continue
row = []
if filters.group_by:
row += [" "]
row += [emp, emp_det.employee_name]
emp_status_map = []
to_date = getdate(filters["to_date"])
		from_date = getdate(filters["from_date"])
keys = get_days_columns(to_date, from_date, get_att_map_key=True)
status_map = get_attendance_status_abbr_map()
		total_p = total_a = total_l = total_h = total_um = 0.0
for day in keys:
attendance_detail = att_map.get(emp).get(day)
emp_holiday_list = emp_det.holiday_list if emp_det.holiday_list else default_holiday_list
status = None
status = get_status(attendance_detail, holiday_map, emp_holiday_list, day)
leave_abbr_map = get_leave_type_abbr()
abbr = ""
if status:
abbr = get_abbr(status, status_map, leave_abbr_map, attendance_detail)
emp_status_map.append(abbr)
if filters.summarized_view:
count = get_totals(status, status_map, attendance_detail)
total_p += count[0]; total_l += count[1]; total_a += count[2]; total_h += count[3]; total_um += count[4]
if not filters.summarized_view:
row += emp_status_map
if filters.summarized_view:
row += [total_p, total_l, total_a, total_h, total_um]
conditions, filters = get_conditions(filters)
if not filters.get("employee"):
filters.update({"employee": emp})
conditions += " and employee = %(employee)s"
elif not filters.get("employee") == emp:
filters.update({"employee": emp})
if filters.summarized_view:
get_leave_type_and_total_leave_taken(row, conditions, filters, emp, leave_list=leave_list)
get_late_entry_and_earl_exit_count(row, conditions, filters)
emp_att_map[emp] = emp_status_map
record.append(row)
return record, emp_att_map
| def add_data(employee_map, att_map, holiday_map, filters, default_holiday_list, leave_list=None):
record = []
emp_att_map = {}
for emp in employee_map:
emp_det = employee_map.get(emp)
if not emp_det or emp not in att_map:
continue
row = []
if filters.group_by:
row += [" "]
row += [emp, emp_det.employee_name]
emp_status_map = []
to_date = getdate(filters["to_date"])
		from_date = getdate(filters["from_date"])
keys = get_days_columns(to_date, from_date, get_att_map_key=True)
status_map = get_attendance_status_abbr_map()
		total_p = total_a = total_l = total_h = total_um = 0.0
for day in keys:
attendance_detail = att_map.get(emp).get(day)
emp_holiday_list = emp_det.holiday_list if emp_det.holiday_list else default_holiday_list
status = get_status(attendance_detail, holiday_map, emp_holiday_list, day)
leave_abbr_map = get_leave_type_abbr()
abbr = ""
if status:
abbr = get_abbr(status, status_map, leave_abbr_map, attendance_detail)
emp_status_map.append(abbr)
if filters.summarized_view:
count = get_totals(status, status_map, attendance_detail)
total_p += count[0]; total_l += count[1]; total_a += count[2]; total_h += count[3]; total_um += count[4]
if not filters.summarized_view:
row += emp_status_map
if filters.summarized_view:
row += [total_p, total_l, total_a, total_h, total_um]
conditions, filters = get_conditions(filters)
if not filters.get("employee"):
filters.update({"employee": emp})
conditions += " and employee = %(employee)s"
elif not filters.get("employee") == emp:
filters.update({"employee": emp})
if filters.summarized_view:
get_leave_type_and_total_leave_taken(row, conditions, filters, emp, leave_list=leave_list)
get_late_entry_and_earl_exit_count(row, conditions, filters)
emp_att_map[emp] = emp_status_map
record.append(row)
return record, emp_att_map
|
26,940 | def get_id_collation_args():
"""Get SQLAlchemy args to use for COLLATION"""
collation = conf.get('core', 'sql_engine_collation_for_ids', fallback=None)
if collation:
return {'collation': collation}
else:
# Automatically use utf8mb3_general_ci collation for mysql
# This is backwards-compatible. All our IDS are ASCII anyway so even if
# we migrate from previously installed database with different collation and we end up mixture of
# COLLATIONS, it's not a problem whatsoever (and we keep it small enough so that our indexes
# for MYSQL will not exceed the maximum index size.
#
# See https://github.com/apache/airflow/pull/17603#issuecomment-901121618.
#
# We cannot use session/dialect as at this point we are trying to determine the right connection
# parameters, so we use the connection
conn = conf.get('core', 'sql_alchemy_conn', fallback='')
if conn.startswith('mysql'):
return {'collation': 'utf8mb3_general_ci'}
return {}
| def get_id_collation_args():
"""Get SQLAlchemy args to use for COLLATION"""
collation = conf.get('core', 'sql_engine_collation_for_ids', fallback=None)
if collation:
return {'collation': collation}
else:
# Automatically use utf8mb3_general_ci collation for mysql
# This is backwards-compatible. All our IDS are ASCII anyway so even if
# we migrate from previously installed database with different collation and we end up mixture of
# COLLATIONS, it's not a problem whatsoever (and we keep it small enough so that our indexes
# for MYSQL will not exceed the maximum index size.
#
# See https://github.com/apache/airflow/pull/17603#issuecomment-901121618.
#
# We cannot use session/dialect as at this point we are trying to determine the right connection
# parameters, so we use the connection
conn = conf.get('core', 'sql_alchemy_conn', fallback='')
if conn.startswith('mysql') or conn.startswith('mariadb'):
return {'collation': 'utf8mb3_general_ci'}
return {}
|
6,388 | def delete_appointments():
doc_list = frappe.get_list('Appointment',filters={'scheduled_time':datetime.datetime.min,'customer_phone_number':'8088'})
for doc in doc_list:
doc.delete()
| def delete_appointments():
doc_list = frappe.get_list('Appointment', filters={'scheduled_time': datetime.datetime.min, 'customer_phone_number': '8088'})
for doc in doc_list:
doc.delete()
|
41,238 | def transformer(cls_or_func: Any) -> Any:
"""Decorator to verify API and append logging functionality to transformer functions & classes.
A transformer is a callable that takes as inputs a cirq.AbstractCircuit and
cirq.TransformerContext, and returns another cirq.AbstractCircuit without
modifying the input circuit. A transformer could be a function, for example:
>>> @cirq.transformer
>>> def convert_to_cz(
>>> circuit: cirq.AbstractCircuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Or it could be a class that implements `__call__` with the same API, for example:
>>> @cirq.transformer
>>> class ConvertToSqrtISwaps:
>>> def __init__(self):
>>> ...
>>> def __call__(
>>> self, circuit: cirq.Circuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Args:
cls_or_func: The callable class or function to be decorated.
Returns:
Decorated class / function which includes additional logging boilerplate.
"""
if isinstance(cls_or_func, type):
cls = cls_or_func
method = cls.__call__
@functools.wraps(method)
def method_with_logging(self, circuit, context):
return _transform_and_log(
lambda circuit, context: method(self, circuit, context),
cls.__name__,
circuit,
context,
)
setattr(cls, '__call__', method_with_logging)
return cls
else:
assert callable(cls_or_func)
func = cls_or_func
@functools.wraps(func)
def func_with_logging(circuit, context):
return _transform_and_log(func, func.__name__, circuit, context)
return func_with_logging
| def transformer(cls_or_func: Any) -> Any:
"""Decorator to verify API and append logging functionality to transformer functions & classes.
A transformer is a callable that takes as inputs a cirq.AbstractCircuit and
`cirq.TransformerContext`, and returns another `cirq.AbstractCircuit` without
modifying the input circuit. A transformer could be a function, for example:
>>> @cirq.transformer
>>> def convert_to_cz(
>>> circuit: cirq.AbstractCircuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Or it could be a class that implements `__call__` with the same API, for example:
>>> @cirq.transformer
>>> class ConvertToSqrtISwaps:
>>> def __init__(self):
>>> ...
>>> def __call__(
>>> self, circuit: cirq.Circuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Args:
cls_or_func: The callable class or function to be decorated.
Returns:
Decorated class / function which includes additional logging boilerplate.
"""
if isinstance(cls_or_func, type):
cls = cls_or_func
method = cls.__call__
@functools.wraps(method)
def method_with_logging(self, circuit, context):
return _transform_and_log(
lambda circuit, context: method(self, circuit, context),
cls.__name__,
circuit,
context,
)
setattr(cls, '__call__', method_with_logging)
return cls
else:
assert callable(cls_or_func)
func = cls_or_func
@functools.wraps(func)
def func_with_logging(circuit, context):
return _transform_and_log(func, func.__name__, circuit, context)
return func_with_logging
|
6,975 | def validate_google_sheets_url(url):
if six.PY2:
from urlparse import urlparse
elif six.PY3:
from urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" or u.netloc != "docs.google.com" or "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
)
| def validate_google_sheets_url(url):
from six.moves.urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" or u.netloc != "docs.google.com" or "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
)
|
58,842 | def preconfigure_modules(compiler, settings):
"""Returns a list of modules buildable in given environment and settings.
For each module in MODULES list, this function checks if the module
can be built in the current environment and reports it.
Returns a list of module names available.
"""
nvcc_path = build.get_nvcc_path()
hipcc_path = build.get_hipcc_path()
summary = [
'',
'************************************************************',
'* CuPy Configuration Summary *',
'************************************************************',
'',
'Build Environment:',
' Include directories: {}'.format(str(settings['include_dirs'])),
' Library directories: {}'.format(str(settings['library_dirs'])),
' nvcc command : {}'.format(
nvcc_path if nvcc_path else '(not found)'),
' hipcc command : {}'.format(
hipcc_path if hipcc_path else '(not found)'),
'',
'Environment Variables:',
]
for key in ['CFLAGS', 'LDFLAGS', 'LIBRARY_PATH',
'CUDA_PATH', 'NVTOOLSEXT_PATH', 'NVCC', 'HIPCC',
'ROCM_HOME']:
summary += [' {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]
summary += [
'',
'Modules:',
]
ret = []
for module in MODULES:
installed = False
status = 'No'
errmsg = []
if module['name'] == 'cutensor':
cutensor_path = os.environ.get('CUTENSOR_PATH', '')
inc_path = os.path.join(cutensor_path, 'include')
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
cuda_version = build.get_cuda_version()
cuda_major = str(cuda_version // 1000)
cuda_major_minor = cuda_major + '.' + \
str((cuda_version // 10) % 100)
for cuda_ver in (cuda_major_minor, cuda_major):
lib_path = os.path.join(cutensor_path, 'lib', cuda_ver)
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
break
if module['name'] == 'cugraph':
cusparselt_path = os.environ.get('CUGRAPH_PATH', '')
for i in 'include', 'include/cugraph':
inc_path = os.path.join(cusparselt_path, i)
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
lib_path = os.path.join(cusparselt_path, 'lib')
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
print('')
print('-------- Configuring Module: {} --------'.format(
module['name']))
sys.stdout.flush()
if not check_library(
compiler,
includes=module['include'],
include_dirs=settings['include_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Include files not found: %s' % module['include'],
'Check your CFLAGS environment variable.']
elif not check_library(
compiler,
libraries=module['libraries'],
library_dirs=settings['library_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Cannot link libraries: %s' % module['libraries'],
'Check your LDFLAGS environment variable.']
elif ('check_method' in module and
not module['check_method'](compiler, settings)):
# Fail on per-library condition check (version requirements etc.)
installed = True
errmsg = ['The library is installed but not supported.']
elif (module['name'] in ('thrust', 'cub', 'random')
and (nvcc_path is None and hipcc_path is None)):
installed = True
cmd = 'nvcc' if not use_hip else 'hipcc'
errmsg = ['{} command could not be found in PATH.'.format(cmd),
'Check your PATH environment variable.']
else:
installed = True
status = 'Yes'
ret.append(module['name'])
if installed and 'version_method' in module:
status += ' (version {})'.format(module['version_method'](True))
summary += [
' {:<10}: {}'.format(module['name'], status)
]
# If error message exists...
if len(errmsg) != 0:
summary += [' -> {}'.format(m) for m in errmsg]
# Skip checking other modules when CUDA is unavailable.
if module['name'] == 'cuda':
break
# Get a list of the CC of the devices connected to this node
if not use_hip:
build.check_compute_capabilities(compiler, settings)
if len(ret) != len(MODULES):
if 'cuda' in ret:
lines = [
'WARNING: Some modules could not be configured.',
'CuPy will be installed without these modules.',
]
else:
lines = [
'ERROR: CUDA could not be found on your system.',
]
summary += [
'',
] + lines + [
'Please refer to the Installation Guide for details:',
'https://docs.cupy.dev/en/stable/install.html',
'',
]
summary += [
'************************************************************',
'',
]
print('\n'.join(summary))
return ret, settings
| def preconfigure_modules(compiler, settings):
"""Returns a list of modules buildable in given environment and settings.
For each module in MODULES list, this function checks if the module
can be built in the current environment and reports it.
Returns a list of module names available.
"""
nvcc_path = build.get_nvcc_path()
hipcc_path = build.get_hipcc_path()
summary = [
'',
'************************************************************',
'* CuPy Configuration Summary *',
'************************************************************',
'',
'Build Environment:',
' Include directories: {}'.format(str(settings['include_dirs'])),
' Library directories: {}'.format(str(settings['library_dirs'])),
' nvcc command : {}'.format(
nvcc_path if nvcc_path else '(not found)'),
' hipcc command : {}'.format(
hipcc_path if hipcc_path else '(not found)'),
'',
'Environment Variables:',
]
for key in ['CFLAGS', 'LDFLAGS', 'LIBRARY_PATH',
'CUDA_PATH', 'NVTOOLSEXT_PATH', 'NVCC', 'HIPCC',
'ROCM_HOME']:
summary += [' {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]
summary += [
'',
'Modules:',
]
ret = []
for module in MODULES:
installed = False
status = 'No'
errmsg = []
if module['name'] == 'cutensor':
cutensor_path = os.environ.get('CUTENSOR_PATH', '')
inc_path = os.path.join(cutensor_path, 'include')
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
cuda_version = build.get_cuda_version()
cuda_major = str(cuda_version // 1000)
cuda_major_minor = cuda_major + '.' + \
str((cuda_version // 10) % 100)
for cuda_ver in (cuda_major_minor, cuda_major):
lib_path = os.path.join(cutensor_path, 'lib', cuda_ver)
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
break
if module['name'] == 'cugraph':
cugraph_path = os.environ.get('CUGRAPH_PATH', '')
for i in 'include', 'include/cugraph':
inc_path = os.path.join(cugraph_path, i)
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
lib_path = os.path.join(cugraph_path, 'lib')
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
print('')
print('-------- Configuring Module: {} --------'.format(
module['name']))
sys.stdout.flush()
if not check_library(
compiler,
includes=module['include'],
include_dirs=settings['include_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Include files not found: %s' % module['include'],
'Check your CFLAGS environment variable.']
elif not check_library(
compiler,
libraries=module['libraries'],
library_dirs=settings['library_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Cannot link libraries: %s' % module['libraries'],
'Check your LDFLAGS environment variable.']
elif ('check_method' in module and
not module['check_method'](compiler, settings)):
# Fail on per-library condition check (version requirements etc.)
installed = True
errmsg = ['The library is installed but not supported.']
elif (module['name'] in ('thrust', 'cub', 'random')
and (nvcc_path is None and hipcc_path is None)):
installed = True
cmd = 'nvcc' if not use_hip else 'hipcc'
errmsg = ['{} command could not be found in PATH.'.format(cmd),
'Check your PATH environment variable.']
else:
installed = True
status = 'Yes'
ret.append(module['name'])
if installed and 'version_method' in module:
status += ' (version {})'.format(module['version_method'](True))
summary += [
' {:<10}: {}'.format(module['name'], status)
]
# If error message exists...
if len(errmsg) != 0:
summary += [' -> {}'.format(m) for m in errmsg]
# Skip checking other modules when CUDA is unavailable.
if module['name'] == 'cuda':
break
# Get a list of the CC of the devices connected to this node
if not use_hip:
build.check_compute_capabilities(compiler, settings)
if len(ret) != len(MODULES):
if 'cuda' in ret:
lines = [
'WARNING: Some modules could not be configured.',
'CuPy will be installed without these modules.',
]
else:
lines = [
'ERROR: CUDA could not be found on your system.',
]
summary += [
'',
] + lines + [
'Please refer to the Installation Guide for details:',
'https://docs.cupy.dev/en/stable/install.html',
'',
]
summary += [
'************************************************************',
'',
]
print('\n'.join(summary))
return ret, settings
|
38,518 | def test_half_space_interior_point_convex_3d():
# Find interior point in convex domain made of half spaces in 3d.
n = np.array([[1, -1, 0, 0, 0, 0], [0, 0, 1, -1, 0, 0], [0, 0, 0, 0, 1, -1]])
x0 = np.array([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]])
pts = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
pt = half_space.half_space_interior_point(n, x0, pts).reshape((-1, 1))
# Verify that the computed point is on the same side of the all normal
# vectors as a point known to be in the interior.
known_pt = np.array([1.0 / 2.0, 1.0 / 2.0, 1.0 / 2.0]).reshape((-1, 1))
assert np.all(
np.sign(np.sum(n * (pt - x0), axis=0))
== np.sign(np.sum(n * (known_pt - x0), axis=0))
)
| def test_half_space_interior_point_convex_3d():
# Find interior point in convex domain made of half spaces in 3d.
n = np.array([[1, -1, 0, 0, 0, 0], [0, 0, 1, -1, 0, 0], [0, 0, 0, 0, 1, -1]])
x0 = np.array([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]])
pts = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
pt = half_space.half_space_interior_point(n, x0, pts).reshape((-1, 1))
# Verify that the computed point is on the same side of all the normal
# vectors as a point known to be in the interior.
known_pt = np.array([1.0 / 2.0, 1.0 / 2.0, 1.0 / 2.0]).reshape((-1, 1))
assert np.all(
np.sign(np.sum(n * (pt - x0), axis=0))
== np.sign(np.sum(n * (known_pt - x0), axis=0))
)
|
31,432 | def xsoar_store_list_command(args: Dict[str, Any]) -> CommandResults:
namespace = args.get('namespace', 'default')
data = demisto.getIntegrationContext().get(namespace)
if not data:
if namespace == 'default':
return_error(f"Namespace: <default> empty!")
else:
return_error(f"Namespace: <{namespace}> not found!")
data = [key for key in demisto.getIntegrationContext().get(namespace)]
number_of_keys = len(data)
r_data = "\n".join(data)
return CommandResults(
readable_output=f"{number_of_keys} key(s) found: \n {r_data}",
outputs_prefix=f"XSOAR.Store.{namespace}",
outputs={"keys": data},
raw_response=data
)
| def xsoar_store_list_command(args: Dict[str, Any]) -> CommandResults:
namespace = args.get('namespace', 'default')
data = demisto.getIntegrationContext().get(namespace)
if not data:
if namespace == 'default':
return_error(f"Namespace: <default> empty!")
else:
return_error(f"Namespace: <{namespace}> not found!")
data = [key for key in demisto.getIntegrationContext().get(namespace, [])]
number_of_keys = len(data)
r_data = "\n".join(data)
return CommandResults(
readable_output=f"{number_of_keys} key(s) found: \n {r_data}",
outputs_prefix=f"XSOAR.Store.{namespace}",
outputs={"keys": data},
raw_response=data
)
|
38,677 | def git_clone(url, targetdir=None):
'''Clone a git repository from a URL.
:arg url: The URL to clone from.
:arg targetdir: The directory where the repository will cloned to. If
:class:`None`, a new directory will be created with the repository
name as if ``git clone {url}`` was issued.
'''
if not git_repo_exists(url):
raise ReframeError('git repository does not exist')
targetdir = targetdir or ''
run_command('git clone %s %s' % (url, targetdir), check=True)
| def git_clone(url, targetdir=None):
'''Clone a git repository from a URL.
:arg url: The URL to clone from.
:arg targetdir: The directory where the repository will be cloned to. If
:class:`None`, a new directory will be created with the repository
name as if ``git clone {url}`` was issued.
'''
if not git_repo_exists(url):
raise ReframeError('git repository does not exist')
targetdir = targetdir or ''
run_command('git clone %s %s' % (url, targetdir), check=True)
|
6,987 | def make_notification_logs(doc, users):
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
for user in users:
username = frappe.db.exists("User", {"email": user, "enabled": 1})
if username:
if is_notifications_enabled(user):
if doc.type == 'Energy Point' and not is_energy_point_enabled():
return
_doc = frappe.new_doc('Notification Log')
_doc.update(doc)
_doc.for_user = username
if _doc.for_user != _doc.from_user or doc.type == "Energy Point" or doc.type == "Alert":
_doc.insert(ignore_permissions=True)
| def make_notification_logs(doc, users):
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
for user in users:
username = frappe.db.get_value("User", {"email": user, "enabled": 1})
if username:
if is_notifications_enabled(user):
if doc.type == 'Energy Point' and not is_energy_point_enabled():
return
_doc = frappe.new_doc('Notification Log')
_doc.update(doc)
_doc.for_user = username
if _doc.for_user != _doc.from_user or doc.type == "Energy Point" or doc.type == "Alert":
_doc.insert(ignore_permissions=True)
|
30,152 | def fetch_production(zone_key='NL', session=None, target_datetime=None,
logger=logging.getLogger(__name__), energieopwek_nl=True):
if target_datetime is None:
target_datetime = arrow.utcnow()
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(zone_key=zone_key,
session=r,
target_datetime=target_datetime,
logger=logger)
if not consumptions:
return
for c in consumptions:
del c['source']
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index(
'datetime')
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ['BE', 'DE', 'GB']:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(['NO', zone_key])
exchange_NO = [statnett.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=dt.datetime,
logger=logger)
for dt in arrow.Arrow.range(
'hour',
arrow.get(min([e['datetime']
for e in exchanges])).replace(minute=0),
arrow.get(max([e['datetime']
for e in exchanges])).replace(minute=0))]
exchanges.extend(exchange_NO)
# add DK1 data
zone_1, zone_2 = sorted(['DK-DK1', zone_key])
df_dk = pd.DataFrame(DK.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=target_datetime,
logger=logger))
    # Because other exchanges and consumption data are only available per hour,
    # we floor the timestamp to the hour and group by hour with averaging of netFlow
df_dk['datetime'] = df_dk['datetime'].dt.floor('H')
exchange_DK = df_dk.groupby(['datetime']).aggregate({'netFlow' : 'mean',
'sortedZoneKeys': 'max', 'source' : 'max'}).reset_index()
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({'netFlow': 3})
exchanges.extend(exchange_DK.to_dict(orient='records'))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
        if e['sortedZoneKeys'].startswith('NL->'):
e['NL_import'] = -1 * e['netFlow']
else:
e['NL_import'] = e['netFlow']
del e['source']
del e['netFlow']
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index('datetime')
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby('datetime').sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method='ffill', limit=3) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (df_consumptions_with_exchanges['consumption']
- df_consumptions_with_exchanges['NL_import'])
# Fetch all production
# The energieopwek_nl parser is backwards compatible with ENTSOE parser.
# Because of data quality issues we switch to using energieopwek, but if
# data quality of ENTSOE improves we can switch back to using a single
# source.
productions_ENTSOE = ENTSOE.fetch_production(zone_key=zone_key, session=r,
target_datetime=target_datetime, logger=logger)
if energieopwek_nl:
productions_eopwek = fetch_production_energieopwek_nl(session=r,
target_datetime=target_datetime, logger=logger)
# For every production value we look up the corresponding ENTSOE
# values and copy the nuclear, gas, coal, biomass and unknown production.
productions = []
for p in productions_eopwek:
entsoe_value = next((pe for pe in productions_ENTSOE
if pe["datetime"] == p["datetime"]), None)
if entsoe_value:
p["production"]["nuclear"] = entsoe_value["production"]["nuclear"]
p["production"]["gas"] = entsoe_value["production"]["gas"]
p["production"]["coal"] = entsoe_value["production"]["coal"]
p["production"]["biomass"] = entsoe_value["production"]["biomass"]
p["production"]["unknown"] = entsoe_value["production"]["unknown"]
productions.append(p)
else:
productions = productions_ENTSOE
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
        # if for some reason there's no unknown value
        if 'unknown' not in p['production']:
p['production']['unknown'] = 0
Z = sum([x or 0 for x in p['production'].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if p['datetime'] in df_total_generations and Z < df_total_generations[p['datetime']]:
p['production']['unknown'] = round((
df_total_generations[p['datetime']] - Z + p['production']['unknown']), 3)
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p['production']['unknown'] > 0]
| def fetch_production(zone_key='NL', session=None, target_datetime=None,
logger=logging.getLogger(__name__), energieopwek_nl=True):
if target_datetime is None:
target_datetime = arrow.utcnow()
r = session or requests.session()
consumptions = ENTSOE.fetch_consumption(zone_key=zone_key,
session=r,
target_datetime=target_datetime,
logger=logger)
if not consumptions:
return
for c in consumptions:
del c['source']
df_consumptions = pd.DataFrame.from_dict(consumptions).set_index(
'datetime')
# NL has exchanges with BE, DE, NO, GB, DK-DK1
exchanges = []
for exchange_key in ['BE', 'DE', 'GB']:
zone_1, zone_2 = sorted([exchange_key, zone_key])
exchange = ENTSOE.fetch_exchange(zone_key1=zone_1,
zone_key2=zone_2,
session=r,
target_datetime=target_datetime,
logger=logger)
if not exchange:
return
exchanges.extend(exchange or [])
# add NO data, fetch once for every hour
# This introduces an error, because it doesn't use the average power flow
# during the hour, but rather only the value during the first minute of the
# hour!
zone_1, zone_2 = sorted(['NO', zone_key])
exchange_NO = [statnett.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=dt.datetime,
logger=logger)
for dt in arrow.Arrow.range(
'hour',
arrow.get(min([e['datetime']
for e in exchanges])).replace(minute=0),
arrow.get(max([e['datetime']
for e in exchanges])).replace(minute=0))]
exchanges.extend(exchange_NO)
# add DK1 data
zone_1, zone_2 = sorted(['DK-DK1', zone_key])
df_dk = pd.DataFrame(DK.fetch_exchange(zone_key1=zone_1, zone_key2=zone_2,
session=r, target_datetime=target_datetime,
logger=logger))
    # Because other exchanges and consumption data are only available per hour,
    # we floor the timestamp to the hour and group by hour with averaging of netFlow
df_dk['datetime'] = df_dk['datetime'].dt.floor('H')
exchange_DK = df_dk.groupby(['datetime']).aggregate({'netFlow' : 'mean',
'sortedZoneKeys': 'max', 'source' : 'max'}).reset_index()
# because averaging with high precision numbers leads to rounding errors
exchange_DK = exchange_DK.round({'netFlow': 3})
exchanges.extend(exchange_DK.to_dict(orient='records'))
# We want to know the net-imports into NL, so if NL is in zone_1 we need
# to flip the direction of the flow. E.g. 100MW for NL->DE means 100MW
# export to DE and needs to become -100MW for import to NL.
for e in exchanges:
        if e['sortedZoneKeys'].startswith('NL->'):
e['NL_import'] = -1 * e['netFlow']
else:
e['NL_import'] = e['netFlow']
del e['source']
del e['netFlow']
df_exchanges = pd.DataFrame.from_dict(exchanges).set_index('datetime')
# Sum all exchanges to NL imports
df_exchanges = df_exchanges.groupby('datetime').sum()
# Fill missing values by propagating the value forward
df_consumptions_with_exchanges = df_consumptions.join(df_exchanges).fillna(
method='ffill', limit=3) # Limit to 3 x 15min
# Load = Generation + netImports
# => Generation = Load - netImports
df_total_generations = (df_consumptions_with_exchanges['consumption']
- df_consumptions_with_exchanges['NL_import'])
# Fetch all production
# The energieopwek_nl parser is backwards compatible with ENTSOE parser.
# Because of data quality issues we switch to using energieopwek, but if
# data quality of ENTSOE improves we can switch back to using a single
# source.
productions_ENTSOE = ENTSOE.fetch_production(zone_key=zone_key, session=r,
target_datetime=target_datetime, logger=logger)
if energieopwek_nl:
productions_eopwek = fetch_production_energieopwek_nl(session=r,
target_datetime=target_datetime, logger=logger)
# For every production value we look up the corresponding ENTSOE
# values and copy the nuclear, gas, coal, biomass and unknown production.
productions = []
for p in productions_eopwek:
entsoe_value = next((pe for pe in productions_ENTSOE
if pe["datetime"] == p["datetime"]), None)
if entsoe_value:
p["production"]["nuclear"] = entsoe_value["production"]["nuclear"]
p["production"]["gas"] = entsoe_value["production"]["gas"]
p["production"]["coal"] = entsoe_value["production"]["coal"]
p["production"]["biomass"] = entsoe_value["production"]["biomass"]
p["production"]["unknown"] = entsoe_value["production"]["unknown"]
productions.append(p)
else:
productions = productions_ENTSOE
if not productions:
return
# Flatten production dictionaries (we ignore storage)
for p in productions:
        # if for some reason there's no unknown value
        if 'unknown' not in p['production'] or p['production']['unknown'] is None:
p['production']['unknown'] = 0
Z = sum([x or 0 for x in p['production'].values()])
# Only calculate the difference if the datetime exists
# If total ENTSOE reported production (Z) is less than total generation
# (calculated from consumption and imports), then there must be some
# unknown production missing, so we add the difference.
# The difference can actually be negative, because consumption is based
# on TSO network load, but locally generated electricity may never leave
# the DSO network and be substantial (e.g. Solar).
if p['datetime'] in df_total_generations and Z < df_total_generations[p['datetime']]:
p['production']['unknown'] = round((
df_total_generations[p['datetime']] - Z + p['production']['unknown']), 3)
# Filter invalid
# We should probably add logging to this
return [p for p in productions if p['production']['unknown'] > 0]
|
49,758 | def get_options(argv=None):
"""
Convert options into commands.
Return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument(
'--new-instance',
action='store_true',
default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)"
)
parser.add_argument(
'--defaults',
dest="reset_to_defaults",
action='store_true',
default=False,
help="Reset configuration settings to defaults"
)
parser.add_argument(
'--reset',
dest="reset_config_files",
action='store_true',
default=False,
help="Remove all configuration files!"
)
parser.add_argument(
'--optimize',
action='store_true',
default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)"
)
parser.add_argument(
'-w', '--workdir',
dest="working_directory",
default=None,
help="Default working directory"
)
parser.add_argument(
'--hide-console',
action='store_true',
default=False,
help="Hide parent console window (Windows)"
)
parser.add_argument(
'--show-console',
action='store_true',
default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console"
)
parser.add_argument(
'--multithread',
dest="multithreaded",
action='store_true',
default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)"
)
parser.add_argument(
'--profile',
action='store_true',
default=False,
help="Profile mode (internal test, not related "
"with Python profiling)"
)
parser.add_argument(
'--window-title',
type=str,
default=None,
help="String to show in the main window title"
)
parser.add_argument(
'-p', '--project',
default=None,
type=str,
dest="project",
help="Path that contains an Spyder project"
)
parser.add_argument(
'--opengl',
default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help="OpenGL implementation to pass to Qt"
)
parser.add_argument(
'--paths',
action='store_true',
default=False,
help="Show all Spyder configuration paths"
)
parser.add_argument(
'--debug-info',
default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument(
'--debug-output',
default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info to the terminal and a file in "
"the configuration directory or to the terminal and a file "
"called spyder-debug.log in the current working directory. "
"Default is 'terminal'.")
)
parser.add_argument(
'--filter-log',
default='',
help="Comma-separated module name hierarchies whose log "
"messages should be shown. e.g., "
"spyder.plugins.completion,spyder.plugins.editor"
)
parser.add_argument(
'--safe-mode',
dest="safe_mode",
action='store_true',
default=False,
help="Start Spyder with a clean configuration directory"
)
parser.add_argument(
'--offline',
dest="offline",
action='store_true',
default=False,
help="Disable all web engine and download functionality."
)
parser.add_argument(
'--report-segfault',
dest="report_segfault",
action='store_true',
default=False,
help="Report segmentation fault to Github."
)
parser.add_argument(
'--conf-dir',
type=str,
dest="conf_dir",
default=None,
help="Choose a configuration directory to use for Spyder."
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args
| def get_options(argv=None):
"""
Convert options into commands.
Return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument(
'--new-instance',
action='store_true',
default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)"
)
parser.add_argument(
'--defaults',
dest="reset_to_defaults",
action='store_true',
default=False,
help="Reset configuration settings to defaults"
)
parser.add_argument(
'--reset',
dest="reset_config_files",
action='store_true',
default=False,
help="Remove all configuration files!"
)
parser.add_argument(
'--optimize',
action='store_true',
default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)"
)
parser.add_argument(
'-w', '--workdir',
dest="working_directory",
default=None,
help="Default working directory"
)
parser.add_argument(
'--hide-console',
action='store_true',
default=False,
help="Hide parent console window (Windows)"
)
parser.add_argument(
'--show-console',
action='store_true',
default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console"
)
parser.add_argument(
'--multithread',
dest="multithreaded",
action='store_true',
default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)"
)
parser.add_argument(
'--profile',
action='store_true',
default=False,
help="Profile mode (internal test, not related "
"with Python profiling)"
)
parser.add_argument(
'--window-title',
type=str,
default=None,
help="String to show in the main window title"
)
parser.add_argument(
'-p', '--project',
default=None,
type=str,
dest="project",
help="Path that contains an Spyder project"
)
parser.add_argument(
'--opengl',
default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help="OpenGL implementation to pass to Qt"
)
parser.add_argument(
'--paths',
action='store_true',
default=False,
help="Show all Spyder configuration paths"
)
parser.add_argument(
'--debug-info',
default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument(
'--debug-output',
default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info to the terminal and a file in "
"the configuration directory or to the terminal and a file "
"called spyder-debug.log in the current working directory. "
"Default is 'terminal'.")
)
parser.add_argument(
'--filter-log',
default='',
help="Comma-separated module name hierarchies whose log "
"messages should be shown. e.g., "
"spyder.plugins.completion,spyder.plugins.editor"
)
parser.add_argument(
'--safe-mode',
dest="safe_mode",
action='store_true',
default=False,
help="Start Spyder with a clean configuration directory"
)
parser.add_argument(
'--offline',
dest="offline",
action='store_true',
default=False,
help="Disable the usage of web widgets in Spyder (e.g. the Help and "
"Online help panes)."
)
parser.add_argument(
'--report-segfault',
dest="report_segfault",
action='store_true',
default=False,
help="Report segmentation fault to Github."
)
parser.add_argument(
'--conf-dir',
type=str,
dest="conf_dir",
default=None,
help="Choose a configuration directory to use for Spyder."
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args
|
41,907 | def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
| def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if update_category_axes.get(x_param, False):
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
58,023 | def get_jobs(topology: Topology, device_filter_string: str = None, status: str = None, job_type: str = None,
id: int = None) -> List[ShowJobsAllResultData]:
"""
Get all the jobs from the devices in the environment, or a single job when ID is specified.
    Jobs are sorted with the most recently queued first and are returned in a way that's consumable by Generic Polling.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
:param status: Filter returned jobs by status
:param job_type: Filter returned jobs by type
:param id: Filter by ID
"""
if id:
id = int(id)
result: List[ShowJobsAllResultData] = UniversalCommand.show_jobs(
topology,
device_filter_string,
job_type=job_type,
status=status,
id=id
)
return result
| def get_jobs(topology: Topology, device_filter_string: str = None, status: str = None, job_type: str = None,
id: int = None) -> List[ShowJobsAllResultData]:
"""
Get all the jobs from the devices in the environment, or a single job when ID is specified.
    Jobs are sorted with the most recently queued first and are returned in a way that's consumable by Generic Polling.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
:param status: Filter returned jobs by status
:param job_type: Filter returned jobs by type
:param id: Filter by ID
"""
_id = arg_to_number(id)
return UniversalCommand.show_jobs(
topology,
device_filter_string,
job_type=job_type,
status=status,
id=_id
)
|
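The change above swaps a bare `int(id)` cast for `arg_to_number`, the XSOAR helper that tolerates missing arguments. A rough standalone equivalent, shown only to illustrate the intent (this is not the actual helper implementation):

```python
from typing import Optional, Union


def to_optional_int(value: Union[str, int, None]) -> Optional[int]:
    """Convert a command argument to int, passing None through unchanged."""
    if value is None or value == "":
        return None
    return int(value)


assert to_optional_int(None) is None
assert to_optional_int("42") == 42
assert to_optional_int(7) == 7
```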
43,174 | def double_radius_node_labeling(g, src, dst):
r"""Double Radius Node Labeling, as introduced in `Link Prediction
Based on Graph Neural Networks <https://arxiv.org/abs/1802.09691>`__.
This function computes the double radius node labeling for each node to mark
nodes' different roles in an enclosing subgraph, given a target link.
The node labels of source :math:`s` and destination :math:`t` are set to 1 and
those of unreachable nodes from source or destination are set to 0. The labels
of other nodes :math:`l` are defined according to the following hash function:
:math:`l = 1 + min(d_s, d_t) + (d//2)[(d//2) + (d%2) - 1]`
where :math:`d_s` and :math:`d_t` denote the shortest distance to the source and
the target, respectively. :math:`d = d_s + d_t`.
Parameters
----------
g : DGLGraph
The input graph.
src : int
The source node id of the target link.
dst : int
The destination node id of the target link.
Returns
-------
Tensor
The double radius node labeling Tensor of shape :math:`(N,)`, where
:math:`N` is the number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.graph(([0,0,0,0,1,1,2,4], [1,2,3,6,3,4,4,5]))
>>> dgl.double_radius_node_labeling(g, 0, 1)
tensor([1, 1, 3, 2, 3, 7, 0])
"""
adj = g.adj(scipy_fmt='csr')
src, dst = (dst, src) if src > dst else (src, dst)
idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
adj_wo_src = adj[idx, :][:, idx]
idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
adj_wo_dst = adj[idx, :][:, idx]
# distance to the source node
ds = sparse.csgraph.shortest_path(adj_wo_dst, directed=False, unweighted=True, indices=src)
ds = np.insert(ds, dst, 0, axis=0)
# distance to the destination node
dt = sparse.csgraph.shortest_path(adj_wo_src, directed=False, unweighted=True, indices=dst-1)
dt = np.insert(dt, src, 0, axis=0)
d = ds + dt
# suppress invalid value (nan) warnings
with np.errstate(invalid='ignore'):
z = 1 + np.stack([ds, dt]).min(axis=0) + d//2 * (d//2 + d%2 -1)
z[src] = 1
z[dst] = 1
z[np.isnan(z)] = 0 # unreachable nodes
return F.tensor(z, F.int64)
| def double_radius_node_labeling(g, src, dst):
r"""Double Radius Node Labeling, as introduced in `Link Prediction
Based on Graph Neural Networks <https://arxiv.org/abs/1802.09691>`__.
This function computes the double radius node labeling for each node to mark
nodes' different roles in an enclosing subgraph, given a target link.
The node labels of source :math:`s` and destination :math:`t` are set to 1 and
those of unreachable nodes from source or destination are set to 0. The labels
of other nodes :math:`l` are defined according to the following hash function:
:math:`l = 1 + min(d_s, d_t) + (d//2)[(d//2) + (d%2) - 1]`
where :math:`d_s` and :math:`d_t` denote the shortest distance to the source and
the target, respectively. :math:`d = d_s + d_t`.
Parameters
----------
g : DGLGraph
The input graph.
src : int
The source node ID of the target link.
dst : int
The destination node id of the target link.
Returns
-------
Tensor
The double radius node labeling Tensor of shape :math:`(N,)`, where
:math:`N` is the number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.graph(([0,0,0,0,1,1,2,4], [1,2,3,6,3,4,4,5]))
>>> dgl.double_radius_node_labeling(g, 0, 1)
tensor([1, 1, 3, 2, 3, 7, 0])
"""
adj = g.adj(scipy_fmt='csr')
src, dst = (dst, src) if src > dst else (src, dst)
idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
adj_wo_src = adj[idx, :][:, idx]
idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
adj_wo_dst = adj[idx, :][:, idx]
# distance to the source node
ds = sparse.csgraph.shortest_path(adj_wo_dst, directed=False, unweighted=True, indices=src)
ds = np.insert(ds, dst, 0, axis=0)
# distance to the destination node
dt = sparse.csgraph.shortest_path(adj_wo_src, directed=False, unweighted=True, indices=dst-1)
dt = np.insert(dt, src, 0, axis=0)
d = ds + dt
# suppress invalid value (nan) warnings
with np.errstate(invalid='ignore'):
z = 1 + np.stack([ds, dt]).min(axis=0) + d//2 * (d//2 + d%2 -1)
z[src] = 1
z[dst] = 1
z[np.isnan(z)] = 0 # unreachable nodes
return F.tensor(z, F.int64)
|
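The labeling formula in the docstring above is easiest to sanity-check on a single node. The sketch below recomputes the hash from a pair of shortest-path distances and reproduces three of the values in the example output; it is an illustration of the formula only, not the DGL implementation:

```python
def drnl_label(ds: int, dt: int) -> int:
    """DRNL hash for a node at distance ds from the source and dt from the target."""
    d = ds + dt
    return 1 + min(ds, dt) + (d // 2) * (d // 2 + d % 2 - 1)


# Node 2 in the docstring example: distance 1 to the source, 2 to the target.
assert drnl_label(1, 2) == 3
# Node 3: distance 1 to both endpoints.
assert drnl_label(1, 1) == 2
# Node 5: distance 3 to the source, 2 to the target.
assert drnl_label(3, 2) == 7
```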
49,116 | def is_convex(f, *syms, domain=S.Reals):
r"""Determines the convexity of the function passed in the argument.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
syms : Tuple of :py:class:`~.Symbol`
The variables with respect to which the convexity is to be determined.
domain : :py:class:`~.Interval`, optional
The domain over which the convexity of the function has to be checked.
If unspecified, S.Reals will be the default domain.
Returns
=======
bool
    The method returns ``True`` if the function is convex, otherwise it
    returns ``False``.
Raises
======
NotImplementedError
The check for the convexity of multivariate functions is not implemented yet.
Notes
=====
    To determine concavity of a function, pass `-f` as the concerned function.
    To determine logarithmic convexity of a function, pass `\log(f)` as the
    concerned function.
    To determine logarithmic concavity of a function, pass `-\log(f)` as the
    concerned function.
    Currently, the convexity check of multivariate functions is not handled.
Examples
========
>>> from sympy import is_convex, symbols, exp, oo, Interval
>>> x = symbols('x')
>>> is_convex(exp(x), x)
True
>>> is_convex(x**3, x, domain = Interval(-1, oo))
False
>>> is_convex(1/x**2, x, domain=Interval.open(0, oo))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Convex_function
.. [2] http://www.ifp.illinois.edu/~angelia/L3_convfunc.pdf
.. [3] https://en.wikipedia.org/wiki/Logarithmically_convex_function
.. [4] https://en.wikipedia.org/wiki/Logarithmically_concave_function
.. [5] https://en.wikipedia.org/wiki/Concave_function
"""
if len(syms) > 1:
raise NotImplementedError(
"The check for the convexity of multivariate functions is not implemented yet.")
from sympy.solvers.inequalities import solve_univariate_inequality
f = _sympify(f)
var = syms[0]
s = singularities(f, var)
for ele in s:
if ele in domain:
return False
condition = f.diff(var, 2) < 0
if solve_univariate_inequality(condition, var, False, domain):
return False
return True
| def is_convex(f, *syms, domain=S.Reals):
r"""Determines the convexity of the function passed in the argument.
Parameters
==========
f : :py:class:`~.Expr`
The concerned function.
syms : Tuple of :py:class:`~.Symbol`
The variables with respect to which the convexity is to be determined.
domain : :py:class:`~.Interval`, optional
The domain over which the convexity of the function has to be checked.
If unspecified, S.Reals will be the default domain.
Returns
=======
bool
    The method returns ``True`` if the function is convex, otherwise it
    returns ``False``.
Raises
======
NotImplementedError
The check for the convexity of multivariate functions is not implemented yet.
Notes
=====
    To determine concavity of a function, pass `-f` as the concerned function.
    To determine logarithmic convexity of a function, pass `\log(f)` as the
    concerned function.
    To determine logarithmic concavity of a function, pass `-\log(f)` as the
    concerned function.
    Currently, the convexity check of multivariate functions is not handled.
Examples
========
>>> from sympy import is_convex, symbols, exp, oo, Interval
>>> x = symbols('x')
>>> is_convex(exp(x), x)
True
>>> is_convex(x**3, x, domain = Interval(-1, oo))
False
>>> is_convex(1/x**2, x, domain=Interval.open(0, oo))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Convex_function
.. [2] http://www.ifp.illinois.edu/~angelia/L3_convfunc.pdf
.. [3] https://en.wikipedia.org/wiki/Logarithmically_convex_function
.. [4] https://en.wikipedia.org/wiki/Logarithmically_concave_function
.. [5] https://en.wikipedia.org/wiki/Concave_function
"""
if len(syms) > 1:
raise NotImplementedError(
"The check for the convexity of multivariate functions is not implemented yet.")
from sympy.solvers.inequalities import solve_univariate_inequality
f = _sympify(f)
var = syms[0]
if any(s in domain for s in singularities(f, var)):
return False
condition = f.diff(var, 2) < 0
if solve_univariate_inequality(condition, var, False, domain):
return False
return True
|
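The body of `is_convex` reduces to two checks: no singularities inside the domain, and a second derivative that is never negative there. The short SymPy sketch below probes the second condition directly for the docstring's examples; it mirrors the idea rather than the library internals:

```python
from sympy import Interval, S, exp, oo, solveset, symbols

x = symbols('x')

# exp(x): f''(x) = exp(x) is never negative, so the convexity test passes.
print(solveset(exp(x).diff(x, 2) < 0, x, S.Reals))           # EmptySet

# x**3 on [-1, oo): f''(x) = 6*x is negative just below zero,
# so the convexity test fails, matching is_convex(x**3, x, ...) above.
print(solveset((x**3).diff(x, 2) < 0, x, Interval(-1, oo)))  # a non-empty interval
```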
4,283 | def write_meas_info(fid, info, data_type=None, reset_range=True):
"""Write measurement info into a file id (from a fif file).
Parameters
----------
fid : file
Open file descriptor.
info : instance of Info
The measurement info structure.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
Notes
-----
Tags are written in a particular order for compatibility with maxfilter.
"""
info._check_consistency()
_check_dates(info)
# Measurement info
start_block(fid, FIFF.FIFFB_MEAS_INFO)
# Add measurement id
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
for event in info['events']:
start_block(fid, FIFF.FIFFB_EVENTS)
if event.get('channels') is not None:
write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])
if event.get('list') is not None:
write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])
end_block(fid, FIFF.FIFFB_EVENTS)
# HPI Result
for hpi_result in info['hpi_results']:
start_block(fid, FIFF.FIFFB_HPI_RESULT)
write_dig_points(fid, hpi_result['dig_points'])
if 'order' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,
hpi_result['order'])
if 'used' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])
if 'moments' in hpi_result:
write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,
hpi_result['moments'])
if 'goodness' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,
hpi_result['goodness'])
if 'good_limit' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
hpi_result['good_limit'])
if 'dist_limit' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,
hpi_result['dist_limit'])
if 'accept' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])
if 'coord_trans' in hpi_result:
write_coord_trans(fid, hpi_result['coord_trans'])
end_block(fid, FIFF.FIFFB_HPI_RESULT)
# HPI Measurement
for hpi_meas in info['hpi_meas']:
start_block(fid, FIFF.FIFFB_HPI_MEAS)
if hpi_meas.get('creator') is not None:
write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])
if hpi_meas.get('sfreq') is not None:
write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])
if hpi_meas.get('nchan') is not None:
write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])
if hpi_meas.get('nave') is not None:
write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])
if hpi_meas.get('ncoil') is not None:
write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])
if hpi_meas.get('first_samp') is not None:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])
if hpi_meas.get('last_samp') is not None:
write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])
for hpi_coil in hpi_meas['hpi_coils']:
start_block(fid, FIFF.FIFFB_HPI_COIL)
if hpi_coil.get('number') is not None:
write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])
if hpi_coil.get('epoch') is not None:
write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])
if hpi_coil.get('slopes') is not None:
write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])
if hpi_coil.get('corr_coeff') is not None:
write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,
hpi_coil['corr_coeff'])
if hpi_coil.get('coil_freq') is not None:
write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,
hpi_coil['coil_freq'])
end_block(fid, FIFF.FIFFB_HPI_COIL)
end_block(fid, FIFF.FIFFB_HPI_MEAS)
# Polhemus data
write_dig_points(fid, info['dig'], block=True)
# megacq parameters
if info['acq_pars'] is not None or info['acq_stim'] is not None:
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if info['acq_pars'] is not None:
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if info['acq_stim'] is not None:
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
# Coordinate transformations if the HPI result block was not there
if info['dev_head_t'] is not None:
write_coord_trans(fid, info['dev_head_t'])
if info['ctf_head_t'] is not None:
write_coord_trans(fid, info['ctf_head_t'])
if info['dev_ctf_t'] is not None:
write_coord_trans(fid, info['dev_ctf_t'])
# Projectors
rename_map = _make_rename_map(info['chs'])
_write_proj(fid, info['projs'], rename=rename_map)
# Bad channels
if len(info['bads']) > 0:
bads = _rename_list(info['bads'].copy(), rename_map)
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads)
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# General
if info.get('experimenter') is not None:
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if info.get('description') is not None:
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if info.get('proj_id') is not None:
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if info.get('proj_name') is not None:
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if info.get('meas_date') is not None:
write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date']))
if info.get('utc_offset') is not None:
write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
if info['lowpass'] is not None:
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
if info['highpass'] is not None:
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if info.get('line_freq') is not None:
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if info.get('gantry_angle') is not None:
write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info['gantry_angle'])
if data_type is not None:
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
if info.get('custom_ref_applied'):
write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])
if info.get('xplotter_layout'):
write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])
# Channel information
_write_ch_infos(fid, info['chs'], reset_range, rename_map)
# Subject information
if info.get('subject_info') is not None:
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if si.get('id') is not None:
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if si.get('his_id') is not None:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if si.get('last_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if si.get('first_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if si.get('middle_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])
if si.get('birthday') is not None:
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if si.get('sex') is not None:
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if si.get('hand') is not None:
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
if si.get('weight') is not None:
write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight'])
if si.get('height') is not None:
write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height'])
end_block(fid, FIFF.FIFFB_SUBJECT)
del si
if info.get('device_info') is not None:
start_block(fid, FIFF.FIFFB_DEVICE)
di = info['device_info']
write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type'])
for key in ('model', 'serial', 'site'):
if di.get(key) is not None:
write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()),
di[key])
end_block(fid, FIFF.FIFFB_DEVICE)
del di
if info.get('helium_info') is not None:
start_block(fid, FIFF.FIFFB_HELIUM)
hi = info['helium_info']
if hi.get('he_level_raw') is not None:
write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw'])
if hi.get('helium_level') is not None:
write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level'])
if hi.get('orig_file_guid') is not None:
write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid'])
write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date'])
end_block(fid, FIFF.FIFFB_HELIUM)
del hi
if info.get('hpi_subsystem') is not None:
hs = info['hpi_subsystem']
start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
if hs.get('ncoil') is not None:
write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])
if hs.get('event_channel') is not None:
write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])
if hs.get('hpi_coils') is not None:
for coil in hs['hpi_coils']:
start_block(fid, FIFF.FIFFB_HPI_COIL)
if coil.get('event_bits') is not None:
write_int(fid, FIFF.FIFF_EVENT_BITS,
coil['event_bits'])
end_block(fid, FIFF.FIFFB_HPI_COIL)
end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
del hs
# CTF compensation info
comps = info['comps']
if rename_map:
comps = deepcopy(comps)
_rename_comps(comps, rename_map)
write_ctf_comp(fid, comps)
# KIT system ID
if info.get('kit_system_id') is not None:
write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])
end_block(fid, FIFF.FIFFB_MEAS_INFO)
# Processing history
_write_proc_history(fid, info)
| def write_meas_info(fid, info, data_type=None, reset_range=True):
"""Write measurement info into a file id (from a fif file).
Parameters
----------
fid : file
Open file descriptor.
info : instance of Info
The measurement info structure.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
Notes
-----
Tags are written in a particular order for compatibility with maxfilter.
"""
info._check_consistency()
_check_dates(info)
# Measurement info
start_block(fid, FIFF.FIFFB_MEAS_INFO)
# Add measurement id
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
for event in info['events']:
start_block(fid, FIFF.FIFFB_EVENTS)
if event.get('channels') is not None:
write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])
if event.get('list') is not None:
write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])
end_block(fid, FIFF.FIFFB_EVENTS)
# HPI Result
for hpi_result in info['hpi_results']:
start_block(fid, FIFF.FIFFB_HPI_RESULT)
write_dig_points(fid, hpi_result['dig_points'])
if 'order' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,
hpi_result['order'])
if 'used' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])
if 'moments' in hpi_result:
write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,
hpi_result['moments'])
if 'goodness' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,
hpi_result['goodness'])
if 'good_limit' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,
hpi_result['good_limit'])
if 'dist_limit' in hpi_result:
write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,
hpi_result['dist_limit'])
if 'accept' in hpi_result:
write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])
if 'coord_trans' in hpi_result:
write_coord_trans(fid, hpi_result['coord_trans'])
end_block(fid, FIFF.FIFFB_HPI_RESULT)
# HPI Measurement
for hpi_meas in info['hpi_meas']:
start_block(fid, FIFF.FIFFB_HPI_MEAS)
if hpi_meas.get('creator') is not None:
write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])
if hpi_meas.get('sfreq') is not None:
write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])
if hpi_meas.get('nchan') is not None:
write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])
if hpi_meas.get('nave') is not None:
write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])
if hpi_meas.get('ncoil') is not None:
write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])
if hpi_meas.get('first_samp') is not None:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])
if hpi_meas.get('last_samp') is not None:
write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])
for hpi_coil in hpi_meas['hpi_coils']:
start_block(fid, FIFF.FIFFB_HPI_COIL)
if hpi_coil.get('number') is not None:
write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])
if hpi_coil.get('epoch') is not None:
write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])
if hpi_coil.get('slopes') is not None:
write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])
if hpi_coil.get('corr_coeff') is not None:
write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,
hpi_coil['corr_coeff'])
if hpi_coil.get('coil_freq') is not None:
write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,
hpi_coil['coil_freq'])
end_block(fid, FIFF.FIFFB_HPI_COIL)
end_block(fid, FIFF.FIFFB_HPI_MEAS)
# Polhemus data
write_dig_points(fid, info['dig'], block=True)
# megacq parameters
if info['acq_pars'] is not None or info['acq_stim'] is not None:
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if info['acq_pars'] is not None:
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if info['acq_stim'] is not None:
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
# Coordinate transformations if the HPI result block was not there
if info['dev_head_t'] is not None:
write_coord_trans(fid, info['dev_head_t'])
if info['ctf_head_t'] is not None:
write_coord_trans(fid, info['ctf_head_t'])
if info['dev_ctf_t'] is not None:
write_coord_trans(fid, info['dev_ctf_t'])
# Projectors
ch_names_mapping = _make_ch_names_mapping(info['chs'])
    _write_proj(fid, info['projs'], rename=ch_names_mapping)
# Bad channels
if len(info['bads']) > 0:
        bads = _rename_list(info['bads'].copy(), ch_names_mapping)
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads)
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# General
if info.get('experimenter') is not None:
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if info.get('description') is not None:
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if info.get('proj_id') is not None:
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if info.get('proj_name') is not None:
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if info.get('meas_date') is not None:
write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date']))
if info.get('utc_offset') is not None:
write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
if info['lowpass'] is not None:
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
if info['highpass'] is not None:
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if info.get('line_freq') is not None:
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if info.get('gantry_angle') is not None:
write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info['gantry_angle'])
if data_type is not None:
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
if info.get('custom_ref_applied'):
write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])
if info.get('xplotter_layout'):
write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])
# Channel information
    _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping)
# Subject information
if info.get('subject_info') is not None:
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if si.get('id') is not None:
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if si.get('his_id') is not None:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if si.get('last_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if si.get('first_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if si.get('middle_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])
if si.get('birthday') is not None:
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if si.get('sex') is not None:
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if si.get('hand') is not None:
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
if si.get('weight') is not None:
write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight'])
if si.get('height') is not None:
write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height'])
end_block(fid, FIFF.FIFFB_SUBJECT)
del si
if info.get('device_info') is not None:
start_block(fid, FIFF.FIFFB_DEVICE)
di = info['device_info']
write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type'])
for key in ('model', 'serial', 'site'):
if di.get(key) is not None:
write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()),
di[key])
end_block(fid, FIFF.FIFFB_DEVICE)
del di
if info.get('helium_info') is not None:
start_block(fid, FIFF.FIFFB_HELIUM)
hi = info['helium_info']
if hi.get('he_level_raw') is not None:
write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw'])
if hi.get('helium_level') is not None:
write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level'])
if hi.get('orig_file_guid') is not None:
write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid'])
write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date'])
end_block(fid, FIFF.FIFFB_HELIUM)
del hi
if info.get('hpi_subsystem') is not None:
hs = info['hpi_subsystem']
start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
if hs.get('ncoil') is not None:
write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])
if hs.get('event_channel') is not None:
write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])
if hs.get('hpi_coils') is not None:
for coil in hs['hpi_coils']:
start_block(fid, FIFF.FIFFB_HPI_COIL)
if coil.get('event_bits') is not None:
write_int(fid, FIFF.FIFF_EVENT_BITS,
coil['event_bits'])
end_block(fid, FIFF.FIFFB_HPI_COIL)
end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)
del hs
# CTF compensation info
comps = info['comps']
    if ch_names_mapping:
        comps = deepcopy(comps)
        _rename_comps(comps, ch_names_mapping)
write_ctf_comp(fid, comps)
# KIT system ID
if info.get('kit_system_id') is not None:
write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])
end_block(fid, FIFF.FIFFB_MEAS_INFO)
# Processing history
_write_proc_history(fid, info)
|
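Both versions of `write_meas_info` above bracket every sub-structure with a `start_block`/`end_block` pair, which has to stay balanced even when a block is written conditionally. A hypothetical convenience wrapper (not part of MNE) that enforces the pairing could look like this:

```python
from contextlib import contextmanager


@contextmanager
def fif_block(fid, kind, start_block, end_block):
    """Hypothetical helper: open a FIF block and guarantee it is closed."""
    start_block(fid, kind)
    try:
        yield
    finally:
        end_block(fid, kind)


# Usage sketch, with FIFF, start_block, end_block and write_int taken from the
# surrounding module:
#
#     with fif_block(fid, FIFF.FIFFB_SUBJECT, start_block, end_block):
#         write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
```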
35,890 | def drive(cfg, model_path=None, use_joystick=False, model_type=None,
camera_type='single', meta=[]):
"""
Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either its run or run_threaded method
depending on the constructor flag `threaded`. All parts are updated one
after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
part finishes processing in a timely manner. Parts may have named outputs
and inputs. The framework handles passing named outputs to parts
requesting the same named input.
"""
logger.info(f'PID: {os.getpid()}')
if cfg.DONKEY_GYM:
#the simulator will use cuda and then we usually run out of resources
#if we also try to use cuda. so disable for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
if cfg.HAVE_MQTT_TELEMETRY:
from donkeycar.parts.telemetry import MqttTelemetry
tel = MqttTelemetry(cfg)
if cfg.HAVE_ODOM:
if cfg.ENCODER_TYPE == "GPIO":
from donkeycar.parts.encoder import RotaryEncoder
enc = RotaryEncoder(mm_per_tick=0.306096, pin = cfg.ODOM_PIN, debug = cfg.ODOM_DEBUG)
V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
elif cfg.ENCODER_TYPE == "arduino":
from donkeycar.parts.encoder import ArduinoEncoder
enc = ArduinoEncoder()
V.add(enc, outputs=['enc/speed'], threaded=True)
else:
print("No supported encoder found")
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
inputs = []
outputs = ['cam/image_array']
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
#rbx
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, record_lidar=cfg.SIM_RECORD_LIDAR, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from donkeycar.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
            raise(Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))
# add lidar
if cfg.USE_LIDAR:
from donkeycar.parts.lidar import RPLidar
if cfg.LIDAR_TYPE == 'RP':
print("adding RP lidar part")
lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
if cfg.LIDAR_TYPE == 'YD':
print("YD Lidar not yet supported")
# Donkey gym part will output position information if it is configured
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
if cfg.SIM_RECORD_GYROACCEL:
outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
if cfg.SIM_RECORD_VELOCITY:
outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
if cfg.SIM_RECORD_LIDAR:
outputs += ['lidar/dist_array']
V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "pigpio_rc": # RC controllers read by GPIO pins. They typically don't have buttons
from donkeycar.parts.controller import RCReceiver
ctr = RCReceiver(cfg)
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'recording'],threaded=False)
else:
if cfg.CONTROLLER_TYPE == "custom": #custom controller created with `donkey createjs` command
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
elif cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE:
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"): # these controllers don't use the joystick class
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger('circle', show_record_count_status) #then we are not using the circle button. hijack that to force a record count indication
else:
show_record_count_status()
# ctr.set_button_down_trigger('circle', show_record_count_status)
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
#IMU
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
elif cfg.HAVE_ODOM:
inputs = ['cam/image_array', 'enc/speed']
elif model_type == "imu":
assert(cfg.HAVE_IMU)
#Run the pilot if the mode is not user.
inputs=['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
else:
inputs=['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a .h5 extension
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
        return True when in ai mode, otherwise respect the user mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "I2C_SERVO":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor
left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
#add tub to save data
if cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array', 'user/angle', 'user/throttle', 'user/mode']
types = ['image_array', 'nparray','float', 'float', 'str']
else:
inputs=['cam/image_array','user/angle', 'user/throttle', 'user/mode']
types=['image_array','float', 'float','str']
if cfg.HAVE_ODOM:
inputs += ['enc/speed']
types += ['float']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
# rbx
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
types += ['float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_GYROACCEL:
inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
types += ['float', 'float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_VELOCITY:
inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
types += ['float', 'float', 'float']
if cfg.SIM_RECORD_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from donkeycar.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# do we want to store new records into own dir or append to existing
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
    # Telemetry (we add the same metrics added to the TubHandler)
if cfg.HAVE_MQTT_TELEMETRY:
telem_inputs, _ = tel.add_step_inputs(inputs, types)
V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif(cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
#run the vehicle for 20 seconds
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
| def drive(cfg, model_path=None, use_joystick=False, model_type=None,
camera_type='single', meta=[]):
"""
Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either its run or run_threaded method
depending on the constructor flag `threaded`. All parts are updated one
after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
part finishes processing in a timely manner. Parts may have named outputs
and inputs. The framework handles passing named outputs to parts
requesting the same named input.
"""
logger.info(f'PID: {os.getpid()}')
if cfg.DONKEY_GYM:
#the simulator will use cuda and then we usually run out of resources
#if we also try to use cuda. so disable for donkey_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
if cfg.HAVE_MQTT_TELEMETRY:
from donkeycar.parts.telemetry import MqttTelemetry
tel = MqttTelemetry(cfg)
if cfg.HAVE_ODOM:
if cfg.ENCODER_TYPE == "GPIO":
from donkeycar.parts.encoder import RotaryEncoder
enc = RotaryEncoder(mm_per_tick=0.306096, pin = cfg.ODOM_PIN, debug = cfg.ODOM_DEBUG)
V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
elif cfg.ENCODER_TYPE == "arduino":
from donkeycar.parts.encoder import ArduinoEncoder
enc = ArduinoEncoder()
V.add(enc, outputs=['enc/speed'], threaded=True)
else:
print("No supported encoder found")
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from donkeycar.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from donkeycar.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
inputs = []
outputs = ['cam/image_array']
threaded = True
if cfg.DONKEY_GYM:
from donkeycar.parts.dgym import DonkeyGymEnv
#rbx
cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, record_lidar=cfg.SIM_RECORD_LIDAR, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from donkeycar.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from donkeycar.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from donkeycar.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from donkeycar.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from donkeycar.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from donkeycar.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from donkeycar.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from donkeycar.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
            raise(Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))
# add lidar
if cfg.USE_LIDAR:
from donkeycar.parts.lidar import RPLidar
if cfg.LIDAR_TYPE == 'RP':
print("adding RP lidar part")
lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
if cfg.LIDAR_TYPE == 'YD':
print("YD Lidar not yet supported")
# Donkey gym part will output position information if it is configured
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
if cfg.SIM_RECORD_GYROACCEL:
outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
if cfg.SIM_RECORD_VELOCITY:
outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
if cfg.SIM_RECORD_LIDAR:
outputs += ['lidar/dist_array']
V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "pigpio_rc": # RC controllers read by GPIO pins. They typically don't have buttons
from donkeycar.parts.controller import RCReceiver
ctr = RCReceiver(cfg)
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'recording'],threaded=False)
else:
if cfg.CONTROLLER_TYPE == "custom": #custom controller created with `donkey createjs` command
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
elif cfg.CONTROLLER_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
else:
from donkeycar.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from donkeycar.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
from donkeycar.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE:
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"): # these controllers don't use the joystick class
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger('circle', show_record_count_status) #then we are not using the circle button. hijack that to force a record count indication
else:
show_record_count_status()
# ctr.set_button_down_trigger('circle', show_record_count_status)
#Sombrero
if cfg.HAVE_SOMBRERO:
from donkeycar.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from donkeycar.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
#IMU
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
elif cfg.HAVE_ODOM:
inputs = ['cam/image_array', 'enc/speed']
elif model_type == "imu":
assert(cfg.HAVE_IMU)
#Run the pilot if the mode is not user.
inputs=['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
else:
inputs=['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
#When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
#when we have a supported model file extension (.h5, .uff, .tflite or .pkl)
#load everything from the model file
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
#when we have a .json extension
#load the model from there and look for a matching
#.wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
#this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
#these parts will reload the model file, but only when ai is running so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
outputs=['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
V.add(kl, inputs=inputs,
outputs=outputs,
run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
#Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
#Ai Recording
class AiRecordingCondition:
'''
return True when in ai mode, otherwise respect the user mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
#Drive train setup
if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "I2C_SERVO":
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor
left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from donkeycar.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from donkeycar.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
#add tub to save data
if cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array', 'user/angle', 'user/throttle', 'user/mode']
types = ['image_array', 'nparray','float', 'float', 'str']
else:
inputs=['cam/image_array','user/angle', 'user/throttle', 'user/mode']
types=['image_array','float', 'float','str']
if cfg.HAVE_ODOM:
inputs += ['enc/speed']
types += ['float']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
# rbx
if cfg.DONKEY_GYM:
if cfg.SIM_RECORD_LOCATION:
inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
types += ['float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_GYROACCEL:
inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
types += ['float', 'float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_VELOCITY:
inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
types += ['float', 'float', 'float']
if cfg.SIM_RECORD_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from donkeycar.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# do we want to store new records in their own dir or append to the existing one
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
# Telemetry (we add the same metrics added to the TubHandler)
if cfg.HAVE_MQTT_TELEMETRY:
telem_inputs, _ = tel.add_step_inputs(inputs, types)
V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from donkeycar.parts.network import TCPServeValue
from donkeycar.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.DONKEY_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
#run the vehicle loop
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
|
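The long drive template above is, at heart, a pipeline: every sensor, controller, pilot and actuator is registered on the vehicle with V.add() together with the named memory channels it reads and writes, plus an optional run_condition gate, and V.start() then polls the parts at a fixed rate. Below is a minimal, self-contained sketch of that part/channel pattern; MiniVehicle, ConstantCamera and EchoPilot are hypothetical stand-ins written for illustration, not the real donkeycar API.

class ConstantCamera:
    def run(self):
        return "frame"                     # stands in for cam/image_array

class EchoPilot:
    def run(self, image):
        return 0.1, 0.3                    # stands in for pilot/angle, pilot/throttle

class MiniVehicle:
    def __init__(self):
        self.mem = {}
        self.parts = []                    # (part, inputs, outputs, run_condition)

    def add(self, part, inputs=(), outputs=(), run_condition=None):
        self.parts.append((part, inputs, outputs, run_condition))

    def start(self, loops=3):
        for _ in range(loops):
            for part, inputs, outputs, cond in self.parts:
                if cond is not None and not self.mem.get(cond):
                    continue               # gate the part, like run_condition above
                result = part.run(*[self.mem.get(k) for k in inputs])
                if len(outputs) == 1:
                    result = (result,)
                self.mem.update(zip(outputs, result))
        return self.mem

V = MiniVehicle()
V.add(ConstantCamera(), outputs=["cam/image_array"])
V.add(EchoPilot(), inputs=["cam/image_array"],
      outputs=["pilot/angle", "pilot/throttle"], run_condition="run_pilot")
V.mem["run_pilot"] = True                  # analogous to the PilotCondition part above
print(V.start())                           # mem now holds the camera frame and pilot outputs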
30,452 | def get_self_deployed_token():
if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET):
return_error('You must provide the Tenant ID, Application ID and Client Secret.')
integration_context = demisto.getIntegrationContext()
if integration_context and integration_context['token_expiration_time']:
token_expiration_time = integration_context['token_expiration_time']
now = int(time.time())
if token_expiration_time < now:
return integration_context['token']
url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID)
resource_app_id_uri = 'https://api.securitycenter.windows.com'
data = {
'resource': resource_app_id_uri,
'client_id': APP_ID,
'client_secret': APP_SECRET,
'grant_type': 'client_credentials'
}
response = requests.post(url, data, verify=USE_SSL)
body = response.json()
if response.status_code != 200:
return_error('Error in Microsoft authorization: {}'.format(str(body)))
demisto.setIntegrationContext({
'token_expiration_time': body['expires_on'],
'token': body['access_token']
})
return body['access_token']
| def get_self_deployed_token():
if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET):
return_error('You must provide the Tenant ID, Application ID and Client Secret.')
integration_context = demisto.getIntegrationContext()
if integration_context and integration_context.get('token_expiration_time'):
token_expiration_time = integration_context['token_expiration_time']
now = int(time.time())
if token_expiration_time > now:
return integration_context['token']
url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID)
resource_app_id_uri = 'https://api.securitycenter.windows.com'
data = {
'resource': resource_app_id_uri,
'client_id': APP_ID,
'client_secret': APP_SECRET,
'grant_type': 'client_credentials'
}
response = requests.post(url, data, verify=USE_SSL)
body = response.json()
if response.status_code != 200:
return_error('Error in Microsoft authorization: {}'.format(str(body)))
demisto.setIntegrationContext({
'token_expiration_time': body['expires_on'],
'token': body['access_token']
})
return body['access_token']
|
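get_self_deployed_token() above is a plain OAuth2 client-credentials grant against the Azure AD token endpoint, with the result cached in the integration context. A stripped-down sketch of the same grant using requests follows; the tenant and app values are hypothetical placeholders and must be replaced with real credentials before it can actually run.

import requests

TENANT_ID = "<tenant-id>"            # hypothetical placeholders - substitute real values
APP_ID = "<application-id>"
APP_SECRET = "<client-secret>"

def fetch_token():
    # Client-credentials grant: POST form-encoded data to the tenant token endpoint.
    url = "https://login.windows.net/{}/oauth2/token".format(TENANT_ID)
    data = {
        "resource": "https://api.securitycenter.windows.com",
        "client_id": APP_ID,
        "client_secret": APP_SECRET,
        "grant_type": "client_credentials",
    }
    response = requests.post(url, data=data)
    response.raise_for_status()
    body = response.json()
    # 'expires_on' is a unix timestamp; keep it next to the token so callers can cache.
    return body["access_token"], int(body["expires_on"])

# token, expires_on = fetch_token()
# cached_token_still_valid = expires_on > int(time.time())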
11,899 | def contain(image, size, method=Image.BICUBIC):
"""
Returns a sized version of the image, expanded to fill the requested aspect ratio
and size.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
:return: An image.
"""
im_ratio = image.width / image.height
dest_ratio = size[0] / size[1]
if im_ratio != dest_ratio:
if im_ratio > dest_ratio:
new_height = int(image.height / image.width * size[0])
if new_height != size[1]:
size = (size[0], new_height)
else:
new_width = int(image.width / image.height * size[1])
if new_width != size[0]:
size = (new_width, size[1])
return image.resize(size, resample=method)
| def contain(image, size, method=Image.BICUBIC):
"""
Returns a sized version of the image, expanded to fill the requested aspect ratio
and size.
:param image: The image to resize and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
:return: An image.
"""
im_ratio = image.width / image.height
dest_ratio = size[0] / size[1]
if im_ratio != dest_ratio:
if im_ratio > dest_ratio:
new_height = int(image.height / image.width * size[0])
if new_height != size[1]:
size = (size[0], new_height)
else:
new_width = int(image.width / image.height * size[1])
if new_width != size[0]:
size = (new_width, size[1])
return image.resize(size, resample=method)
|
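A quick sanity check of the aspect-ratio arithmetic in contain() above: a 1024x768 image asked to fit within 128x128 should come back as 128x96, since the image is wider than the target box. The sketch below assumes Pillow is installed and uses ImageOps.contain, which recent Pillow releases expose with the same behaviour as the function above.

from PIL import Image, ImageOps

im = Image.new("RGB", (1024, 768))        # blank landscape test image
out = ImageOps.contain(im, (128, 128))    # same logic as contain() above
print(out.size)                           # (128, 96)

# im_ratio = 1024/768 > dest_ratio = 1, so new_height = int(768 / 1024 * 128) = 96
# and the width stays at the requested 128.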
32,404 | def main():
global EMAIL
global SEND_AS
command = demisto.command()
client = Client()
demisto.info(f'Command being called is {command}')
commands = {
'test-module': test_module,
'send-mail': send_mail_command,
'reply-mail': reply_mail_command,
'fetch-incidents': fetch_incidents,
'gmail-auth-link': auth_link_command,
'gmail-auth-test': auth_test_command,
'gmail-get-attachments': get_attachments_command,
}
try:
if command == 'fetch-incidents':
demisto.incidents(fetch_incidents(client))
sys.exit(0)
if command in commands:
demisto.results(commands[command](client))
# Log exceptions
except Exception as e:
import traceback
return_error('GMAIL: {} {}'.format(str(e), traceback.format_exc()))
| def main():
global EMAIL
global SEND_AS
command = demisto.command()
client = Client()
demisto.info(f'Command being called is {command}')
commands = {
'test-module': test_module,
'send-mail': send_mail_command,
'reply-mail': reply_mail_command,
'fetch-incidents': fetch_incidents,
'gmail-auth-link': auth_link_command,
'gmail-auth-test': auth_test_command,
'gmail-get-attachments': get_attachments_command,
}
try:
if command == 'fetch-incidents':
demisto.incidents(fetch_incidents(client))
sys.exit(0)
if command in commands:
demisto.results(commands[command](client))
# Log exceptions
except Exception as e:
import traceback
return_error(f'An error occurred: {e}', error=e)
|
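The Gmail main() above follows the usual dict-dispatch shape: map command names to handler callables, look up demisto.command(), and invoke the match. The same shape in plain Python, with hypothetical handler names used only for illustration:

def handle_test(client=None):
    return "ok"

def handle_send(client=None):
    return "sent"

# Hypothetical command table mirroring the dict-dispatch shape above.
COMMANDS = {
    "test-module": handle_test,
    "send-mail": handle_send,
}

def dispatch(command, client=None):
    handler = COMMANDS.get(command)
    if handler is None:
        raise NotImplementedError("Command not implemented: {}".format(command))
    return handler(client=client)

print(dispatch("test-module"))            # ok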
45,349 | def _predict(
booster,
data,
**kwargs,
):
"""
Run distributed prediction with a trained booster on Ray backend.
During work it runs xgb.predict on each worker for row partition of `data`
and creates Modin DataFrame with prediction results.
Parameters
----------
booster : xgboost.Booster
A trained booster.
data : modin.experimental.xgboost.DMatrix
Input data used for prediction.
**kwargs : dict
Other parameters are the same as `xgboost.Booster.predict`.
Returns
-------
modin.pandas.DataFrame
Modin DataFrame with prediction results.
"""
s = time.time()
# Get metainfo from dmatrix
input_index, input_columns, row_lengths = data.data_metainfo
# Infer columns of result
def _get_num_columns(booster, n_features, **kwargs):
rng = np.random.RandomState(777)
test_data = rng.randn(1, n_features)
test_predictions = booster.predict(
xgb.DMatrix(test_data), validate_features=False, **kwargs
)
num_columns = (
test_predictions.shape[1] if len(test_predictions.shape) > 1 else 1
)
return num_columns
result_num_columns = _get_num_columns(booster, len(input_columns), **kwargs)
new_columns = list(range(result_num_columns))
# Put common data in object store
booster = ray.put(booster)
new_columns_ref = ray.put(new_columns)
prediction_refs = [
_map_predict.remote(booster, part, new_columns_ref, **kwargs)
for _, part in data.data
]
predictions = from_partitions(
prediction_refs,
0,
index=input_index,
columns=new_columns,
row_lengths=row_lengths,
column_widths=[len(new_columns)],
)
LOGGER.info(f"Prediction time: {time.time() - s} s")
return predictions
| def _predict(
booster,
data,
**kwargs,
):
"""
Run distributed prediction with a trained booster on Ray backend.
During execution it runs `xgb.predict` on each worker for a subset of `data`
and creates Modin DataFrame with prediction results.
Parameters
----------
booster : xgboost.Booster
A trained booster.
data : modin.experimental.xgboost.DMatrix
Input data used for prediction.
**kwargs : dict
Other parameters are the same as `xgboost.Booster.predict`.
Returns
-------
modin.pandas.DataFrame
Modin DataFrame with prediction results.
"""
s = time.time()
# Get metainfo from dmatrix
input_index, input_columns, row_lengths = data.data_metainfo
# Infer columns of result
def _get_num_columns(booster, n_features, **kwargs):
rng = np.random.RandomState(777)
test_data = rng.randn(1, n_features)
test_predictions = booster.predict(
xgb.DMatrix(test_data), validate_features=False, **kwargs
)
num_columns = (
test_predictions.shape[1] if len(test_predictions.shape) > 1 else 1
)
return num_columns
result_num_columns = _get_num_columns(booster, len(input_columns), **kwargs)
new_columns = list(range(result_num_columns))
# Put common data in object store
booster = ray.put(booster)
new_columns_ref = ray.put(new_columns)
prediction_refs = [
_map_predict.remote(booster, part, new_columns_ref, **kwargs)
for _, part in data.data
]
predictions = from_partitions(
prediction_refs,
0,
index=input_index,
columns=new_columns,
row_lengths=row_lengths,
column_widths=[len(new_columns)],
)
LOGGER.info(f"Prediction time: {time.time() - s} s")
return predictions
|
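The inner _get_num_columns() helper above learns the width of the prediction output by running the booster on a single random row before fanning out the real work. The same trick in isolation, on a tiny multiclass booster; this assumes xgboost and numpy are installed, and the data is random and purely illustrative.

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.randn(64, 5)
y = rng.randint(0, 3, size=64)            # three classes

booster = xgb.train(
    {"objective": "multi:softprob", "num_class": 3, "verbosity": 0},
    xgb.DMatrix(X, label=y),
    num_boost_round=2,
)

# Predict on a single random row only to learn the output width.
probe = booster.predict(xgb.DMatrix(rng.randn(1, 5)), validate_features=False)
num_columns = probe.shape[1] if probe.ndim > 1 else 1
print(num_columns)                        # 3 - one probability column per class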
13,216 | def invalid_json_error(release, ex):
"""Error for invalid JSON format."""
release.model.errors = {
'errors': str(ex),
}
| def invalid_json_error(release, ex):
"""Error for invalid JSON format."""
release.model.errors = {
'errors': ex.message,
}
|
32,473 | def main() -> None:
LOG('command is %s' % (demisto.command(),))
try:
handle_proxy()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
http_request('/domains/categorization/google.com?showLabels')
demisto.results('ok')
sys.exit(0)
elif demisto.command() == 'investigate-umbrella-domain-categorization' or demisto.command() == \
'umbrella-domain-categorization':
demisto.results(get_domain_categorization_command())
elif demisto.command() == 'investigate-umbrella-domain-search' or demisto.command() == 'umbrella-domain-search':
demisto.results(get_domain_search_command())
elif demisto.command() == 'investigate-umbrella-domain-co-occurrences' or demisto.command() == \
'umbrella-domain-co-occurrences':
demisto.results(get_domain_co_occurrences_command())
elif demisto.command() == 'investigate-umbrella-domain-related' or demisto.command() == 'umbrella-domain-related':
demisto.results(get_domain_related_command())
elif demisto.command() == 'investigate-umbrella-domain-security' or demisto.command() == 'umbrella-domain-security':
demisto.results(get_domain_security_command())
elif demisto.command() == 'investigate-umbrella-domain-dns-history' or demisto.command() == \
'umbrella-domain-dns-history':
demisto.results(get_domain_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-dns-history' or demisto.command() == 'umbrella-ip-dns-history':
demisto.results(get_ip_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-malicious-domains' or demisto.command() == \
'umbrella-ip-malicious-domains':
demisto.results(get_ip_malicious_domains_command())
# new-commands:
elif demisto.command() == 'domain':
return_results(get_domain_command())
elif demisto.command() == 'umbrella-get-related-domains':
demisto.results(get_related_domains_command())
elif demisto.command() == 'umbrella-get-domain-classifiers':
demisto.results(get_domain_classifiers_command())
elif demisto.command() == 'umbrella-get-domain-queryvolume':
demisto.results(get_domain_query_volume_command())
elif demisto.command() == 'umbrella-get-domain-details':
demisto.results(get_domain_details_command())
elif demisto.command() == 'umbrella-get-domains-for-email-registrar':
demisto.results(get_domains_for_email_registrar_command())
elif demisto.command() == 'umbrella-get-domains-for-nameserver':
demisto.results(get_domains_for_nameserver_command())
elif demisto.command() == 'umbrella-get-whois-for-domain':
return_results(get_whois_for_domain_command())
elif demisto.command() == 'umbrella-get-malicious-domains-for-ip':
demisto.results(get_malicious_domains_for_ip_command())
elif demisto.command() == 'umbrella-get-domains-using-regex':
demisto.results(get_domain_using_regex_command())
elif demisto.command() == 'umbrella-get-domain-timeline':
demisto.results(get_domain_timeline_command())
elif demisto.command() == 'umbrella-get-ip-timeline':
demisto.results(get_ip_timeline_command())
elif demisto.command() == 'umbrella-get-url-timeline':
demisto.results(get_url_timeline_command())
except HTTPError as e:
if e.args[0]:
return_error(e.args[0])
else:
return_error(f"HTTP error with code {e.response.status_code}")
except Exception as e:
LOG(str(e))
LOG.print_log()
return_error(str(e))
| def main() -> None:
demisto.debug(f'Command being called is {demisto.command()}')
try:
handle_proxy()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
http_request('/domains/categorization/google.com?showLabels')
demisto.results('ok')
sys.exit(0)
elif demisto.command() == 'investigate-umbrella-domain-categorization' or demisto.command() == \
'umbrella-domain-categorization':
demisto.results(get_domain_categorization_command())
elif demisto.command() == 'investigate-umbrella-domain-search' or demisto.command() == 'umbrella-domain-search':
demisto.results(get_domain_search_command())
elif demisto.command() == 'investigate-umbrella-domain-co-occurrences' or demisto.command() == \
'umbrella-domain-co-occurrences':
demisto.results(get_domain_co_occurrences_command())
elif demisto.command() == 'investigate-umbrella-domain-related' or demisto.command() == 'umbrella-domain-related':
demisto.results(get_domain_related_command())
elif demisto.command() == 'investigate-umbrella-domain-security' or demisto.command() == 'umbrella-domain-security':
demisto.results(get_domain_security_command())
elif demisto.command() == 'investigate-umbrella-domain-dns-history' or demisto.command() == \
'umbrella-domain-dns-history':
demisto.results(get_domain_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-dns-history' or demisto.command() == 'umbrella-ip-dns-history':
demisto.results(get_ip_dns_history_command())
elif demisto.command() == 'investigate-umbrella-ip-malicious-domains' or demisto.command() == \
'umbrella-ip-malicious-domains':
demisto.results(get_ip_malicious_domains_command())
# new-commands:
elif demisto.command() == 'domain':
return_results(get_domain_command())
elif demisto.command() == 'umbrella-get-related-domains':
demisto.results(get_related_domains_command())
elif demisto.command() == 'umbrella-get-domain-classifiers':
demisto.results(get_domain_classifiers_command())
elif demisto.command() == 'umbrella-get-domain-queryvolume':
demisto.results(get_domain_query_volume_command())
elif demisto.command() == 'umbrella-get-domain-details':
demisto.results(get_domain_details_command())
elif demisto.command() == 'umbrella-get-domains-for-email-registrar':
demisto.results(get_domains_for_email_registrar_command())
elif demisto.command() == 'umbrella-get-domains-for-nameserver':
demisto.results(get_domains_for_nameserver_command())
elif demisto.command() == 'umbrella-get-whois-for-domain':
return_results(get_whois_for_domain_command())
elif demisto.command() == 'umbrella-get-malicious-domains-for-ip':
demisto.results(get_malicious_domains_for_ip_command())
elif demisto.command() == 'umbrella-get-domains-using-regex':
demisto.results(get_domain_using_regex_command())
elif demisto.command() == 'umbrella-get-domain-timeline':
demisto.results(get_domain_timeline_command())
elif demisto.command() == 'umbrella-get-ip-timeline':
demisto.results(get_ip_timeline_command())
elif demisto.command() == 'umbrella-get-url-timeline':
demisto.results(get_url_timeline_command())
except HTTPError as e:
if e.args[0]:
return_error(e.args[0])
else:
return_error(f"HTTP error with code {e.response.status_code}")
except Exception as e:
LOG(str(e))
LOG.print_log()
return_error(str(e))
|
23,096 | def asarray(a, allow_unknown_chunksizes=False, **kwargs):
"""Convert the input to a dask array.
Parameters
----------
a : array-like
Input data, in any form that can be converted to a dask array.
Returns
-------
out : dask array
Dask array interpretation of a.
Examples
--------
>>> import dask.array as da
>>> import numpy as np
>>> x = np.arange(3)
>>> da.asarray(x)
dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
>>> y = [[1, 2, 3], [4, 5, 6]]
>>> da.asarray(y)
dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
"""
if isinstance(a, Array):
return a
elif hasattr(a, "to_dask_array"):
return a.to_dask_array()
elif type(a).__module__.split(".")[0] == "xarray" and hasattr(a, "data"):
return asarray(a.data)
elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
return stack(a, allow_unknown_chunksizes=allow_unknown_chunksizes, **kwargs)
elif not isinstance(getattr(a, "shape", None), Iterable):
a = np.asarray(a)
return from_array(a, getitem=getter_inline, **kwargs)
| def asarray(a, allow_unknown_chunksizes=False, **kwargs):
"""Convert the input to a dask array.
Parameters
----------
a : array-like
Input data, in any form that can be converted to a dask array.
Returns
-------
out : dask array
Dask array interpretation of a.
Examples
--------
>>> import dask.array as da
>>> import numpy as np
>>> x = np.arange(3)
>>> da.asarray(x)
dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
>>> y = [[1, 2, 3], [4, 5, 6]]
>>> da.asarray(y)
dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
"""
if isinstance(a, Array):
return a
elif hasattr(a, "to_dask_array"):
return a.to_dask_array()
elif type(a).__module__.split(".")[0] == "xarray" and hasattr(a, "data"):
return asarray(a.data)
elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
return stack(a, allow_unknown_chunksizes=allow_unknown_chunksizes)
elif not isinstance(getattr(a, "shape", None), Iterable):
a = np.asarray(a)
return from_array(a, getitem=getter_inline, **kwargs)
|
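One branch of asarray() above that the docstring examples do not show is the list-of-dask-arrays case, which is routed through stack(). A small usage sketch, assuming dask is installed:

import dask.array as da

parts = [da.arange(3, chunks=3), da.arange(3, chunks=3) + 10]
stacked = da.asarray(parts)               # a list containing Arrays goes through stack()
print(stacked.shape)                      # (2, 3)
print(stacked.compute())                  # rows [0, 1, 2] and [10, 11, 12] as a NumPy array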
9,417 | def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass'], no_log=True),
ssl=dict(default=False, type='bool'),
ssl_certfile=dict(default=None),
ssl_keyfile=dict(default=None),
ssl_ca_certs=dict(default=None),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
update_password=dict(default="always", choices=["always", "on_create"]),
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
),
supports_check_mode=True
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
ssl_certfile = module.params['ssl_certfile']
ssl_keyfile = module.params['ssl_keyfile']
ssl_ca_certs = module.params['ssl_ca_certs']
roles = module.params['roles'] or []
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
"host": login_host,
"port": int(login_port),
}
if replica_set:
connection_params["replicaset"] = replica_set
if ssl:
connection_params["ssl"] = ssl
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
if ssl_certfile:
connection_params['ssl_certfile'] = ssl_certfile
if ssl_keyfile:
connection_params['ssl_keyfile'] = ssl_keyfile
if ssl_ca_certs:
connection_params['ssl_ca_certs'] = ssl_ca_certs
client = MongoClient(**connection_params)
# NOTE: this check must be done ASAP.
# We don't need to be authenticated (this ability was lost in PyMongo 3.6)
if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
check_compatibility(module, client)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
# else: this has to be the first admin user added
except Exception as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
if update_password != 'always':
uinfo = user_find(client, user, db_name)
if uinfo:
password = None
if not check_if_roles_changed(uinfo, roles, db_name):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception as e:
module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
# newuinfo = user_find(client, user, db_name)
# if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
except Exception as e:
module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, user=user)
| def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass'], no_log=True),
ssl=dict(default=False, type='bool'),
ssl_certfile=dict(default=None),
ssl_keyfile=dict(default=None),
ssl_ca_certs=dict(type='path'),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
update_password=dict(default="always", choices=["always", "on_create"]),
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
),
supports_check_mode=True
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
ssl_certfile = module.params['ssl_certfile']
ssl_keyfile = module.params['ssl_keyfile']
ssl_ca_certs = module.params['ssl_ca_certs']
roles = module.params['roles'] or []
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
"host": login_host,
"port": int(login_port),
}
if replica_set:
connection_params["replicaset"] = replica_set
if ssl:
connection_params["ssl"] = ssl
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
if ssl_certfile:
connection_params['ssl_certfile'] = ssl_certfile
if ssl_keyfile:
connection_params['ssl_keyfile'] = ssl_keyfile
if ssl_ca_certs:
connection_params['ssl_ca_certs'] = ssl_ca_certs
client = MongoClient(**connection_params)
# NOTE: this check must be done ASAP.
# We don't need to be authenticated (this ability was lost in PyMongo 3.6)
if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
check_compatibility(module, client)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
# else: this has to be the first admin user added
except Exception as e:
module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
if update_password != 'always':
uinfo = user_find(client, user, db_name)
if uinfo:
password = None
if not check_if_roles_changed(uinfo, roles, db_name):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception as e:
module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
# newuinfo = user_find(client, user, db_name)
# if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
except Exception as e:
module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, user=user)
|
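The connection handling in the MongoDB module above builds the MongoClient keyword arguments conditionally from the module parameters. Below is a condensed, standalone sketch of just that assembly step; build_connection_params is a hypothetical helper written for illustration, and no MongoDB server is needed since it only constructs the dict.

import ssl as ssl_lib

def build_connection_params(host, port, replica_set=None, use_ssl=False,
                            ssl_cert_reqs="CERT_REQUIRED", ssl_ca_certs=None):
    # Mirrors the conditional construction of MongoClient kwargs in main() above.
    params = {"host": host, "port": int(port)}
    if replica_set:
        params["replicaset"] = replica_set
    if use_ssl:
        params["ssl"] = True
        params["ssl_cert_reqs"] = getattr(ssl_lib, ssl_cert_reqs)
        if ssl_ca_certs:
            params["ssl_ca_certs"] = ssl_ca_certs
    return params

print(build_connection_params("localhost", "27017", use_ssl=True))
# roughly: {'host': 'localhost', 'port': 27017, 'ssl': True, 'ssl_cert_reqs': <VerifyMode.CERT_REQUIRED: 2>}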
9,615 | def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(required=False, default='present', choices=['present', 'absent'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module')
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
module.fail_json(msg='Group %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
if '%s - project identifier is not set' in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
| def main():
module = AnsibleModule(
argument_spec=dict(
type=dict(type='str', required=True, choices=['user', 'group', 'project']),
name=dict(required=False, default=None),
mountpoint=dict(required=True),
bhard=dict(required=False, default=None),
bsoft=dict(required=False, default=None),
ihard=dict(required=False, default=None),
isoft=dict(required=False, default=None),
rtbhard=dict(required=False, default=None),
rtbsoft=dict(required=False, default=None),
state=dict(required=False, default='present', choices=['present', 'absent'])
),
supports_check_mode=True
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
changed = False
if os.getuid() != 0:
module.fail_json(msg='You need to be root to run this module')
if not os.path.ismount(mountpoint):
module.fail_json(msg='%s is not a mountpoint' % mountpoint)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg='%s is not a mountpoint or not located on an xfs filesystem.' % mountpoint)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] \
and 'usrquota' not in mp['mntopts'] \
and 'quota' not in mp['mntopts'] \
and 'uqnoenforce' not in mp['mntopts'] \
and 'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option.'
% mountpoint
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg='User %s doesn\'t exist.' % name)
if quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg='%s is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)'
% (mountpoint, mp['mntopts'])
)
try:
grp.getgrnam(name)
except KeyError as e:
module.fail_json(msg='Group %s doesn\'t exist.' % name)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg='%s is not mounted with the pquota/prjquota/pqnoenforce option.' % mountpoint)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg='/etc/projects doesn\'t exist.')
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg='/etc/projid doesn\'t exist.')
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg='%s hasn\'t been defined in /etc/projid.' % name)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get project state.', cmd=cmd, retval=r)
else:
for line in r['stdout']:
if '%s - project identifier is not set' in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not get quota realtime block report.', cmd=cmd, retval=r)
else:
changed = True
elif not prj_set and module.check_mode:
changed = True
changed = False
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
if bsoft is not None or bhard is not None:
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
if isoft is not None or ihard is not None:
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
if rtbsoft is not None or rtbhard is not None:
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
limit = []
if bsoft is not None and int(bsoft / 1024) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
if bhard is not None and int(bhard / 1024) != current_bhard:
limit.append('bhard=%s' % bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
if rtbsoft is not None and int(rtbsoft / 1024) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
if rtbhard is not None and int(rtbhard / 1024) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
r = exec_quota(module, cmd, mountpoint)
if r['rc'] != 0:
module.fail_json(msg='Could not set limits.', cmd=cmd, retval=r)
else:
changed = True
elif len(limit) > 0 and module.check_mode:
changed = True
module.exit_json(changed=changed)
return True
|
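The limit-setting branch of the xfs_quota module above assembles an xfs_quota 'limit' subcommand from only the thresholds that were supplied. Here is the string assembly on its own, as a hypothetical helper that skips the comparison against current usage and the realtime-block limits:

def build_limit_cmd(type_arg, name, quota_default, bsoft=None, bhard=None,
                    isoft=None, ihard=None):
    # Collect only the thresholds that were supplied, as key=value tokens.
    limit = []
    if bsoft is not None:
        limit.append("bsoft=%s" % bsoft)
    if bhard is not None:
        limit.append("bhard=%s" % bhard)
    if isoft is not None:
        limit.append("isoft=%s" % isoft)
    if ihard is not None:
        limit.append("ihard=%s" % ihard)
    if not limit:
        return None
    if name == quota_default:
        return "limit %s -d %s" % (type_arg, " ".join(limit))
    return "limit %s %s %s" % (type_arg, " ".join(limit), name)

print(build_limit_cmd("-u", "alice", "root", bsoft=1024 ** 3, bhard=2 * 1024 ** 3))
# limit -u bsoft=1073741824 bhard=2147483648 alice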
36,980 | def _unwrap_nested_config(parent_name, element):
"""
Function to unwrap nested config such as {"nested": {"a_nested_dict": 1.0}}.
Args:
parent_name (str): The name of the previous level of a nested config. For example, in the nested config file
'{"a_dict": {"nested_element": 1.0}}', the `parent_name` of nested_element is "a_dict".
element (Union[int, float, str, Mapping, Sequence]): The element (value) of the nested config. For example,
in the nested config file '{"a_dict": {"nested_element": 1.0}}', the `element` is
`{"nested_element": 1.0}`.
Return:
A dictionary with the unwrapped nested config.
"""
if isinstance(element, Mapping):
# Case where the value is another dict (a nested dict)
unwrapped_nested_params = {}
for key, value in element.items():
# We recursively open the element (Dict format type)
unwrapped_nested_params.update(
_unwrap_nested_config(f"{parent_name}.{key}", value)
)
return unwrapped_nested_params
elif isinstance(element, Sequence) and not isinstance(element, str):
# Case where the value is a list
# Since str is also a Sequence, we exclude it here so it falls through to the else branch below
unwrapped_nested_params = {}
for idx, value in enumerate(element):
unwrapped_nested_params.update(
_unwrap_nested_config(f"{parent_name}.{idx}", value)
)
return unwrapped_nested_params
else:
return {parent_name: element}
| def _unwrap_nested_config(parent_name, element):
"""
Function to unwrap nested config such as {"nested": {"a_nested_dict": 1.0}}.
Args:
parent_name (str): The name of the previous level of a nested config. For example, in the nested config file
'{"a_dict": {"nested_element": 1.0}}', the `parent_name` of nested_element is "a_dict".
element (Union[int, float, str, Mapping, Sequence]): The element (value) of the nested config. For example,
in the nested config file '{"a_dict": {"nested_element": 1.0}}', the `element` is
`{"nested_element": 1.0}`.
Return:
A dictionary with the unwrapped nested config.
"""
if isinstance(element, Mapping):
# Case where the value is another dict (a nested dict)
unwrapped_nested_params = {}
for key, value in element.items():
# We recursively open the element (Dict format type)
unwrapped_nested_params.update(
_unwrap_nested_config(f"{parent_name}.{key}", value)
)
return unwrapped_nested_params
elif isinstance(element, Sequence) and not isinstance(element, (str, bytes)):
# Case where the value is a list
# Since str and bytes are also Sequences, we exclude them here so they fall through to the else branch below
unwrapped_nested_params = {}
for idx, value in enumerate(element):
unwrapped_nested_params.update(
_unwrap_nested_config(f"{parent_name}.{idx}", value)
)
return unwrapped_nested_params
else:
return {parent_name: element}
|
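As a concrete check of the flattening above: a dict nested inside a list inside a dict should come out as dotted keys with list indices. A short usage sketch, assuming the modified _unwrap_nested_config above is in scope together with `from collections.abc import Mapping, Sequence`:

# Assumes the modified _unwrap_nested_config above is importable or already defined.
config = {"optimizer": {"lr": 0.01, "betas": [0.9, 0.999]}, "epochs": 5}

flat = {}
for key, value in config.items():
    flat.update(_unwrap_nested_config(key, value))

print(flat)
# {'optimizer.lr': 0.01, 'optimizer.betas.0': 0.9, 'optimizer.betas.1': 0.999, 'epochs': 5}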
41,034 | def write_comp_figs(ts, mask, comptable, mmix, ref_img, out_dir,
png_cmap):
"""
Creates static figures that highlight certain aspects of tedana processing.
This includes a figure for each component showing the component time course,
the spatial weight map, and a fast Fourier transform of the time course.
Parameters
----------
ts : (S x T) array_like
Time series from which to derive ICA betas
mask : (S,) array_like
Boolean mask array
comptable : (N x 5) array_like
Array with columns denoting (1) index of component, (2) Kappa score of
component, (3) Rho score of component, (4) variance explained by
component, and (5) normalized variance explained by component
mmix : (C x T) array_like
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the same as in `data`
ref_img : :obj:`str` or img_like
Reference image to dictate how outputs are saved to disk
out_dir : :obj:`str`
Figures folder within output directory
png_cmap : :obj:`str`
The name of a matplotlib colormap to use when making figures. Optional.
Default colormap is 'coolwarm'
"""
# Get the length of the time series
n_vols = len(mmix)
# Check that colormap provided exists
if png_cmap not in plt.colormaps():
LGR.warning('Provided colormap is not recognized, proceeding with default')
png_cmap = 'coolwarm'
# regenerate the beta images
ts_B = model.get_coeffs(ts, mmix, mask)
ts_B = ts_B.reshape(ref_img.shape[:3] + ts_B.shape[1:])
# trim edges from ts_B array
ts_B = trim_edge_zeros(ts_B)
# Mask out remaining zeros
ts_B = np.ma.masked_where(ts_B == 0, ts_B)
# Get repetition time from ref_img
tr = ref_img.header.get_zooms()[-1]
# Create indices for 6 cuts, based on dimensions
cuts = [ts_B.shape[dim] // 6 for dim in range(3)]
expl_text = ''
# Remove trailing ';' from rationale column
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
for compnum in range(0, mmix.shape[1], 1):
if comptable.iloc[compnum]["classification"] == 'accepted':
line_color = 'g'
expl_text = 'accepted'
elif comptable.iloc[compnum]["classification"] == 'rejected':
line_color = 'r'
expl_text = 'rejection reason(s): ' + comptable.iloc[compnum]["rationale"]
elif comptable.iloc[compnum]["classification"] == 'ignored':
line_color = 'k'
expl_text = 'ignored reason(s): ' + comptable.iloc[compnum]["rationale"]
else:
# Classification not added
# If new, this will keep code running
line_color = '0.75'
expl_text = 'other classification'
allplot = plt.figure(figsize=(10, 9))
ax_ts = plt.subplot2grid((5, 6), (0, 0),
rowspan=1, colspan=6,
fig=allplot)
ax_ts.set_xlabel('TRs')
ax_ts.set_xlim(0, n_vols)
plt.yticks([])
# Make a second axis with units of time (s)
max_xticks = 10
xloc = plt.MaxNLocator(max_xticks)
ax_ts.xaxis.set_major_locator(xloc)
ax_ts2 = ax_ts.twiny()
ax1Xs = ax_ts.get_xticks()
ax2Xs = []
for X in ax1Xs:
# Limit to 2 decimal places
seconds_val = round(X * tr, 2)
ax2Xs.append(seconds_val)
ax_ts2.set_xticks(ax1Xs)
ax_ts2.set_xlim(ax_ts.get_xbound())
ax_ts2.set_xticklabels(ax2Xs)
ax_ts2.set_xlabel('seconds')
ax_ts.plot(mmix[:, compnum], color=line_color)
# Title will include variance from comptable
comp_var = "{0:.2f}".format(comptable.iloc[compnum]["variance explained"])
comp_kappa = "{0:.2f}".format(comptable.iloc[compnum]["kappa"])
comp_rho = "{0:.2f}".format(comptable.iloc[compnum]["rho"])
plt_title = 'Comp. {}: variance: {}%, kappa: {}, rho: {}, {}'.format(compnum, comp_var,
comp_kappa, comp_rho,
expl_text)
title = ax_ts.set_title(plt_title)
title.set_y(1.5)
# Set range to ~1/10th of max positive or negative beta
temp_max = np.abs(ts_B[:, :, :, compnum].max())
temp_min = np.abs(ts_B[:, :, :, compnum].min())
imgmax = np.max([temp_max, temp_min]) * .1
imgmin = imgmax * -1
for idx, cut in enumerate(cuts):
for imgslice in range(1, 6):
ax = plt.subplot2grid((5, 6), (idx + 1, imgslice - 1), rowspan=1, colspan=1)
ax.axis('off')
if idx == 0:
to_plot = np.rot90(ts_B[imgslice * cuts[idx], :, :, compnum])
if idx == 1:
to_plot = np.rot90(ts_B[:, imgslice * cuts[idx], :, compnum])
if idx == 2:
to_plot = ts_B[:, :, imgslice * cuts[idx], compnum]
ax_im = ax.imshow(to_plot, vmin=imgmin, vmax=imgmax, aspect='equal',
cmap=png_cmap)
# Add a color bar to the plot.
ax_cbar = allplot.add_axes([0.8, 0.3, 0.03, 0.37])
cbar = allplot.colorbar(ax_im, ax_cbar)
cbar.set_label('Component Beta', rotation=90)
cbar.ax.yaxis.set_label_position('left')
# Get fft and freqs for this subject
# adapted from @dangom
spectrum, freqs = get_spectrum(mmix[:, compnum], tr)
# Plot it
ax_fft = plt.subplot2grid((5, 6), (4, 0), rowspan=1, colspan=6)
ax_fft.plot(freqs, spectrum)
ax_fft.set_title('One Sided fft')
ax_fft.set_xlabel('Hz')
ax_fft.set_xlim(freqs[0], freqs[-1])
plt.yticks([])
# Fix spacing so TR label does overlap with other plots
allplot.subplots_adjust(hspace=0.4)
plot_name = 'comp_{}.png'.format(str(compnum).zfill(3))
compplot_name = os.path.join(out_dir, plot_name)
plt.savefig(compplot_name)
plt.close()
| def write_comp_figs(ts, mask, comptable, mmix, ref_img, out_dir,
png_cmap):
"""
Creates static figures that highlight certain aspects of tedana processing
This includes a figure for each component showing the component time course,
the spatial weight map and a fast Fourier transform of the time course
Parameters
----------
ts : (S x T) array_like
Time series from which to derive ICA betas
mask : (S,) array_like
Boolean mask array
comptable : (N x 5) array_like
Array with columns denoting (1) index of component, (2) Kappa score of
component, (3) Rho score of component, (4) variance explained by
component, and (5) normalized variance explained by component
mmix : (C x T) array_like
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the same as in `data`
ref_img : :obj:`str` or img_like
Reference image to dictate how outputs are saved to disk
out_dir : :obj:`str`
Figures folder within output directory
png_cmap : :obj:`str`
The name of a matplotlib colormap to use when making figures. Optional.
Default colormap is 'coolwarm'
"""
# Get the lenght of the timeseries
n_vols = len(mmix)
# Check that colormap provided exists
if png_cmap not in plt.colormaps():
LGR.warning('Provided colormap is not recognized, proceeding with default')
png_cmap = 'coolwarm'
# regenerate the beta images
ts_B = model.get_coeffs(ts, mmix, mask)
ts_B = ts_B.reshape(ref_img.shape[:3] + ts_B.shape[1:])
# trim edges from ts_B array
ts_B = trim_edge_zeros(ts_B)
# Mask out remaining zeros
ts_B = np.ma.masked_where(ts_B == 0, ts_B)
# Get repetition time from ref_img
tr = ref_img.header.get_zooms()[-1]
# Create indices for 6 cuts, based on dimensions
cuts = [ts_B.shape[dim] // 6 for dim in range(3)]
expl_text = ''
# Remove trailing ';' from rationale column
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
for compnum in range(0, mmix.shape[1], 1):
if comptable.iloc[compnum]["classification"] == 'accepted':
line_color = 'g'
expl_text = 'accepted'
elif comptable.iloc[compnum]["classification"] == 'rejected':
line_color = 'r'
expl_text = 'rejection reason(s): ' + comptable.iloc[compnum]["rationale"]
elif comptable.iloc[compnum]["classification"] == 'ignored':
line_color = 'k'
expl_text = 'ignored reason(s): ' + comptable.iloc[compnum]["rationale"]
else:
# Classification not added
# If new, this will keep code running
line_color = '0.75'
expl_text = 'other classification'
allplot = plt.figure(figsize=(10, 9))
ax_ts = plt.subplot2grid((5, 6), (0, 0),
rowspan=1, colspan=6,
fig=allplot)
ax_ts.set_xlabel('TRs')
ax_ts.set_xlim(0, n_vols)
plt.yticks([])
# Make a second axis with units of time (s)
max_xticks = 10
xloc = plt.MaxNLocator(max_xticks)
ax_ts.xaxis.set_major_locator(xloc)
ax_ts2 = ax_ts.twiny()
ax1Xs = ax_ts.get_xticks()
ax2Xs = []
for X in ax1Xs:
# Limit to 2 decimal places
seconds_val = round(X * tr, 2)
ax2Xs.append(seconds_val)
ax_ts2.set_xticks(ax1Xs)
ax_ts2.set_xlim(ax_ts.get_xbound())
ax_ts2.set_xticklabels(ax2Xs)
ax_ts2.set_xlabel('seconds')
ax_ts.plot(mmix[:, compnum], color=line_color)
# Title will include variance from comptable
comp_var = "{0:.2f}".format(comptable.iloc[compnum]["variance explained"])
comp_kappa = "{0:.2f}".format(comptable.iloc[compnum]["kappa"])
comp_rho = "{0:.2f}".format(comptable.iloc[compnum]["rho"])
plt_title = 'Comp. {}: variance: {}%, kappa: {}, rho: {}, {}'.format(compnum, comp_var,
comp_kappa, comp_rho,
expl_text)
title = ax_ts.set_title(plt_title)
title.set_y(1.5)
# Set range to ~1/10th of max positive or negative beta
temp_max = np.abs(ts_B[:, :, :, compnum].max())
temp_min = np.abs(ts_B[:, :, :, compnum].min())
imgmax = 0.1 * np.abs(ts_B[:, :, :, compnum]).max()
imgmin = imgmax * -1
for idx, cut in enumerate(cuts):
for imgslice in range(1, 6):
ax = plt.subplot2grid((5, 6), (idx + 1, imgslice - 1), rowspan=1, colspan=1)
ax.axis('off')
if idx == 0:
to_plot = np.rot90(ts_B[imgslice * cuts[idx], :, :, compnum])
if idx == 1:
to_plot = np.rot90(ts_B[:, imgslice * cuts[idx], :, compnum])
if idx == 2:
to_plot = ts_B[:, :, imgslice * cuts[idx], compnum]
ax_im = ax.imshow(to_plot, vmin=imgmin, vmax=imgmax, aspect='equal',
cmap=png_cmap)
# Add a color bar to the plot.
ax_cbar = allplot.add_axes([0.8, 0.3, 0.03, 0.37])
cbar = allplot.colorbar(ax_im, ax_cbar)
cbar.set_label('Component Beta', rotation=90)
cbar.ax.yaxis.set_label_position('left')
# Get fft and freqs for this subject
# adapted from @dangom
spectrum, freqs = get_spectrum(mmix[:, compnum], tr)
# Plot it
ax_fft = plt.subplot2grid((5, 6), (4, 0), rowspan=1, colspan=6)
ax_fft.plot(freqs, spectrum)
ax_fft.set_title('One Sided fft')
ax_fft.set_xlabel('Hz')
ax_fft.set_xlim(freqs[0], freqs[-1])
plt.yticks([])
# Fix spacing so TR label does overlap with other plots
allplot.subplots_adjust(hspace=0.4)
plot_name = 'comp_{}.png'.format(str(compnum).zfill(3))
compplot_name = os.path.join(out_dir, plot_name)
plt.savefig(compplot_name)
plt.close()
|
26,232 | def test_bgp_shutdown(duthosts, rand_one_dut_hostname):
duthost=duthosts[rand_one_dut_hostname]
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="bgp_shutdown")
loganalyzer.expect_regex=[]
try:
loganalyzer.expect_regex.append(expected_log)
marker=loganalyzer.init()
duthost.command(defined_action)
logger.info("check for expected log in syslog")
loganalyzer.analyze(marker)
except LogAnalyzerError as err:
logger.error("Unable to find expected log in syslog")
raise err | def test_bgp_shutdown(duthosts, rand_one_dut_hostname):
duthost=duthosts[rand_one_dut_hostname]
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="bgp_shutdown")
loganalyzer.expect_regex=[expected_log]
try:
loganalyzer.expect_regex.append(expected_log)
marker=loganalyzer.init()
duthost.command(defined_action)
logger.info("check for expected log in syslog")
loganalyzer.analyze(marker)
except LogAnalyzerError as err:
logger.error("Unable to find expected log in syslog")
raise err |
32,223 | def panorama_get_url_category_command(url_cmd: str, url: str, additional_suspicious: list, additional_malicious: list, target: str = None):
"""
Get the url category from Palo Alto URL Filtering
"""
urls = argToList(url)
categories_dict: Dict[str, list] = {}
categories_dict_hr: Dict[str, list] = {}
command_results: List[CommandResults] = []
for url in urls:
err_readable_output = None
try:
category = panorama_get_url_category(url_cmd, url, target)
if category in categories_dict:
categories_dict[category].append(url)
categories_dict_hr[category].append(url)
else:
categories_dict[category] = [url]
categories_dict_hr[category] = [url]
context_urls = populate_url_filter_category_from_context(category)
categories_dict[category] = list((set(categories_dict[category])).union(set(context_urls)))
score = calculate_dbot_score(category.lower(), additional_suspicious, additional_malicious)
except InvalidUrlLengthException as e:
score = 0
category = None
err_readable_output = str(e)
dbot_score = Common.DBotScore(
indicator=url,
indicator_type=DBotScoreType.URL,
integration_name='PAN-OS',
score=score
)
url_obj = Common.URL(
url=url,
dbot_score=dbot_score,
category=category
)
readable_output = err_readable_output or tableToMarkdown('URL', url_obj.to_context())
command_results.append(CommandResults(
indicator=url_obj,
readable_output=readable_output
))
url_category_output_hr = []
for key, value in categories_dict_hr.items():
url_category_output_hr.append({
'Category': key,
'URL': value
})
url_category_output = []
for key, value in categories_dict.items():
url_category_output.append({
'Category': key,
'URL': value
})
title = 'URL Filtering'
if url_cmd == 'url-info-cloud':
title += ' from cloud'
elif url_cmd == 'url-info-host':
title += ' from host'
human_readable = tableToMarkdown(f'{title}:', url_category_output_hr, ['URL', 'Category'], removeNull=True)
command_results.insert(0, CommandResults(
outputs_prefix='Panorama.URLFilter',
outputs_key_field='Category',
outputs=url_category_output,
readable_output=human_readable,
raw_response=categories_dict,
))
return_results(command_results)
| def panorama_get_url_category_command(url_cmd: str, url: str, additional_suspicious: list, additional_malicious: list, target: Optional[str] = None):
"""
Get the url category from Palo Alto URL Filtering
"""
urls = argToList(url)
categories_dict: Dict[str, list] = {}
categories_dict_hr: Dict[str, list] = {}
command_results: List[CommandResults] = []
for url in urls:
err_readable_output = None
try:
category = panorama_get_url_category(url_cmd, url, target)
if category in categories_dict:
categories_dict[category].append(url)
categories_dict_hr[category].append(url)
else:
categories_dict[category] = [url]
categories_dict_hr[category] = [url]
context_urls = populate_url_filter_category_from_context(category)
categories_dict[category] = list((set(categories_dict[category])).union(set(context_urls)))
score = calculate_dbot_score(category.lower(), additional_suspicious, additional_malicious)
except InvalidUrlLengthException as e:
score = 0
category = None
err_readable_output = str(e)
dbot_score = Common.DBotScore(
indicator=url,
indicator_type=DBotScoreType.URL,
integration_name='PAN-OS',
score=score
)
url_obj = Common.URL(
url=url,
dbot_score=dbot_score,
category=category
)
readable_output = err_readable_output or tableToMarkdown('URL', url_obj.to_context())
command_results.append(CommandResults(
indicator=url_obj,
readable_output=readable_output
))
url_category_output_hr = []
for key, value in categories_dict_hr.items():
url_category_output_hr.append({
'Category': key,
'URL': value
})
url_category_output = []
for key, value in categories_dict.items():
url_category_output.append({
'Category': key,
'URL': value
})
title = 'URL Filtering'
if url_cmd == 'url-info-cloud':
title += ' from cloud'
elif url_cmd == 'url-info-host':
title += ' from host'
human_readable = tableToMarkdown(f'{title}:', url_category_output_hr, ['URL', 'Category'], removeNull=True)
command_results.insert(0, CommandResults(
outputs_prefix='Panorama.URLFilter',
outputs_key_field='Category',
outputs=url_category_output,
readable_output=human_readable,
raw_response=categories_dict,
))
return_results(command_results)
|
42,864 | def bloch_messiah(S, tol=10):
r""" Performs the Bloch-Messiah decomposition of a symplectic matrix in terms of
two symplectic unitaries and squeezing transformation.
It automatically sorts the squeezers so that they respect the canonical symplectic form.
Note that it is assumed that the symplectic form is
..math:: \Omega = \begin{bmatrix}0&I\\-I&0\end{bmatrix}
where :math:`I` is the identity matrix and :math:`0` is the zero matrix.
For more info see:
https://math.stackexchange.com/questions/1886038/finding-euler-decomposition-of-a-symplectic-matrix
Args:
S (array): A symplectic matrix S
tol (int): the number of decimal places to use when determining if the matrix is symplectic
Returns:
tuple[array]: Returns the tuple ``(ut1, st1, vt1)``. ``ut1`` and ``vt1`` are symplectic unitaries,
and ``st1`` is diagonal and of the form :math:`= \text{diag}(s1,\dots,s_n, 1/s_1,\dots,1/s_n)`
such that :math:`S = ut1 st1 v1`
"""
(n, m) = S.shape
if n != m:
raise ValueError("The input matrix is not square")
if n%2 != 0:
raise ValueError("The input matrix must have an even number of rows/columns")
n = n//2
omega = sympmat(n)
if np.round(np.linalg.norm(np.transpose(S) @ omega @ S - omega), tol) != 0.0:
raise ValueError("The input matrix is not symplectic")
u, sigma = polar(S, side='left')
ss, uss = takagi(sigma)
## Apply a permutation matrix so that the squeezers appear in the order
## s_1,...,s_n, 1/s_1,...1/s_n
perm = np.array(list(range(0, n)) + list(reversed(range(n, 2*n))))
pmat = np.identity(2*n)[perm, :]
ut = uss @ pmat
## Apply a second permutation matrix to permute s
## (and their corresonding inverses) to get the canonical symplectic form
qomega = np.transpose(ut) @ (omega) @ ut
st = pmat @ np.diag(ss) @ pmat
# Identifying degenrate subspaces
result = []
for _k, g in groupby(np.diag(st)[:n]):
result.append(list(g))
stop_is = list(np.cumsum([len(res) for res in result]))
start_is = [0] + stop_is[:-1]
# Rotation matrices (not permutations) based on svd.
# See Appending B2 of Serafini's book for more details.
u_list, v_list = [], []
for start_i, stop_i in zip(start_is, stop_is):
x = qomega[start_i: stop_i, n + start_i: n + stop_i].real
u_svd, _s_svd, v_svd = np.linalg.svd(x)
u_list = u_list + [u_svd]
v_list = v_list + [v_svd.T]
pmat1 = block_diag(*(u_list + v_list))
st1 = pmat1.T @ pmat @ np.diag(ss) @ pmat @ pmat1
ut1 = uss @ pmat @ pmat1
v1 = np.transpose(ut1) @ u
return ut1, st1, v1
| def bloch_messiah(S, tol=10):
r""" Performs the Bloch-Messiah decomposition of a symplectic matrix in terms of
two symplectic unitaries and squeezing transformation.
It automatically sorts the squeezers so that they respect the canonical symplectic form.
Note that it is assumed that the symplectic form is
..math:: \Omega = \begin{bmatrix}0&I\\-I&0\end{bmatrix}
where :math:`I` is the identity matrix and :math:`0` is the zero matrix.
For more info see:
https://math.stackexchange.com/questions/1886038/finding-euler-decomposition-of-a-symplectic-matrix
Args:
S (array): A symplectic matrix S
tol (int): the number of decimal places to use when determining if the matrix is symplectic
Returns:
tuple[array]: Returns the tuple ``(ut1, st1, vt1)``. ``ut1`` and ``vt1`` are symplectic unitaries,
and ``st1`` is diagonal and of the form :math:`= \text{diag}(s1,\dots,s_n, 1/s_1,\dots,1/s_n)`
such that :math:`S = ut1 st1 v1`
"""
(n, m) = S.shape
if n != m:
raise ValueError("The input matrix is not square")
if n%2 != 0:
raise ValueError("The input matrix must have an even number of rows/columns")
n = n//2
omega = sympmat(n)
if np.round(np.linalg.norm(np.transpose(S) @ omega @ S - omega), tol) != 0.0:
raise ValueError("The input matrix is not symplectic")
u, sigma = polar(S, side='left')
ss, uss = takagi(sigma)
## Apply a permutation matrix so that the squeezers appear in the order
## s_1,...,s_n, 1/s_1,...1/s_n
perm = np.array(list(range(0, n)) + list(reversed(range(n, 2*n))))
pmat = np.identity(2*n)[perm, :]
ut = uss @ pmat
## Apply a second permutation matrix to permute s
## (and their corresonding inverses) to get the canonical symplectic form
qomega = np.transpose(ut) @ (omega) @ ut
st = pmat @ np.diag(ss) @ pmat
# Identifying degenerate subspaces
result = []
for _k, g in groupby(np.diag(st)[:n]):
result.append(list(g))
stop_is = list(np.cumsum([len(res) for res in result]))
start_is = [0] + stop_is[:-1]
# Rotation matrices (not permutations) based on svd.
# See Appending B2 of Serafini's book for more details.
u_list, v_list = [], []
for start_i, stop_i in zip(start_is, stop_is):
x = qomega[start_i: stop_i, n + start_i: n + stop_i].real
u_svd, _s_svd, v_svd = np.linalg.svd(x)
u_list = u_list + [u_svd]
v_list = v_list + [v_svd.T]
pmat1 = block_diag(*(u_list + v_list))
st1 = pmat1.T @ pmat @ np.diag(ss) @ pmat @ pmat1
ut1 = uss @ pmat @ pmat1
v1 = np.transpose(ut1) @ u
return ut1, st1, v1
|
16,423 | def get_device_uptime(uptime: float, last_uptime: datetime | None) -> datetime:
"""Return device uptime string, tolerate up to 5 seconds deviation."""
delta_uptime = utcnow() - timedelta(seconds=uptime)
if (
not last_uptime
or abs((delta_uptime - last_uptime).total_seconds()) > UPTIME_DEVIATION
):
return delta_uptime.replace(microsecond=0)
return last_uptime
| def get_device_uptime(uptime: float, last_uptime: datetime | None) -> datetime:
"""Return device uptime string, tolerate up to 5 seconds deviation."""
delta_uptime = utcnow() - timedelta(seconds=uptime)
if (
not last_uptime
or abs((delta_uptime - last_uptime).total_seconds()) > UPTIME_DEVIATION
):
return delta_uptime
return last_uptime
|
33,267 | def decode(data, encoding='utf-8'):
assert encoding == 'utf-8', "Only UTF-8 encoding is currently supported."
if encoding is not None:
try:
data = data.decode(encoding)
except Exception as e:
if os.environ.get('DEBUG'):
tb = sys.exc_info()[2]
raise e.with_traceback(tb)
logger.warning("An decoding error has occurred... continuing anyway. To capture these errors, rerun the current command prefixed with `DEBUG=1 `.")
data = data.decode(encoding, errors='ignore')
return data
| def decode(data, encoding='utf-8'):
assert encoding == 'utf-8', "Only UTF-8 encoding is currently supported."
if encoding is not None:
try:
data = data.decode(encoding)
except Exception as e:
if os.environ.get('DEBUG'):
tb = sys.exc_info()[2]
raise e.with_traceback(tb=sys.exc_info()[2])
logger.warning("An decoding error has occurred... continuing anyway. To capture these errors, rerun the current command prefixed with `DEBUG=1 `.")
data = data.decode(encoding, errors='ignore')
return data
|
24,890 | def _get_quote_delimiter(string_token: str) -> str:
"""Returns the quote character used to delimit this token string.
This function check whether the token is a well-formed string.
Args:
string_token: The token to be parsed.
Returns:
A string containing solely the first quote delimiter character in the
given string.
Raises:
ValueError: No quote delimiter characters are present.
"""
match = QUOTE_DELIMITER_REGEX.match(string_token)
if not match:
raise ValueError(f"string token {string_token} is not a well-formed string")
return match.group(2)
| def _get_quote_delimiter(string_token: str) -> str:
"""Returns the quote character used to delimit this token string.
This function checks whether the token is a well-formed string.
Args:
string_token: The token to be parsed.
Returns:
A string containing solely the first quote delimiter character in the
given string.
Raises:
ValueError: No quote delimiter characters are present.
"""
match = QUOTE_DELIMITER_REGEX.match(string_token)
if not match:
raise ValueError(f"string token {string_token} is not a well-formed string")
return match.group(2)
|
27,480 | def modify_policy_add_role(crm_service, project_id, role, member):
"""Adds a new role binding to a policy."""
policy = get_policy(crm_service, project_id)
binding = None
for b in policy["bindings"]:
if b["role"] == role:
binding = b
break
if binding != None:
binding["members"].append(member)
else:
binding = {"role": role, "members": [member]}
policy["bindings"].append(binding)
set_policy(crm_service, project_id, policy)
| def modify_policy_add_role(crm_service, project_id, role, member):
"""Adds a new role binding to a policy."""
policy = get_policy(crm_service, project_id)
binding = None
for b in policy["bindings"]:
if b["role"] == role:
binding = b
break
if binding is not None:
binding["members"].append(member)
else:
binding = {"role": role, "members": [member]}
policy["bindings"].append(binding)
set_policy(crm_service, project_id, policy)
|
23,053 | def full(shape, fill_value, dtype=None, order="C", chunks=None):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(f"fill_value must be scalar. Received {fill_value} instead.")
return _full(
shape=shape, fill_value=fill_value, dtype=dtype, order=order, chunks=chunks
)
| def full(shape, fill_value, dtype=None, order="C", chunks=None):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(f"fill_value must be scalar. Received {type(fill_value)} instead.")
return _full(
shape=shape, fill_value=fill_value, dtype=dtype, order=order, chunks=chunks
)
|
955 | def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
* When undetermined coefficients are identified:
This happens when it is possible to form a linear set of
equations in the variables provided from the coefficients
of the expressions in symbols not provided. A single
dictionary with specified values will be returned:
>>> eq = (a + b)*x - b + 2
>>> solve(eq, a, b)
{a: -2, b: 2}
The coefficient system solved was:
>>> list(eq.expand().as_coefficients_dict(x).values())
[a + b, 2 - b]
To obtain an algebraic solution in terms of ``a`` or ``b``
pass the equation in a list:
>>> solve([eq], a, b)
{a: b*(1 - x)/x - 2/x}
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
known limit (without dicontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = 3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, ect.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# check for duplicates
if len(symbols) != len(set(symbols)):
raise ValueError('duplicate symbols given')
# remove those not of interest
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# udpate symset
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
# solution is:
# dict for coeficient system with one solution
# list of values
# list of dicts
else:
solution = _solve_system(f, symbols, **flags)
# solution is:
# dict for linear/monotonic solution
# list of dicts
# list of tuples
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
if isinstance(solution, dict):
solution = _do_dict(solution)
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
else:
solution = [v.subs(non_inverts) for v in solution]
elif not solution:
pass
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
if as_set:
return symbols, set()
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
| def solve(f, *symbols, **flags):
r"""
Algebraically solves equations and systems of equations.
Explanation
===========
Currently supported:
- polynomial
- transcendental
- piecewise combinations of the above
- systems of linear and polynomial equations
- systems containing relational expressions
Examples
========
The output varies according to the input and can be seen by example:
>>> from sympy import solve, Poly, Eq, Function, exp
>>> from sympy.abc import x, y, z, a, b
>>> f = Function('f')
Boolean or univariate Relational:
>>> solve(x < 3)
(-oo < x) & (x < 3)
To always get a list of solution mappings, use flag dict=True:
>>> solve(x - 3, dict=True)
[{x: 3}]
>>> sol = solve([x - 3, y - 1], dict=True)
>>> sol
[{x: 3, y: 1}]
>>> sol[0][x]
3
>>> sol[0][y]
1
To get a list of *symbols* and set of solution(s) use flag set=True:
>>> solve([x**2 - 3, y - 1], set=True)
([x, y], {(-sqrt(3), 1), (sqrt(3), 1)})
Single expression and single symbol that is in the expression:
>>> solve(x - y, x)
[y]
>>> solve(x - 3, x)
[3]
>>> solve(Eq(x, 3), x)
[3]
>>> solve(Poly(x - 3), x)
[3]
>>> solve(x**2 - y**2, x, set=True)
([x], {(-y,), (y,)})
>>> solve(x**4 - 1, x, set=True)
([x], {(-1,), (1,), (-I,), (I,)})
Single expression with no symbol that is in the expression:
>>> solve(3, x)
[]
>>> solve(x - 3, y)
[]
Single expression with no symbol given. In this case, all free *symbols*
will be selected as potential *symbols* to solve for. If the equation is
univariate then a list of solutions is returned; otherwise - as is the case
when *symbols* are given as an iterable of length greater than 1 - a list of
mappings will be returned:
>>> solve(x - 3)
[3]
>>> solve(x**2 - y**2)
[{x: -y}, {x: y}]
>>> solve(z**2*x**2 - z**2*y**2)
[{x: -y}, {x: y}, {z: 0}]
>>> solve(z**2*x - z**2*y**2)
[{x: y**2}, {z: 0}]
When an object other than a Symbol is given as a symbol, it is
isolated algebraically and an implicit solution may be obtained.
This is mostly provided as a convenience to save you from replacing
the object with a Symbol and solving for that Symbol. It will only
work if the specified object can be replaced with a Symbol using the
subs method:
>>> solve(f(x) - x, f(x))
[x]
>>> solve(f(x).diff(x) - f(x) - x, f(x).diff(x))
[x + f(x)]
>>> solve(f(x).diff(x) - f(x) - x, f(x))
[-x + Derivative(f(x), x)]
>>> solve(x + exp(x)**2, exp(x), set=True)
([exp(x)], {(-sqrt(-x),), (sqrt(-x),)})
>>> from sympy import Indexed, IndexedBase, Tuple, sqrt
>>> A = IndexedBase('A')
>>> eqs = Tuple(A[1] + A[2] - 3, A[1] - A[2] + 1)
>>> solve(eqs, eqs.atoms(Indexed))
{A[1]: 1, A[2]: 2}
* To solve for a symbol implicitly, use implicit=True:
>>> solve(x + exp(x), x)
[-LambertW(1)]
>>> solve(x + exp(x), x, implicit=True)
[-exp(x)]
* It is possible to solve for anything that can be targeted with
subs:
>>> solve(x + 2 + sqrt(3), x + 2)
[-sqrt(3)]
>>> solve((x + 2 + sqrt(3), x + 4 + y), y, x + 2)
{y: -2 + sqrt(3), x + 2: -sqrt(3)}
* Nothing heroic is done in this implicit solving so you may end up
with a symbol still in the solution:
>>> eqs = (x*y + 3*y + sqrt(3), x + 4 + y)
>>> solve(eqs, y, x + 2)
{y: -sqrt(3)/(x + 3), x + 2: -2*x/(x + 3) - 6/(x + 3) + sqrt(3)/(x + 3)}
>>> solve(eqs, y*x, x)
{x: -y - 4, x*y: -3*y - sqrt(3)}
* If you attempt to solve for a number remember that the number
you have obtained does not necessarily mean that the value is
equivalent to the expression obtained:
>>> solve(sqrt(2) - 1, 1)
[sqrt(2)]
>>> solve(x - y + 1, 1) # /!\ -1 is targeted, too
[x/(y - 1)]
>>> [_.subs(z, -1) for _ in solve((x - y + 1).subs(-1, z), 1)]
[-x + y]
* To solve for a function within a derivative, use ``dsolve``.
Single expression and more than one symbol:
* When there is a linear solution:
>>> solve(x - y**2, x, y)
[(y**2, y)]
>>> solve(x**2 - y, x, y)
[(x, x**2)]
>>> solve(x**2 - y, x, y, dict=True)
[{y: x**2}]
* If there is no linear solution, then the first successful
attempt for a nonlinear solution will be returned:
>>> solve(x**2 - y**2, x, y, dict=True)
[{x: -y}, {x: y}]
>>> solve(x**2 - y**2/exp(x), x, y, dict=True)
[{x: 2*LambertW(-y/2)}, {x: 2*LambertW(y/2)}]
>>> solve(x**2 - y**2/exp(x), y, x)
[(-x*sqrt(exp(x)), x), (x*sqrt(exp(x)), x)]
* When undetermined coefficients are identified:
This happens when it is possible to form a linear set of
equations in the variables provided from the coefficients
of the expressions in symbols not provided. A single
dictionary with specified values will be returned:
>>> eq = (a + b)*x - b + 2
>>> solve(eq, a, b)
{a: -2, b: 2}
The coefficient system solved was:
>>> list(eq.expand().as_coefficients_dict(x).values())
[a + b, 2 - b]
To obtain an algebraic solution in terms of ``a`` or ``b``
pass the equation in a list:
>>> solve([eq], a, b)
{a: b*(1 - x)/x - 2/x}
Iterable of one or more of the above:
* Involving relationals or bools:
>>> solve([x < 3, x - 2])
Eq(x, 2)
>>> solve([x > 3, x - 2])
False
* When the system is linear:
* With a solution:
>>> solve([x - 3], x)
{x: 3}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - 15), x, y, z)
{x: -3, y: 1}
>>> solve((x + 5*y - 2, -3*x + 6*y - z), z, x, y)
{x: 2 - 5*y, z: 21*y - 6}
* Without a solution:
>>> solve([x + 3, x - 3])
[]
* When the system is not linear:
>>> solve([x**2 + y -2, y**2 - 4], x, y, set=True)
([x, y], {(-2, -2), (0, 2), (2, -2)})
* If no *symbols* are given, all free *symbols* will be selected and a
list of mappings returned:
>>> solve([x - 2, x**2 + y])
[{x: 2, y: -4}]
>>> solve([x - 2, x**2 + f(x)], {f(x), x})
[{x: 2, f(x): -4}]
* If any equation does not depend on the symbol(s) given, it will be
eliminated from the equation set and an answer may be given
implicitly in terms of variables that were not of interest:
>>> solve([x - y, y - 3], x)
{x: y}
**Additional Examples**
``solve()`` with check=True (default) will run through the symbol tags to
eliminate unwanted solutions. If no assumptions are included, all possible
solutions will be returned:
>>> from sympy import Symbol, solve
>>> x = Symbol("x")
>>> solve(x**2 - 1)
[-1, 1]
By using the positive tag, only one solution will be returned:
>>> pos = Symbol("pos", positive=True)
>>> solve(pos**2 - 1)
[1]
Assumptions are not checked when ``solve()`` input involves
relationals or bools.
When the solutions are checked, those that make any denominator zero
are automatically excluded. If you do not want to exclude such solutions,
then use the check=False option:
>>> from sympy import sin, limit
>>> solve(sin(x)/x) # 0 is excluded
[pi]
If check=False, then a solution to the numerator being zero is found: x = 0.
In this case, this is a spurious solution since $\sin(x)/x$ has the well
known limit (without discontinuity) of 1 at x = 0:
>>> solve(sin(x)/x, check=False)
[0, pi]
In the following case, however, the limit exists and is equal to the
value of x = 0 that is excluded when check=True:
>>> eq = x**2*(1/x - z**2/x)
>>> solve(eq, x)
[]
>>> solve(eq, x, check=False)
[0]
>>> limit(eq, x, 0, '-')
0
>>> limit(eq, x, 0, '+')
0
**Disabling High-Order Explicit Solutions**
When solving polynomial expressions, you might not want explicit solutions
(which can be quite long). If the expression is univariate, ``CRootOf``
instances will be returned instead:
>>> solve(x**3 - x + 1)
[-1/((-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)) -
(-1/2 - sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3,
-(-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/((-1/2 + sqrt(3)*I/2)*(3*sqrt(69)/2 + 27/2)**(1/3)),
-(3*sqrt(69)/2 + 27/2)**(1/3)/3 -
1/(3*sqrt(69)/2 + 27/2)**(1/3)]
>>> solve(x**3 - x + 1, cubics=False)
[CRootOf(x**3 - x + 1, 0),
CRootOf(x**3 - x + 1, 1),
CRootOf(x**3 - x + 1, 2)]
If the expression is multivariate, no solution might be returned:
>>> solve(x**3 - x + a, x, cubics=False)
[]
Sometimes solutions will be obtained even when a flag is False because the
expression could be factored. In the following example, the equation can
be factored as the product of a linear and a quadratic factor so explicit
solutions (which did not require solving a cubic expression) are obtained:
>>> eq = x**3 + 3*x**2 + x - 1
>>> solve(eq, cubics=False)
[-1, -1 + sqrt(2), -sqrt(2) - 1]
**Solving Equations Involving Radicals**
Because of SymPy's use of the principle root, some solutions
to radical equations will be missed unless check=False:
>>> from sympy import root
>>> eq = root(x**3 - 3*x**2, 3) + 1 - x
>>> solve(eq)
[]
>>> solve(eq, check=False)
[1/3]
In the above example, there is only a single solution to the
equation. Other expressions will yield spurious roots which
must be checked manually; roots which give a negative argument
to odd-powered radicals will also need special checking:
>>> from sympy import real_root, S
>>> eq = root(x, 3) - root(x, 5) + S(1)/7
>>> solve(eq) # this gives 2 solutions but misses a 3rd
[CRootOf(7*x**5 - 7*x**3 + 1, 1)**15,
CRootOf(7*x**5 - 7*x**3 + 1, 2)**15]
>>> sol = solve(eq, check=False)
>>> [abs(eq.subs(x,i).n(2)) for i in sol]
[0.48, 0.e-110, 0.e-110, 0.052, 0.052]
The first solution is negative so ``real_root`` must be used to see that it
satisfies the expression:
>>> abs(real_root(eq.subs(x, sol[0])).n(2))
0.e-110
If the roots of the equation are not real then more care will be
necessary to find the roots, especially for higher order equations.
Consider the following expression:
>>> expr = root(x, 3) - root(x, 5)
We will construct a known value for this expression at x = -3 by selecting
the 1-th root for each radical:
>>> expr1 = root(x, 3, 1) - root(x, 5, 1)
>>> v = expr1.subs(x, -3)
The ``solve`` function is unable to find any exact roots to this equation:
>>> eq = Eq(expr, v); eq1 = Eq(expr1, v)
>>> solve(eq, check=False), solve(eq1, check=False)
([], [])
The function ``unrad``, however, can be used to get a form of the equation
for which numerical roots can be found:
>>> from sympy.solvers.solvers import unrad
>>> from sympy import nroots
>>> e, (p, cov) = unrad(eq)
>>> pvals = nroots(e)
>>> inversion = solve(cov, x)[0]
>>> xvals = [inversion.subs(p, i) for i in pvals]
Although ``eq`` or ``eq1`` could have been used to find ``xvals``, the
solution can only be verified with ``expr1``:
>>> z = expr - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z.subs(x, xi).n()) < 1e-9]
[]
>>> z1 = expr1 - v
>>> [xi.n(chop=1e-9) for xi in xvals if abs(z1.subs(x, xi).n()) < 1e-9]
[-3.0]
Parameters
==========
f :
- a single Expr or Poly that must be zero
- an Equality
- a Relational expression
- a Boolean
- iterable of one or more of the above
symbols : (object(s) to solve for) specified as
- none given (other non-numeric objects will be used)
- single symbol
- denested list of symbols
(e.g., ``solve(f, x, y)``)
- ordered iterable of symbols
(e.g., ``solve(f, [x, y])``)
flags :
dict=True (default is False)
Return list (perhaps empty) of solution mappings.
set=True (default is False)
Return list of symbols and set of tuple(s) of solution(s).
exclude=[] (default)
Do not try to solve for any of the free symbols in exclude;
if expressions are given, the free symbols in them will
be extracted automatically.
check=True (default)
If False, do not do any testing of solutions. This can be
useful if you want to include solutions that make any
denominator zero.
numerical=True (default)
Do a fast numerical check if *f* has only one symbol.
minimal=True (default is False)
A very fast, minimal testing.
warn=True (default is False)
Show a warning if ``checksol()`` could not conclude.
simplify=True (default)
Simplify all but polynomials of order 3 or greater before
returning them and (if check is not False) use the
general simplify function on the solutions and the
expression obtained when they are substituted into the
function which should be zero.
force=True (default is False)
Make positive all symbols without assumptions regarding sign.
rational=True (default)
Recast Floats as Rational; if this option is not used, the
system containing Floats may fail to solve because of issues
with polys. If rational=None, Floats will be recast as
rationals but the answer will be recast as Floats. If the
flag is False then nothing will be done to the Floats.
manual=True (default is False)
Do not use the polys/matrix method to solve a system of
equations, solve them one at a time as you might "manually."
implicit=True (default is False)
Allows ``solve`` to return a solution for a pattern in terms of
other functions that contain that pattern; this is only
needed if the pattern is inside of some invertible function
like cos, exp, etc.
particular=True (default is False)
Instructs ``solve`` to try to find a particular solution to
a linear system with as many zeros as possible; this is very
expensive.
quick=True (default is False; ``particular`` must be True)
Selects a fast heuristic to find a solution with many zeros
whereas a value of False uses the very slow method guaranteed
to find the largest number of zeros possible.
cubics=True (default)
Return explicit solutions when cubic expressions are encountered.
When False, quartics and quintics are disabled, too.
quartics=True (default)
Return explicit solutions when quartic expressions are encountered.
When False, quintics are disabled, too.
quintics=True (default)
Return explicit solutions (if possible) when quintic expressions
are encountered.
See Also
========
rsolve: For solving recurrence relationships
dsolve: For solving differential equations
"""
from .inequalities import reduce_inequalities
# set solver types explicitly; as soon as one is False
# all the rest will be False
###########################################################################
hints = ('cubics', 'quartics', 'quintics')
default = True
for k in hints:
default = flags.setdefault(k, bool(flags.get(k, default)))
# keeping track of how f was passed since if it is a list
# a dictionary of results will be returned.
###########################################################################
def _sympified_list(w):
return list(map(sympify, w if iterable(w) else [w]))
bare_f = not iterable(f)
# check flag usage for particular/quick which should only be used
# with systems of equations
if flags.get('quick', None) is not None:
if not flags.get('particular', None):
raise ValueError('when using `quick`, `particular` should be True')
if flags.get('particular', False) and bare_f:
raise ValueError(filldedent("""
The 'particular/quick' flag is usually used with systems of
equations. Either pass your equation in a list or
consider using a solver like `diophantine` if you are
looking for a solution in integers."""))
f, symbols = (_sympified_list(w) for w in [f, symbols])
if isinstance(f, list):
f = [s for s in f if s is not S.true and s is not True]
implicit = flags.get('implicit', False)
# preprocess symbol(s)
###########################################################################
ordered_symbols = None # were the symbols in a well defined order?
if not symbols:
# get symbols from equations
symbols = set().union(*[fi.free_symbols for fi in f])
if len(symbols) < len(f):
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if isinstance(p, AppliedUndef):
flags['dict'] = True # better show symbols
symbols.add(p)
pot.skip() # don't go any deeper
ordered_symbols = False
symbols = list(ordered(symbols)) # to make it canonical
else:
if len(symbols) == 1 and iterable(symbols[0]):
symbols = symbols[0]
ordered_symbols = symbols and is_sequence(symbols,
include=GeneratorType)
_symbols = list(uniq(symbols))
if len(_symbols) != len(symbols):
ordered_symbols = False
symbols = list(ordered(symbols))
else:
symbols = _symbols
# check for duplicates
if len(symbols) != len(set(symbols)):
raise ValueError('duplicate symbols given')
# remove those not of interest
exclude = flags.pop('exclude', set())
if exclude:
if isinstance(exclude, Expr):
exclude = [exclude]
exclude = set().union(*[e.free_symbols for e in sympify(exclude)])
symbols = [s for s in symbols if s not in exclude]
# preprocess equation(s)
###########################################################################
for i, fi in enumerate(f):
if isinstance(fi, (Eq, Ne)):
if 'ImmutableDenseMatrix' in [type(a).__name__ for a in fi.args]:
fi = fi.lhs - fi.rhs
else:
L, R = fi.args
if isinstance(R, BooleanAtom):
L, R = R, L
if isinstance(L, BooleanAtom):
if isinstance(fi, Ne):
L = ~L
if R.is_Relational:
fi = ~R if L is S.false else R
elif R.is_Symbol:
return L
elif R.is_Boolean and (~R).is_Symbol:
return ~L
else:
raise NotImplementedError(filldedent('''
Unanticipated argument of Eq when other arg
is True or False.
'''))
else:
fi = fi.rewrite(Add, evaluate=False)
f[i] = fi
if fi.is_Relational:
return reduce_inequalities(f, symbols=symbols)
if isinstance(fi, Poly):
f[i] = fi.as_expr()
# rewrite hyperbolics in terms of exp if they have symbols of
# interest
f[i] = f[i].replace(lambda w: isinstance(w, HyperbolicFunction) and \
w.has_free(*symbols), lambda w: w.rewrite(exp))
# if we have a Matrix, we need to iterate over its elements again
if f[i].is_Matrix:
bare_f = False
f.extend(list(f[i]))
f[i] = S.Zero
# if we can split it into real and imaginary parts then do so
freei = f[i].free_symbols
if freei and all(s.is_extended_real or s.is_imaginary for s in freei):
fr, fi = f[i].as_real_imag()
# accept as long as new re, im, arg or atan2 are not introduced
had = f[i].atoms(re, im, arg, atan2)
if fr and fi and fr != fi and not any(
i.atoms(re, im, arg, atan2) - had for i in (fr, fi)):
if bare_f:
bare_f = False
f[i: i + 1] = [fr, fi]
# real/imag handling -----------------------------
if any(isinstance(fi, (bool, BooleanAtom)) for fi in f):
if flags.get('set', False):
return [], set()
return []
for i, fi in enumerate(f):
# Abs
while True:
was = fi
fi = fi.replace(Abs, lambda arg:
separatevars(Abs(arg)).rewrite(Piecewise) if arg.has(*symbols)
else Abs(arg))
if was == fi:
break
for e in fi.find(Abs):
if e.has(*symbols):
raise NotImplementedError('solving %s when the argument '
'is not real or imaginary.' % e)
# arg
fi = fi.replace(arg, lambda a: arg(a).rewrite(atan2).rewrite(atan))
# save changes
f[i] = fi
# see if re(s) or im(s) appear
freim = [fi for fi in f if fi.has(re, im)]
if freim:
irf = []
for s in symbols:
if s.is_real or s.is_imaginary:
continue # neither re(x) nor im(x) will appear
# if re(s) or im(s) appear, the auxiliary equation must be present
if any(fi.has(re(s), im(s)) for fi in freim):
irf.append((s, re(s) + S.ImaginaryUnit*im(s)))
if irf:
for s, rhs in irf:
f = [fi.xreplace({s: rhs}) for fi in f] + [s - rhs]
symbols.extend([re(s), im(s)])
if bare_f:
bare_f = False
flags['dict'] = True
# end of real/imag handling -----------------------------
# we can solve for non-symbol entities by replacing them with Dummy symbols
f, symbols, swap_sym = recast_to_symbols(f, symbols)
# this set of symbols (perhaps recast) is needed below
symset = set(symbols)
# get rid of equations that have no symbols of interest; we don't
# try to solve them because the user didn't ask and they might be
# hard to solve; this means that solutions may be given in terms
# of the eliminated equations e.g. solve((x-y, y-3), x) -> {x: y}
newf = []
for fi in f:
# let the solver handle equations that..
# - have no symbols but are expressions
# - have symbols of interest
# - have no symbols of interest but are constant
# but when an expression is not constant and has no symbols of
# interest, it can't change what we obtain for a solution from
# the remaining equations so we don't include it; and if it's
# zero it can be removed and if it's not zero, there is no
# solution for the equation set as a whole
#
# The reason for doing this filtering is to allow an answer
# to be obtained to queries like solve((x - y, y), x); without
# this mod the return value is []
ok = False
if fi.free_symbols & symset:
ok = True
else:
if fi.is_number:
if fi.is_Number:
if fi.is_zero:
continue
return []
ok = True
else:
if fi.is_constant():
ok = True
if ok:
newf.append(fi)
if not newf:
return []
f = newf
del newf
# mask off any Object that we aren't going to invert: Derivative,
# Integral, etc... so that solving for anything that they contain will
# give an implicit solution
seen = set()
non_inverts = set()
for fi in f:
pot = preorder_traversal(fi)
for p in pot:
if not isinstance(p, Expr) or isinstance(p, Piecewise):
pass
elif (isinstance(p, bool) or
not p.args or
p in symset or
p.is_Add or p.is_Mul or
p.is_Pow and not implicit or
p.is_Function and not implicit) and p.func not in (re, im):
continue
elif p not in seen:
seen.add(p)
if p.free_symbols & symset:
non_inverts.add(p)
else:
continue
pot.skip()
del seen
non_inverts = dict(list(zip(non_inverts, [Dummy() for _ in non_inverts])))
f = [fi.subs(non_inverts) for fi in f]
# Both xreplace and subs are needed below: xreplace to force substitution
# inside Derivative, subs to handle non-straightforward substitutions
non_inverts = [(v, k.xreplace(swap_sym).subs(swap_sym)) for k, v in non_inverts.items()]
# rationalize Floats
floats = False
if flags.get('rational', True) is not False:
for i, fi in enumerate(f):
if fi.has(Float):
floats = True
f[i] = nsimplify(fi, rational=True)
# capture any denominators before rewriting since
# they may disappear after the rewrite, e.g. issue 14779
flags['_denominators'] = _simple_dens(f[0], symbols)
# Any embedded piecewise functions need to be brought out to the
# top level so that the appropriate strategy gets selected.
# However, this is necessary only if one of the piecewise
# functions depends on one of the symbols we are solving for.
def _has_piecewise(e):
if e.is_Piecewise:
return e.has(*symbols)
return any(_has_piecewise(a) for a in e.args)
for i, fi in enumerate(f):
if _has_piecewise(fi):
f[i] = piecewise_fold(fi)
#
# try to get a solution
###########################################################################
if bare_f:
solution = _solve(f[0], *symbols, **flags)
# solution is:
# dict for coefficient system with one solution
# list of values
# list of dicts
else:
solution = _solve_system(f, symbols, **flags)
# solution is:
# dict for linear/monotonic solution
# list of dicts
# list of tuples
#
# postprocessing
###########################################################################
# Restore masked-off objects
if non_inverts:
def _do_dict(solution):
return {k: v.subs(non_inverts) for k, v in
solution.items()}
if isinstance(solution, dict):
solution = _do_dict(solution)
elif solution and isinstance(solution, list):
if isinstance(solution[0], dict):
solution = [_do_dict(s) for s in solution]
elif isinstance(solution[0], tuple):
solution = [tuple([v.subs(non_inverts) for v in s]) for s
in solution]
else:
solution = [v.subs(non_inverts) for v in solution]
elif not solution:
pass
else:
raise NotImplementedError(filldedent('''
no handling of %s was implemented''' % solution))
# Restore original "symbols" if a dictionary is returned.
# This is not necessary for
# - the single univariate equation case
# since the symbol will have been removed from the solution;
# - the nonlinear poly_system since that only supports zero-dimensional
# systems and those results come back as a list
#
# ** unless there were Derivatives with the symbols, but those were handled
# above.
if swap_sym:
symbols = [swap_sym.get(k, k) for k in symbols]
if isinstance(solution, dict):
solution = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in solution.items()}
elif solution and isinstance(solution, list) and isinstance(solution[0], dict):
for i, sol in enumerate(solution):
solution[i] = {swap_sym.get(k, k): v.subs(swap_sym)
for k, v in sol.items()}
# Get assumptions about symbols, to filter solutions.
# Note that if assumptions about a solution can't be verified, it is still
# returned.
check = flags.get('check', True)
# restore floats
if floats and solution and flags.get('rational', None) is None:
solution = nfloat(solution, exponent=False)
if check and solution: # assumption checking
warn = flags.get('warn', False)
got_None = [] # solutions for which one or more symbols gave None
no_False = [] # solutions for which no symbols gave False
if isinstance(solution, tuple):
# this has already been checked and is in as_set form
return solution
elif isinstance(solution, list):
if isinstance(solution[0], tuple):
for sol in solution:
for symb, val in zip(symbols, sol):
test = check_assumptions(val, **symb.assumptions0)
if test is False:
break
if test is None:
got_None.append(sol)
else:
no_False.append(sol)
elif isinstance(solution[0], dict):
for sol in solution:
a_None = False
for symb, val in sol.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
break
a_None = True
else:
no_False.append(sol)
if a_None:
got_None.append(sol)
else: # list of expressions
for sol in solution:
test = check_assumptions(sol, **symbols[0].assumptions0)
if test is False:
continue
no_False.append(sol)
if test is None:
got_None.append(sol)
elif isinstance(solution, dict):
a_None = False
for symb, val in solution.items():
test = check_assumptions(val, **symb.assumptions0)
if test:
continue
if test is False:
no_False = None
break
a_None = True
else:
no_False = solution
if a_None:
got_None.append(solution)
elif isinstance(solution, (Relational, And, Or)):
if len(symbols) != 1:
raise ValueError("Length should be 1")
if warn and symbols[0].assumptions0:
warnings.warn(filldedent("""
\tWarning: assumptions about variable '%s' are
not handled currently.""" % symbols[0]))
# TODO: check also variable assumptions for inequalities
else:
raise TypeError('Unrecognized solution') # improve the checker
solution = no_False
if warn and got_None:
warnings.warn(filldedent("""
\tWarning: assumptions concerning following solution(s)
cannot be checked:""" + '\n\t' +
', '.join(str(s) for s in got_None)))
#
# done
###########################################################################
as_dict = flags.get('dict', False)
as_set = flags.get('set', False)
if solution is not None and type(solution) not in (list, dict):
return solution
if not solution:
if as_set:
return symbols, set()
return []
if (
# undo the dictionary solutions returned when the system was
# only partially solved with poly-system
not as_dict and
ordered_symbols and
type(solution) is list and
type(solution[0]) is dict
):
solution = [tuple([r.get(s, s) for s in symbols]) for r in solution]
# make orderings canonical for:
# - dict
# - list of
# * values
# * tuples
# * dicts
if type(solution) is dict:
solution = {k: solution[k] for k in ordered(solution.keys())}
elif not as_set: # for set, no point in ordering
solution.sort(key=default_sort_key)
if solution and type(solution[0]) is tuple:
# XXX is it better to handle at source of introduction?
# if we don't do it then (or now) then
# solve([x**2 + y -2, y**2 - 4], x, y) would
# otherwise have (0, 2) appearing twice
solution = list(uniq(solution))
if not (as_set or as_dict):
return solution
# convert all input to list of dicts
if type(solution) is list and type(solution[0]) is dict:
LOD = solution
else:
LOD = None
if as_dict or not LOD:
if isinstance(solution, dict):
LOD = [solution] # dict was made canonical above
elif type(solution[0]) is tuple:
LOD = [dict(zip(symbols, s)) for s in solution]
elif type(solution[0]) is dict:
if not as_set:
# put the keys in order within each dict
LOD = [{k: s[k] for k in ordered(s)} for s in solution]
else:
LOD = solution # we will order after unifying keys
else:
assert len(symbols) == 1, 'logical error'
LOD = [{symbols[0]: s} for s in solution]
else:
LOD = solution
if as_dict:
return LOD
# set output: (symbols, {t1, t2, ...}) from list of dictionaries;
# include all symbols for those that like a verbose solution
# and to resolve any differences in dictionary keys.
#
# The set results can easily be used to make a verbose dict as
# k, v = solve(eqs, syms, set=True)
# sol = [dict(zip(k,i)) for i in v]
#
if ordered_symbols:
k = symbols # keep preferred order
else:
# just unify the symbols for which solutions were found
k = list(ordered(set(flatten(tuple(i.keys()) for i in LOD))))
return k, {tuple([s.get(ki, ki) for ki in k]) for s in LOD}
|
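The tail of the `solve` implementation above normalizes every result into a list of dicts before emitting the `dict=True` or `set=True` forms, and its closing comment notes that the `set=True` output can be zipped back into verbose dicts. A small illustrative sketch of that round trip (the equations are toy inputs, not taken from this dataset row):

```python
from sympy import symbols, solve

x, y = symbols("x y")
eqs = [x**2 + y - 2, y**2 - 4]

# set=True returns the solved-for symbols and a set of value tuples.
syms, sols = solve(eqs, [x, y], set=True)

# Re-assemble the verbose list-of-dicts form from the set output.
as_dicts = [dict(zip(syms, tup)) for tup in sols]
print(syms)      # [x, y]
print(as_dicts)  # e.g. [{x: -2, y: -2}, {x: 0, y: 2}, {x: 2, y: -2}]
```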
37,486 | def _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError:
- If the memory arg is True and the backend does not support memory.
- If ``shots`` exceeds ``max_shots`` for the configured backend.
- If ``shots`` are not int type.
- If any of qubit or meas lo's, or associated ranges do not have length equal to
``n_qubits``.
- If qubit or meas lo's do not fit into prescribed ranges.
- If ``use_measure_esp`` is set to ``True`` on a device which does not support ESP
readout.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_defaults = None
n_qubits = None
if backend:
backend_config = backend.configuration()
n_qubits = backend_config.n_qubits
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError(
"memory not supported by backend {}".format(backend_config.backend_name)
)
# try to set defaults for pulse, otherwise leave as None
if backend_config.open_pulse:
try:
backend_defaults = backend.defaults()
except AttributeError:
pass
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, "backend_name", None)
backend_version = getattr(backend_config, "backend_version", None)
qobj_header = {
**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header,
}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, "max_shots", None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif not isinstance(shots, int):
raise QiskitError("Argument 'shots' should be of type 'int'")
elif max_shots and max_shots < shots:
raise QiskitError(
"Number of shots specified: %s exceeds max_shots property of the "
"backend: %s." % (shots, max_shots)
)
dynamic_reprate_enabled = getattr(backend_config, "dynamic_reprate_enabled", False)
if dynamic_reprate_enabled:
default_rep_delay = getattr(backend_config, "default_rep_delay", None)
rep_delay_range = getattr(backend_config, "rep_delay_range", None)
rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range)
else:
if rep_delay is not None:
rep_delay = None
warnings.warn(
"Dynamic rep rates not supported on this backend, cannot use rep_delay.",
RuntimeWarning,
)
qubit_lo_freq = qubit_lo_freq or getattr(backend_defaults, "qubit_freq_est", None)
meas_lo_freq = meas_lo_freq or getattr(backend_defaults, "meas_freq_est", None)
qubit_lo_range = qubit_lo_range or getattr(backend_config, "qubit_lo_range", None)
meas_lo_range = meas_lo_range or getattr(backend_config, "meas_lo_range", None)
# check that LO frequencies are in the prescribed range
_check_lo_freqs(qubit_lo_freq, qubit_lo_range, "qubit")
_check_lo_freqs(meas_lo_freq, meas_lo_range, "meas")
# configure experiment level LO frequencies
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [
lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los
]
measure_esp_enabled = getattr(backend_config, "measure_esp_enabled", False)
if use_measure_esp is None:
use_measure_esp = True if measure_esp_enabled else False # default use of esp readout
if not measure_esp_enabled and use_measure_esp:
raise QiskitError("ESP readout not supported on this device. Please make sure the flag "
"'use_measure_esp' is set to 'False' or not used.")
# create run configuration and populate
run_config_dict = dict(
shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
use_measure_esp=use_measure_esp,
rep_delay=rep_delay,
qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
n_qubits=n_qubits,
**run_config,
)
return qobj_id, qobj_header, run_config_dict
| def _parse_common_args(
backend,
qobj_id,
qobj_header,
shots,
memory,
max_credits,
seed_simulator,
init_qubits,
use_measure_esp,
rep_delay,
qubit_lo_freq,
meas_lo_freq,
qubit_lo_range,
meas_lo_range,
schedule_los,
**run_config,
):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
QiskitError:
- If the memory arg is True and the backend does not support memory.
- If ``shots`` exceeds ``max_shots`` for the configured backend.
- If ``shots`` are not int type.
- If any of qubit or meas lo's, or associated ranges do not have length equal to
``n_qubits``.
- If qubit or meas lo's do not fit into prescribed ranges.
- If ``use_measure_esp`` is set to ``True`` on a device which does not support ESP
readout.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_defaults = None
n_qubits = None
if backend:
backend_config = backend.configuration()
n_qubits = backend_config.n_qubits
# check for memory flag applied to backend that does not support memory
if memory and not backend_config.memory:
raise QiskitError(
"memory not supported by backend {}".format(backend_config.backend_name)
)
# try to set defaults for pulse, otherwise leave as None
if backend_config.open_pulse:
try:
backend_defaults = backend.defaults()
except AttributeError:
pass
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, "backend_name", None)
backend_version = getattr(backend_config, "backend_version", None)
qobj_header = {
**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header,
}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
max_shots = getattr(backend_config, "max_shots", None)
if shots is None:
if max_shots:
shots = min(1024, max_shots)
else:
shots = 1024
elif not isinstance(shots, int):
raise QiskitError("Argument 'shots' should be of type 'int'")
elif max_shots and max_shots < shots:
raise QiskitError(
"Number of shots specified: %s exceeds max_shots property of the "
"backend: %s." % (shots, max_shots)
)
dynamic_reprate_enabled = getattr(backend_config, "dynamic_reprate_enabled", False)
if dynamic_reprate_enabled:
default_rep_delay = getattr(backend_config, "default_rep_delay", None)
rep_delay_range = getattr(backend_config, "rep_delay_range", None)
rep_delay = _parse_rep_delay(rep_delay, default_rep_delay, rep_delay_range)
else:
if rep_delay is not None:
rep_delay = None
warnings.warn(
"Dynamic rep rates not supported on this backend, cannot use rep_delay.",
RuntimeWarning,
)
qubit_lo_freq = qubit_lo_freq or getattr(backend_defaults, "qubit_freq_est", None)
meas_lo_freq = meas_lo_freq or getattr(backend_defaults, "meas_freq_est", None)
qubit_lo_range = qubit_lo_range or getattr(backend_config, "qubit_lo_range", None)
meas_lo_range = meas_lo_range or getattr(backend_config, "meas_lo_range", None)
# check that LO frequencies are in the prescribed range
_check_lo_freqs(qubit_lo_freq, qubit_lo_range, "qubit")
_check_lo_freqs(meas_lo_freq, meas_lo_range, "meas")
# configure experiment level LO frequencies
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [
lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los
]
measure_esp_enabled = getattr(backend_config, "measure_esp_enabled", False)
if use_measure_esp is None:
use_measure_esp = True if measure_esp_enabled else False # default use of esp readout
if not measure_esp_enabled and use_measure_esp:
raise QiskitError("ESP readout not supported on this device. Please make sure the flag "
"'use_measure_esp' is unset or set to 'False'.")
# create run configuration and populate
run_config_dict = dict(
shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
init_qubits=init_qubits,
use_measure_esp=use_measure_esp,
rep_delay=rep_delay,
qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
n_qubits=n_qubits,
**run_config,
)
return qobj_id, qobj_header, run_config_dict
|
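The pair above differs only in the wording of the ESP-readout error message; the shots-resolution logic the two versions share is easy to exercise on its own. The following is a hedged, standalone sketch of that branch (the helper name `resolve_shots` is ours, not Qiskit's):

```python
def resolve_shots(shots, max_shots):
    """Mirror of the shots defaulting/validation branch above (illustrative only)."""
    if shots is None:
        # default to 1024, but never above the backend limit when one is known
        return min(1024, max_shots) if max_shots else 1024
    if not isinstance(shots, int):
        raise ValueError("Argument 'shots' should be of type 'int'")
    if max_shots and max_shots < shots:
        raise ValueError(
            f"Number of shots specified: {shots} exceeds max_shots property "
            f"of the backend: {max_shots}."
        )
    return shots

print(resolve_shots(None, 8192))   # 1024
print(resolve_shots(4000, 8192))   # 4000
# resolve_shots(10000, 8192) would raise ValueError
```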
20,020 | def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
img: Image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(-1, n_clusters_ + 1):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w, 3), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
| def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
img: Image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(-1, n_clusters_ + 1):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w, 3), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = 255
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
|
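The only change in this pair is that each per-cluster `sub_mask` pixel is now written as a flat 255 instead of the cluster's display colour, so the sub-masks become plain binary masks. A minimal sketch of that idea on a synthetic mask, using scikit-learn's DBSCAN directly (toy data, not the PlantCV API):

```python
import numpy as np
from sklearn.cluster import DBSCAN

# Toy binary mask with two well-separated blobs of "white" pixels.
mask = np.zeros((40, 40), dtype=np.uint8)
mask[5:10, 5:10] = 255
mask[25:32, 28:35] = 255

# Coordinates of the foreground pixels, clustered by spatial proximity.
coords = np.column_stack(np.where(mask == 255))
labels = DBSCAN(eps=2, min_samples=5).fit(coords).labels_

# One binary sub-mask per cluster (label -1 is DBSCAN noise and is skipped).
sub_masks = []
for lab in sorted(set(labels) - {-1}):
    sub = np.zeros_like(mask)
    sub[tuple(coords[labels == lab].T)] = 255   # flat 255, as in the modified code
    sub_masks.append(sub)

print(len(sub_masks))                            # 2
print([int(m.sum() // 255) for m in sub_masks])  # pixel counts per cluster
```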
31,443 | def close_false_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_FALSE_POSITIVE_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = bool(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = bool(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
closed_false_positive_data = {}
try:
closed_false_positive_data = client.close_false_positive(request_data)
except Exception as e:
if 'alertsNotFound' in str(e):
raise DemistoException('Error: This alert id is already closed or does not exist.')
number_of_closed_false_positive_alerts = closed_false_positive_data['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_closed_false_positive_alerts} alerts are classified as closed false positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=closed_false_positive_data)
| def close_false_positive_command(client: Client, args: dict):
alert_ids = args.get('alert_ids')
custom_filter = args.get('custom_filter')
comment = args.get('comment')
reason = CLOSE_FALSE_POSITIVE_REASON_OPTIONS.get(str(args.get('reason')))
sendFeedback = bool(args.get('sendFeedback'))
feedbackText = args.get('feedbackText')
allowContact = argToBoolean(args.get('allowContact'))
contactEmail = args.get('contactEmail')
request_data = args_to_filter_close_alerts(alert_ids, custom_filter, comment, reason,
sendFeedback, feedbackText, allowContact, contactEmail)
closed_false_positive_data = {}
try:
closed_false_positive_data = client.close_false_positive(request_data)
except Exception as e:
if 'alertsNotFound' in str(e):
raise DemistoException('Error: This alert id is already closed or does not exist.')
number_of_closed_false_positive_alerts = closed_false_positive_data['closed_false_positive']
return CommandResults(
readable_output=f'{number_of_closed_false_positive_alerts} alerts are classified as closed false positive',
outputs_prefix='MicrosoftCloudAppSecurity.Alerts',
outputs_key_field='_id',
outputs=closed_false_positive_data)
|
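The fix in this pair swaps `bool()` for `argToBoolean` on `allowContact`: in Python, `bool()` of any non-empty string is `True`, so the string `"false"` coming from the command arguments would silently become `True`. Below is a quick demonstration plus a hedged minimal stand-in for the helper (the real `argToBoolean` lives in Demisto's CommonServerPython and may handle more cases):

```python
print(bool("false"))   # True -- any non-empty string is truthy

def arg_to_boolean(value):
    """Hypothetical minimal equivalent of argToBoolean (illustrative only)."""
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        if value.lower() == "true":
            return True
        if value.lower() == "false":
            return False
    raise ValueError(f"Argument is neither a boolean nor a boolean string: {value!r}")

print(arg_to_boolean("false"))  # False
print(arg_to_boolean("True"))   # True
```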
6,659 | def run_bom_job(
doc: "BOMUpdateLog", boms: Optional[Dict] = None, update_type: Optional[str] = "Replace BOM"
) -> None:
try:
doc.db_set("status", "In Progress")
if not frappe.flags.in_test:
frappe.db.commit()
frappe.db.auto_commit_on_many_writes = 1
boms = frappe._dict(boms or {})
if update_type == "Replace BOM":
replace_bom(boms)
else:
update_cost()
doc.db_set("status", "Completed")
except (Exception, JobTimeoutException):
frappe.db.rollback()
error_log = frappe.log_error(message=frappe.get_traceback(), title=_("BOM Update Tool Error"))
doc.db_set("status", "Failed")
doc.db_set("error_log", error_log.name)
finally:
frappe.db.auto_commit_on_many_writes = 0
frappe.db.commit() # nosemgrep
| def run_bom_job(
doc: "BOMUpdateLog", boms: Optional[Dict[str, str]] = None, update_type: str = "Replace BOM"
) -> None:
try:
doc.db_set("status", "In Progress")
if not frappe.flags.in_test:
frappe.db.commit()
frappe.db.auto_commit_on_many_writes = 1
boms = frappe._dict(boms or {})
if update_type == "Replace BOM":
replace_bom(boms)
else:
update_cost()
doc.db_set("status", "Completed")
except (Exception, JobTimeoutException):
frappe.db.rollback()
error_log = frappe.log_error(message=frappe.get_traceback(), title=_("BOM Update Tool Error"))
doc.db_set("status", "Failed")
doc.db_set("error_log", error_log.name)
finally:
frappe.db.auto_commit_on_many_writes = 0
frappe.db.commit() # nosemgrep
|
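The only change in this pair is a tightening of the type hints: `Optional[Dict]` becomes `Optional[Dict[str, str]]`, and `update_type`, which always receives a string, drops the `Optional` wrapper in favour of a plain `str` default. A small illustration of the difference as a static checker such as mypy would see it (the function bodies are placeholders):

```python
from typing import Dict, Optional

def before(boms: Optional[Dict] = None, update_type: Optional[str] = "Replace BOM") -> None:
    # Bare `Dict` says nothing about key/value types, and Optional[str]
    # forces callers (and the body) to consider a possible None.
    ...

def after(boms: Optional[Dict[str, str]] = None, update_type: str = "Replace BOM") -> None:
    # Keys and values are declared as strings, and update_type can no
    # longer be None, so no None-check is needed before comparing it.
    ...

after({"BOM-0001": "BOM-0002"})                  # fine
after({"BOM-0001": "BOM-0002"}, "Update Cost")   # fine
```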
42,029 | def _get_param_importance_plot(
study: Study,
evaluator: Optional[BaseImportanceEvaluator] = None,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Hyperparameter Importances")
ax.set_xlabel(f"Importance for {target_name}")
ax.set_ylabel("Hyperparameter")
# Prepare data for plotting.
# Importances cannot be evaluated without completed trials.
# Return an empty figure for consistency with other visualization functions.
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain completed trials.")
return ax
importances = optuna.importance.get_param_importances(
study, evaluator=evaluator, params=params, target=target
)
importances = OrderedDict(reversed(list(importances.items())))
importance_values = list(importances.values())
param_names = list(importances.keys())
pos = np.arange(len(param_names))
# Draw horizontal bars.
ax.barh(
pos,
importance_values,
align="center",
color=cm.get_cmap("tab20c")(0),
tick_label=param_names,
)
renderer = fig.canvas.get_renderer()
for idx, val in enumerate(importance_values):
label = str(round(val, 2)) if val >= 0.01 else "<0.01"
text = ax.text(val, idx, label, va="center")
# Sometimes horizontal axis needs to be re-scaled
# to avoid text going over plot area.
bbox = text.get_window_extent(renderer)
bbox = bbox.transformed(ax.transData.inverted())
_, plot_xmax = ax.get_xlim()
bbox_xmax = bbox.x1
if bbox_xmax > plot_xmax:
ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax)
return ax
| def _get_param_importance_plot(
study: Study,
evaluator: Optional[BaseImportanceEvaluator] = None,
params: Optional[List[str]] = None,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Hyperparameter Importances")
ax.set_xlabel(f"Importance for {target_name}")
ax.set_ylabel("Hyperparameter")
# Prepare data for plotting.
# Importances cannot be evaluated without completed trials.
# Return an empty figure for consistency with other visualization functions.
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Study instance does not contain completed trials.")
return ax
importances = optuna.importance.get_param_importances(
study, evaluator=evaluator, params=params, target=target
)
importances = OrderedDict(reversed(list(importances.items())))
importance_values = list(importances.values())
param_names = list(importances.keys())
pos = np.arange(len(param_names))
# Draw horizontal bars.
ax.barh(
pos,
importance_values,
align="center",
color=cm.get_cmap("tab20c")(0),
tick_label=param_names,
)
renderer = fig.canvas.get_renderer()
for idx, val in enumerate(importance_values):
label = str(round(val, 2)) if val >= 0.01 else "<0.01"
text = ax.text(val, idx, label, va="center")
# Sometimes horizontal axis needs to be re-scaled
# to avoid text going over plot area.
bbox = text.get_window_extent(renderer)
bbox = bbox.transformed(ax.transData.inverted())
_, plot_xmax = ax.get_xlim()
bbox_xmax = bbox.xmax
if bbox_xmax > plot_xmax:
ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax)
return ax
|
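The substantive change here is `bbox.x1` to `bbox.xmax`. In Matplotlib a `Bbox` does not guarantee `x1 >= x0`; after a transform the box can come out flipped, and `xmax` is the safe way to ask for the right edge. A minimal demonstration with a hand-built bounding box:

```python
from matplotlib.transforms import Bbox

# A "flipped" box: the second x coordinate is smaller than the first.
bbox = Bbox.from_extents(5.0, 0.0, 2.0, 1.0)

print(bbox.x0, bbox.x1)      # 5.0 2.0 -- x1 is just the second coordinate
print(bbox.xmin, bbox.xmax)  # 2.0 5.0 -- xmax is always the right edge
```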
17,798 | def check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None):
if max_num_args is None:
max_num_args = min_num_args
try:
if sys.version_info[0] >= 3:
info = inspect.getfullargspec(func)
else:
info = inspect.getargspec(func)
except Exception as exc:
print(f"{benchmark_name !s}: failed to check "
f"({func !r}{_get_sourceline_info(func, root) !s}): {exc !s}")
return True
max_args = len(info.args)
if inspect.ismethod(func):
max_args -= 1
if info.defaults is not None:
min_args = max_args - len(info.defaults)
else:
min_args = max_args
if info.varargs is not None:
max_args = float('inf')
ok = (min_args <= max_num_args) and (min_num_args <= max_args)
if not ok:
if min_args == max_args:
args_str = min_args
else:
args_str = f"{min_args}-{max_args}"
if min_num_args == max_num_args:
num_args_str = min_num_args
else:
num_args_str = f"{min_num_args}-{max_num_args}"
print(f"{benchmark_name !s}: wrong number of arguments"
f"(for {func !r}{_get_sourceline_info(func, root) !s}): expected {num_args_str}, "
f"has {args_str}")
return ok
| def check_num_args(root, benchmark_name, func, min_num_args, max_num_args=None):
if max_num_args is None:
max_num_args = min_num_args
try:
if sys.version_info[0] >= 3:
info = inspect.getfullargspec(func)
else:
info = inspect.getargspec(func)
except Exception as exc:
print(f"{benchmark_name !s}: failed to check "
f"({func !r}{_get_sourceline_info(func, root) !s}): {exc !s}")
return True
max_args = len(info.args)
if inspect.ismethod(func):
max_args -= 1
if info.defaults is not None:
min_args = max_args - len(info.defaults)
else:
min_args = max_args
if info.varargs is not None:
max_args = float('inf')
ok = (min_args <= max_num_args) and (min_num_args <= max_args)
if not ok:
if min_args == max_args:
args_str = min_args
else:
args_str = f"{min_args}-{max_args}"
if min_num_args == max_num_args:
num_args_str = min_num_args
else:
num_args_str = f"{min_num_args}-{max_num_args}"
print(f"{benchmark_name !s}: wrong number of arguments "
f"(for {func !r}{_get_sourceline_info(func, root) !s}): expected {num_args_str}, "
f"has {args_str}")
return ok
|
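The fix in this pair only adds a trailing space before the implicit string concatenation: without it, the adjacent f-string literals are joined with no separator and the message reads `...arguments(for ...)`. A short illustration:

```python
name, detail = "benchmark", "(for <func>)"

broken = (f"{name}: wrong number of arguments"
          f"{detail}")   # -> 'benchmark: wrong number of arguments(for <func>)'
fixed = (f"{name}: wrong number of arguments "
         f"{detail}")    # -> 'benchmark: wrong number of arguments (for <func>)'

print(broken)
print(fixed)
```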
1,669 | def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""
Compute elastic net path with coordinate descent.
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Target values.
l1_ratio : float, default=0.5
Number between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, default=100
Number of alphas along the regularization path.
alphas : ndarray, default=None
List of alphas where to compute the models.
If None alphas are set automatically.
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
Whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
check_input : bool, default=True
Skip input validation checks, including the Gram matrix when provided
assuming they are handled by the caller when check_input=False.
**params : kwargs
Keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
See Also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
if multi_output and positive:
raise ValueError('positive=True is not allowed for multi-output'
' (y.ndim != 1)')
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False, check_input=check_input)
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
| def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""
Compute elastic net path with coordinate descent.
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Target values.
l1_ratio : float, default=0.5
Number between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, default=100
Number of alphas along the regularization path.
alphas : ndarray, default=None
List of alphas where to compute the models.
If None alphas are set automatically.
precompute : "auto", bool or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
Whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
check_input : bool, default=True
Skip input validation checks, including the Gram matrix when provided
assuming they are handled by the caller when check_input=False.
**params : kwargs
Keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
See Also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
if multi_output and positive:
raise ValueError('positive=True is not allowed for multi-output'
' (y.ndim != 1)')
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False, check_input=check_input)
if alphas is None:
# No need to normalize of fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order='F')
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
|
6,439 | def get_customer_stats(filters, tree_view=False):
""" Calculates number of new and repeated customers and revenue. """
company_condition = ''
if filters.get('company'):
company_condition = ' and company=%(company)s'
customers = []
customers_in = {}
for si in frappe.db.sql('''select territory, posting_date, customer, base_grand_total from `tabSales Invoice`
where docstatus=1 and posting_date <= %(to_date)s
{company_condition} order by posting_date'''.format(company_condition=company_condition),
filters, as_dict=1):
key = si.territory if tree_view else si.posting_date.strftime('%Y-%m')
new_or_repeat = 'new' if si.customer not in customers else 'repeat'
customers_in.setdefault(key, {'new': [0, 0.0], 'repeat': [0, 0.0]})
if filters.from_date <= si.posting_date.strftime('%Y-%m-%d'):
customers_in[key][new_or_repeat][0] += 1
customers_in[key][new_or_repeat][1] += si.base_grand_total
if new_or_repeat == 'new':
customers.append(si.customer)
return customers_in
| def get_customer_stats(filters, tree_view=False):
""" Calculates number of new and repeated customers and revenue. """
company_condition = ''
if filters.get('company'):
company_condition = ' and company=%(company)s'
customers = []
customers_in = {}
for si in frappe.db.sql('''select territory, posting_date, customer, base_grand_total from `tabSales Invoice`
where docstatus=1 and posting_date <= %(to_date)s
{company_condition} order by posting_date'''.format(company_condition=company_condition),
filters, as_dict=1):
key = si.territory if tree_view else si.posting_date.strftime('%Y-%m')
new_or_repeat = 'new' if si.customer not in customers else 'repeat'
customers_in.setdefault(key, {'new': [0, 0.0], 'repeat': [0, 0.0]})
if getdate(filters.from_date) <= getdate(si.posting_date):
customers_in[key][new_or_repeat][0] += 1
customers_in[key][new_or_repeat][1] += si.base_grand_total
if new_or_repeat == 'new':
customers.append(si.customer)
return customers_in
|
4,125 | def approx_pi(n: cython.int=10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
| def approx_pi(n: cython.int = 10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
|
30,721 | def main():
"""Main function
"""
cmd = demisto.command()
LOG('Command being called is {}'.format(cmd))
try:
if cmd in COMMANDS:
COMMANDS[cmd]()
else:
LOG('Command {} not implemented'.format(cmd))
# Log exceptions
except Exception as e:
import traceback
LOG(traceback.format_exc())
if demisto.command() == 'fetch-incidents':
LOG(str(e))
LOG.print_log()
raise
else:
return_error('An error occurred: {}'.format(str(e)))
| def main():
"""Main function
"""
cmd = demisto.command()
demisto.debug('Command being called is {}'.format(cmd))
try:
if cmd in COMMANDS:
COMMANDS[cmd]()
else:
LOG('Command {} not implemented'.format(cmd))
# Log exceptions
except Exception as e:
import traceback
LOG(traceback.format_exc())
if demisto.command() == 'fetch-incidents':
LOG(str(e))
LOG.print_log()
raise
else:
return_error('An error occurred: {}'.format(str(e)))
|
32,054 | def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str,
events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool,
last_highest_id: int, incident_type: Optional[str],
mirror_direction: Optional[str]) -> Tuple[Optional[List[Dict]], Optional[int]]:
"""
Gets offenses from QRadar service, and transforms them to incidents in a long running execution.
Args:
client (Client): Client to perform the API calls.
offenses_per_fetch (int): Maximum number of offenses to be fetched.
user_query (str): If given, the user filters for fetching offenses from QRadar service.
fetch_mode (str): Fetch mode of the offenses.
Can be 'Fetch Without Events', 'Fetch With All Events', 'Fetch Correlation Events Only'
events_columns (str): Events columns to extract by search query for each offense. Only used when fetch mode
is not 'Fetch Without Events'.
events_limit (int): Number of events to be fetched for each offense. Only used when fetch mode is not
'Fetch Without Events'.
ip_enrich (bool): Whether to enrich offense by changing IP IDs of each offense to its IP value.
asset_enrich (bool): Whether to enrich offense with assets
last_highest_id (int): The highest ID of all the offenses that have been fetched from QRadar service.
incident_type (Optional[str]): Incident type.
mirror_direction (Optional[str]): Whether mirror in is activated or not.
Returns:
(List[Dict], int): List of the incidents, and the new highest ID for next fetch.
(None, None): if reset was triggered
"""
offense_highest_id = get_minimum_id_to_fetch(last_highest_id, user_query)
user_query = f' AND {user_query}' if user_query else ''
filter_fetch_query = f'id>{offense_highest_id}{user_query}'
print_debug_msg(f'Filter query to QRadar: {filter_fetch_query}')
range_max = offenses_per_fetch - 1 if offenses_per_fetch else MAXIMUM_OFFENSES_PER_FETCH - 1
range_ = f'items=0-{range_max}'
# if fails here we can't recover, retry again later
raw_offenses = client.offenses_list(range_, filter_=filter_fetch_query, sort=ASCENDING_ID_ORDER)
if raw_offenses:
raw_offenses_len = len(raw_offenses)
print_debug_msg(f'raw_offenses size: {raw_offenses_len}')
else:
print_debug_msg('empty raw_offenses')
new_highest_offense_id = raw_offenses[-1].get('id') if raw_offenses else offense_highest_id
print_debug_msg(f'New highest ID returned from QRadar offenses: {new_highest_offense_id}')
offenses = []
if fetch_mode != FetchMode.no_events.value:
try:
futures = []
for offense in raw_offenses:
futures.append(EXECUTOR.submit(
enrich_offense_with_events,
client=client,
offense=offense,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
))
offenses = [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures]
except concurrent.futures.TimeoutError as e:
print_debug_msg(
f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}")
update_missing_offenses_from_raw_offenses(raw_offenses, offenses)
else:
offenses = raw_offenses
if is_reset_triggered():
return None, None
offenses_with_mirror = [
dict(offense, mirror_direction=mirror_direction, mirror_instance=demisto.integrationInstance())
for offense in offenses] if mirror_direction else offenses
enriched_offenses = enrich_offenses_result(client, offenses_with_mirror, ip_enrich, asset_enrich)
final_offenses = sanitize_outputs(enriched_offenses)
incidents = create_incidents_from_offenses(final_offenses, incident_type)
return incidents, new_highest_offense_id
| def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str,
events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool,
last_highest_id: int, incident_type: Optional[str],
mirror_direction: Optional[str]) -> Tuple[Optional[List[Dict]], Optional[int]]:
"""
Gets offenses from QRadar service, and transforms them to incidents in a long running execution.
Args:
client (Client): Client to perform the API calls.
offenses_per_fetch (int): Maximum number of offenses to be fetched.
user_query (str): If given, the user filters for fetching offenses from QRadar service.
fetch_mode (str): Fetch mode of the offenses.
Can be 'Fetch Without Events', 'Fetch With All Events', 'Fetch Correlation Events Only'
events_columns (str): Events columns to extract by search query for each offense. Only used when fetch mode
is not 'Fetch Without Events'.
events_limit (int): Number of events to be fetched for each offense. Only used when fetch mode is not
'Fetch Without Events'.
ip_enrich (bool): Whether to enrich offense by changing IP IDs of each offense to its IP value.
asset_enrich (bool): Whether to enrich offense with assets
last_highest_id (int): The highest ID of all the offenses that have been fetched from QRadar service.
incident_type (Optional[str]): Incident type.
mirror_direction (Optional[str]): Whether mirror in is activated or not.
Returns:
(List[Dict], int): List of the incidents, and the new highest ID for next fetch.
(None, None): if reset was triggered
"""
offense_highest_id = get_minimum_id_to_fetch(last_highest_id, user_query)
user_query = f' AND {user_query}' if user_query else ''
filter_fetch_query = f'id>{offense_highest_id}{user_query}'
print_debug_msg(f'Filter query to QRadar: {filter_fetch_query}')
range_max = offenses_per_fetch - 1 if offenses_per_fetch else MAXIMUM_OFFENSES_PER_FETCH - 1
range_ = f'items=0-{range_max}'
# if it fails here we can't recover, retry again later
raw_offenses = client.offenses_list(range_, filter_=filter_fetch_query, sort=ASCENDING_ID_ORDER)
if raw_offenses:
raw_offenses_len = len(raw_offenses)
print_debug_msg(f'raw_offenses size: {raw_offenses_len}')
else:
print_debug_msg('empty raw_offenses')
new_highest_offense_id = raw_offenses[-1].get('id') if raw_offenses else offense_highest_id
print_debug_msg(f'New highest ID returned from QRadar offenses: {new_highest_offense_id}')
offenses = []
if fetch_mode != FetchMode.no_events.value:
try:
futures = []
for offense in raw_offenses:
futures.append(EXECUTOR.submit(
enrich_offense_with_events,
client=client,
offense=offense,
fetch_mode=fetch_mode,
events_columns=events_columns,
events_limit=events_limit,
))
offenses = [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures]
except concurrent.futures.TimeoutError as e:
print_debug_msg(
f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}")
update_missing_offenses_from_raw_offenses(raw_offenses, offenses)
else:
offenses = raw_offenses
if is_reset_triggered():
return None, None
offenses_with_mirror = [
dict(offense, mirror_direction=mirror_direction, mirror_instance=demisto.integrationInstance())
for offense in offenses] if mirror_direction else offenses
enriched_offenses = enrich_offenses_result(client, offenses_with_mirror, ip_enrich, asset_enrich)
final_offenses = sanitize_outputs(enriched_offenses)
incidents = create_incidents_from_offenses(final_offenses, incident_type)
return incidents, new_highest_offense_id
|
53,305 | def vector_space(
x_arr=None,
y_arr=None,
z_arr=None,
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
u_arr=None,
v_arr=None,
w_arr=None,
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
):
r"""
Returns a vector space in the form of a multi-dimensional array.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
x_range: array_like
A 1 by 2 array containing the range of x-vlaues for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-vlaues for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-vlaues for the vector spaces.
If not given, the default interval [0,1] is assumed.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing a x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
ndarray
A 1 by 3 array with
the first element containing the coordinates.
the second element containing the vector values.
and the third element containing the delta values for each dimension.
Raises
------
This function does not raise any exceptions.
Warns
-----
This function does not raise any warnings.
Notes
-----
N/A
"""
# Constructing the Meshgrid
if (
not isinstance(x_arr, type(None))
and not isinstance(y_arr, type(None))
and not isinstance(z_arr, type(None))
):
x, y, z = np.meshgrid(
x_arr,
y_arr,
z_arr,
indexing="ij",
)
dx = np.diff(x_arr)
dy = np.diff(y_arr)
dz = np.diff(z_arr)
else:
x_den = int(np.around((x_range[1] - x_range[0]) / precision[0]) + 1)
y_den = int(np.around((y_range[1] - y_range[0]) / precision[1]) + 1)
z_den = int(np.around((z_range[1] - z_range[0]) / precision[2]) + 1)
# dx = np.double((x_range[1] - x_range[0]) / (x_den - 1))
# dy = np.double((y_range[1] - y_range[0]) / (y_den - 1))
# dz = np.double((z_range[1] - z_range[0]) / (z_den - 1))
dx = np.diff(np.linspace(x_range[0], x_range[1], x_den))
dy = np.diff(np.linspace(y_range[0], y_range[1], y_den))
dz = np.diff(np.linspace(z_range[0], z_range[1], z_den))
x, y, z = np.meshgrid(
np.linspace(x_range[0], x_range[1], x_den),
np.linspace(y_range[0], y_range[1], y_den),
np.linspace(z_range[0], z_range[1], z_den),
indexing="ij",
)
# Calculating the vector values
if (
not isinstance(u_arr, type(None))
and not isinstance(v_arr, type(None))
and not isinstance(w_arr, type(None))
):
u = u_arr
v = v_arr
w = w_arr
else:
u, v, w = func(x, y, z)
return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
| def vector_space(
x_arr=None,
y_arr=None,
z_arr=None,
x_range=[0, 1],
y_range=[0, 1],
z_range=[0, 1],
u_arr=None,
v_arr=None,
w_arr=None,
func=(lambda x, y, z: [x, y, z]),
precision=[0.05, 0.05, 0.05],
):
r"""
Returns a vector space in the form of a multi-dimensional array.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
x_range: array_like
A 1 by 2 array containing the range of x-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
y_range: array_like
A 1 by 2 array containing the range of y-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
z_range: array_like
A 1 by 2 array containing the range of z-values for the vector spaces.
If not given, the default interval [0,1] is assumed.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
func: <class 'function'>
A function that takes in 3 arguments, respectively representing a x, y, and z
coordinate of a point and returns the vector value for that point in the form
of a 1 by 3 array.
precision: array_like
A 1 by 3 array containing the approximate precision values for each dimension,
in the case where uniform arrays are being used.
The default value is [0.05, 0.05, 0.05].
Returns
-------
ndarray
A 1 by 3 array with
the first element containing the coordinates.
the second element containing the vector values.
and the third element containing the delta values for each dimension.
Raises
------
This function does not raise any exceptions.
Warns
-----
This function does not raise any warnings.
Notes
-----
N/A
"""
# Constructing the Meshgrid
if (
not isinstance(x_arr, type(None))
and not isinstance(y_arr, type(None))
and not isinstance(z_arr, type(None))
):
x, y, z = np.meshgrid(
x_arr,
y_arr,
z_arr,
indexing="ij",
)
dx = np.diff(x_arr)
dy = np.diff(y_arr)
dz = np.diff(z_arr)
else:
x_den = int(np.around((x_range[1] - x_range[0]) / precision[0]) + 1)
y_den = int(np.around((y_range[1] - y_range[0]) / precision[1]) + 1)
z_den = int(np.around((z_range[1] - z_range[0]) / precision[2]) + 1)
# dx = np.double((x_range[1] - x_range[0]) / (x_den - 1))
# dy = np.double((y_range[1] - y_range[0]) / (y_den - 1))
# dz = np.double((z_range[1] - z_range[0]) / (z_den - 1))
dx = np.diff(np.linspace(x_range[0], x_range[1], x_den))
dy = np.diff(np.linspace(y_range[0], y_range[1], y_den))
dz = np.diff(np.linspace(z_range[0], z_range[1], z_den))
x, y, z = np.meshgrid(
np.linspace(x_range[0], x_range[1], x_den),
np.linspace(y_range[0], y_range[1], y_den),
np.linspace(z_range[0], z_range[1], z_den),
indexing="ij",
)
# Calculating the vector values
if (
not isinstance(u_arr, type(None))
and not isinstance(v_arr, type(None))
and not isinstance(w_arr, type(None))
):
u = u_arr
v = v_arr
w = w_arr
else:
u, v, w = func(x, y, z)
return np.array([x, y, z]), np.array([u, v, w]), np.array([dx, dy, dz])
|
36,267 | def normalize_total(
adata: AnnData,
target_sum: Optional[float] = None,
exclude_highly_expressed: bool = False,
max_fraction: float = 0.05,
key_added: Optional[str] = None,
layer: Optional[str] = None,
layers: Union[Literal['all'], Iterable[str]] = None,
layer_norm: Optional[str] = None,
inplace: bool = True,
copy: bool = False,
) -> Optional[Dict[str, np.ndarray]]:
"""\
Normalize counts per cell.
Normalize each cell by total counts over all genes,
so that every cell has the same total count after normalization.
If choosing `target_sum=1e6`, this is CPM normalization.
If `exclude_highly_expressed=True`, very highly expressed genes are excluded
from the computation of the normalization factor (size factor) for each
cell. This is meaningful as these can strongly influence the resulting
normalized values for all other genes [Weinreb17]_.
Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger
[Zheng17]_ or SPRING [Weinreb17]_.
Params
------
adata
The annotated data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
target_sum
If `None`, after normalization, each observation (cell) has a total
count equal to the median of total counts for observations (cells)
before normalization.
exclude_highly_expressed
Exclude (very) highly expressed genes for the computation of the
normalization factor (size factor) for each cell. A gene is considered
highly expressed, if it has more than `max_fraction` of the total counts
in at least one cell. The not-excluded genes will sum up to
`target_sum`.
max_fraction
If `exclude_highly_expressed=True`, consider cells as highly expressed
that have more counts than `max_fraction` of the original total counts
in at least one cell.
key_added
Name of the field in `adata.obs` where the normalization factor is
stored.
layer
Layer to normalize instead of `X`. If `None`, `X` is normalized.
inplace
Whether to update `adata` or return dictionary with normalized copies of
`adata.X` and `adata.layers`.
copy
Whether to modify copied input object. Not compatible with inplace=False.
Returns
-------
Returns dictionary with normalized copies of `adata.X` and `adata.layers`
or updates `adata` with normalized version of the original
`adata.X` and `adata.layers`, depending on `inplace`.
Example
--------
>>> from anndata import AnnData
>>> import scanpy as sc
>>> sc.settings.verbosity = 2
>>> np.set_printoptions(precision=2)
>>> adata = AnnData(np.array([
... [3, 3, 3, 6, 6],
... [1, 1, 1, 2, 2],
... [1, 22, 1, 2, 2],
... ]))
>>> adata.X
array([[ 3., 3., 3., 6., 6.],
[ 1., 1., 1., 2., 2.],
[ 1., 22., 1., 2., 2.]], dtype=float32)
>>> X_norm = sc.pp.normalize_total(adata, target_sum=1, inplace=False)['X']
>>> X_norm
array([[0.14, 0.14, 0.14, 0.29, 0.29],
[0.14, 0.14, 0.14, 0.29, 0.29],
[0.04, 0.79, 0.04, 0.07, 0.07]], dtype=float32)
>>> X_norm = sc.pp.normalize_total(
... adata, target_sum=1, exclude_highly_expressed=True,
... max_fraction=0.2, inplace=False
... )['X']
The following highly-expressed genes are not considered during normalization factor computation:
['1', '3', '4']
>>> X_norm
array([[ 0.5, 0.5, 0.5, 1. , 1. ],
[ 0.5, 0.5, 0.5, 1. , 1. ],
[ 0.5, 11. , 0.5, 1. , 1. ]], dtype=float32)
"""
if copy:
if not inplace:
raise ValueError()
adata = adata.copy()
if max_fraction < 0 or max_fraction > 1:
raise ValueError('Choose max_fraction between 0 and 1.')
# Deprecated features
if layers is not None:
warn(
FutureWarning(
"The `layers` argument is deprecated. Instead, specify individual "
"layers to normalize with `layer`."
)
)
if layer_norm is not None:
warn(
FutureWarning(
"The `layer_norm` argument is deprecated. Specify the target size "
"factor directly with `target_sum`."
)
)
if layers == 'all':
layers = adata.layers.keys()
elif isinstance(layers, str):
raise ValueError(
f"`layers` needs to be a list of strings or 'all', not {layers!r}"
)
view_to_actual(adata)
X = _get_obs_rep(adata, layer=layer)
gene_subset = None
msg = 'normalizing counts per cell'
if exclude_highly_expressed:
counts_per_cell = X.sum(1) # original counts per cell
counts_per_cell = np.ravel(counts_per_cell)
# at least one cell as more than max_fraction of counts per cell
gene_subset = (X > counts_per_cell[:, None] * max_fraction).sum(0)
gene_subset = np.ravel(gene_subset) == 0
msg += (
' The following highly-expressed genes are not considered during '
f'normalization factor computation:\n{adata.var_names[~gene_subset].tolist()}'
)
counts_per_cell = X[:, gene_subset].sum(1)
else:
counts_per_cell = X.sum(1)
start = logg.info(msg)
counts_per_cell = np.ravel(counts_per_cell)
cell_subset = counts_per_cell > 0
if not np.all(cell_subset):
warn(UserWarning('Some cells have zero counts'))
if inplace:
if key_added is not None:
adata.obs[key_added] = counts_per_cell
_set_obs_rep(
adata, _normalize_data(X, counts_per_cell, target_sum), layer=layer
)
else:
# not recarray because need to support sparse
dat = dict(
X=_normalize_data(X, counts_per_cell, target_sum, copy=True),
norm_factor=counts_per_cell,
)
# Deprecated features
if layer_norm == 'after':
after = target_sum
elif layer_norm == 'X':
after = np.median(counts_per_cell[cell_subset])
elif layer_norm is None:
after = None
else:
raise ValueError('layer_norm should be "after", "X" or None')
for layer_to_norm in layers if layers is not None else ():
res = normalize_total(
adata, layer=layer_to_norm, target_sum=after, inplace=inplace
)
if not inplace:
dat[layer_to_norm] = res["X"]
logg.info(
' finished ({time_passed})',
time=start,
)
if key_added is not None:
logg.debug(
f'and added {key_added!r}, counts per cell before normalization (adata.obs)'
)
if copy:
return adata
elif not inplace:
return dat
| def normalize_total(
adata: AnnData,
target_sum: Optional[float] = None,
exclude_highly_expressed: bool = False,
max_fraction: float = 0.05,
key_added: Optional[str] = None,
layer: Optional[str] = None,
layers: Union[Literal['all'], Iterable[str]] = None,
layer_norm: Optional[str] = None,
inplace: bool = True,
copy: bool = False,
) -> Optional[Dict[str, np.ndarray]]:
"""\
Normalize counts per cell.
Normalize each cell by total counts over all genes,
so that every cell has the same total count after normalization.
If choosing `target_sum=1e6`, this is CPM normalization.
If `exclude_highly_expressed=True`, very highly expressed genes are excluded
from the computation of the normalization factor (size factor) for each
cell. This is meaningful as these can strongly influence the resulting
normalized values for all other genes [Weinreb17]_.
Similar functions are used, for example, by Seurat [Satija15]_, Cell Ranger
[Zheng17]_ or SPRING [Weinreb17]_.
Params
------
adata
The annotated data matrix of shape `n_obs` × `n_vars`.
Rows correspond to cells and columns to genes.
target_sum
If `None`, after normalization, each observation (cell) has a total
count equal to the median of total counts for observations (cells)
before normalization.
exclude_highly_expressed
Exclude (very) highly expressed genes for the computation of the
normalization factor (size factor) for each cell. A gene is considered
highly expressed, if it has more than `max_fraction` of the total counts
in at least one cell. The not-excluded genes will sum up to
`target_sum`.
max_fraction
If `exclude_highly_expressed=True`, consider cells as highly expressed
that have more counts than `max_fraction` of the original total counts
in at least one cell.
key_added
Name of the field in `adata.obs` where the normalization factor is
stored.
layer
Layer to normalize instead of `X`. If `None`, `X` is normalized.
inplace
Whether to update `adata` or return dictionary with normalized copies of
`adata.X` and `adata.layers`.
copy
Whether to modify copied input object. Not compatible with inplace=False.
Returns
-------
Returns dictionary with normalized copies of `adata.X` and `adata.layers`
or updates `adata` with normalized version of the original
`adata.X` and `adata.layers`, depending on `inplace`.
Example
--------
>>> from anndata import AnnData
>>> import scanpy as sc
>>> sc.settings.verbosity = 2
>>> np.set_printoptions(precision=2)
>>> adata = AnnData(np.array([
... [3, 3, 3, 6, 6],
... [1, 1, 1, 2, 2],
... [1, 22, 1, 2, 2],
... ]))
>>> adata.X
array([[ 3., 3., 3., 6., 6.],
[ 1., 1., 1., 2., 2.],
[ 1., 22., 1., 2., 2.]], dtype=float32)
>>> X_norm = sc.pp.normalize_total(adata, target_sum=1, inplace=False)['X']
>>> X_norm
array([[0.14, 0.14, 0.14, 0.29, 0.29],
[0.14, 0.14, 0.14, 0.29, 0.29],
[0.04, 0.79, 0.04, 0.07, 0.07]], dtype=float32)
>>> X_norm = sc.pp.normalize_total(
... adata, target_sum=1, exclude_highly_expressed=True,
... max_fraction=0.2, inplace=False
... )['X']
The following highly-expressed genes are not considered during normalization factor computation:
['1', '3', '4']
>>> X_norm
array([[ 0.5, 0.5, 0.5, 1. , 1. ],
[ 0.5, 0.5, 0.5, 1. , 1. ],
[ 0.5, 11. , 0.5, 1. , 1. ]], dtype=float32)
"""
if copy:
if not inplace:
raise ValueError("`copy=True` cannot be used with `inplace=False`.")
adata = adata.copy()
if max_fraction < 0 or max_fraction > 1:
raise ValueError('Choose max_fraction between 0 and 1.')
# Deprecated features
if layers is not None:
warn(
FutureWarning(
"The `layers` argument is deprecated. Instead, specify individual "
"layers to normalize with `layer`."
)
)
if layer_norm is not None:
warn(
FutureWarning(
"The `layer_norm` argument is deprecated. Specify the target size "
"factor directly with `target_sum`."
)
)
if layers == 'all':
layers = adata.layers.keys()
elif isinstance(layers, str):
raise ValueError(
f"`layers` needs to be a list of strings or 'all', not {layers!r}"
)
view_to_actual(adata)
X = _get_obs_rep(adata, layer=layer)
gene_subset = None
msg = 'normalizing counts per cell'
if exclude_highly_expressed:
counts_per_cell = X.sum(1) # original counts per cell
counts_per_cell = np.ravel(counts_per_cell)
# at least one cell as more than max_fraction of counts per cell
gene_subset = (X > counts_per_cell[:, None] * max_fraction).sum(0)
gene_subset = np.ravel(gene_subset) == 0
msg += (
' The following highly-expressed genes are not considered during '
f'normalization factor computation:\n{adata.var_names[~gene_subset].tolist()}'
)
counts_per_cell = X[:, gene_subset].sum(1)
else:
counts_per_cell = X.sum(1)
start = logg.info(msg)
counts_per_cell = np.ravel(counts_per_cell)
cell_subset = counts_per_cell > 0
if not np.all(cell_subset):
warn(UserWarning('Some cells have zero counts'))
if inplace:
if key_added is not None:
adata.obs[key_added] = counts_per_cell
_set_obs_rep(
adata, _normalize_data(X, counts_per_cell, target_sum), layer=layer
)
else:
# not recarray because need to support sparse
dat = dict(
X=_normalize_data(X, counts_per_cell, target_sum, copy=True),
norm_factor=counts_per_cell,
)
# Deprecated features
if layer_norm == 'after':
after = target_sum
elif layer_norm == 'X':
after = np.median(counts_per_cell[cell_subset])
elif layer_norm is None:
after = None
else:
raise ValueError('layer_norm should be "after", "X" or None')
for layer_to_norm in layers if layers is not None else ():
res = normalize_total(
adata, layer=layer_to_norm, target_sum=after, inplace=inplace
)
if not inplace:
dat[layer_to_norm] = res["X"]
logg.info(
' finished ({time_passed})',
time=start,
)
if key_added is not None:
logg.debug(
f'and added {key_added!r}, counts per cell before normalization (adata.obs)'
)
if copy:
return adata
elif not inplace:
return dat
|
24,713 | def spin_once(
node: 'Node',
*,
executor: Optional[Executor] = None,
timeout_sec: Optional[float] = None) -> None:
"""
Execute one item of work or wait until a timeout expires.
One callback will be executed by the provided executor as long as that callback is ready
before the timeout expires.
If no executor is provided (ie. ``None``), then the global executor is used.
It is possible the work done is for a node other than the one provided if the global executor
has a partially completed coroutine.
:param node: A node to add to the executor to check for work.
:param executor: The executor to use, or the global executor if ``None``.
:param timeout_sec: Seconds to wait. Block forever if ``None`` or negative. Don't wait if 0.
"""
executor = get_global_executor() if executor is None else executor
try:
executor.add_node(node)
executor.spin_once(timeout_sec=timeout_sec)
finally:
executor.remove_node(node)
| def spin_once(
node: 'Node',
*,
executor: Optional['Executor'] = None,
timeout_sec: Optional[float] = None) -> None:
"""
Execute one item of work or wait until a timeout expires.
One callback will be executed by the provided executor as long as that callback is ready
before the timeout expires.
If no executor is provided (ie. ``None``), then the global executor is used.
It is possible the work done is for a node other than the one provided if the global executor
has a partially completed coroutine.
:param node: A node to add to the executor to check for work.
:param executor: The executor to use, or the global executor if ``None``.
:param timeout_sec: Seconds to wait. Block forever if ``None`` or negative. Don't wait if 0.
"""
executor = get_global_executor() if executor is None else executor
try:
executor.add_node(node)
executor.spin_once(timeout_sec=timeout_sec)
finally:
executor.remove_node(node)
|
6,992 | def extract_javascript(code, keywords=("__"), options=None):
"""Extract messages from JavaScript source code.
This is a modified version of babel's JS parser. Reused under BSD license.
License: https://github.com/python-babel/babel/blob/master/LICENSE
Changes from upstream:
- Preserve arguments, babel's parser flattened all values in args,
we need order because we use different syntax for translation
which can contain 2nd arg which is array of many values. If
argument is non-primitive type then value is NOT returned in
args.
E.g. __("0", ["1", "2"], "3") -> ("0", None, "3")
- remove comments support
- changed signature to accept string directly.
:param code: code as string
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param options: a dictionary of additional options (optional)
Supported options are:
* `template_string` -- set to false to disable ES6
template string support.
"""
if options is None:
options = {}
funcname = message_lineno = None
messages = []
last_argument = None
concatenate_next = False
last_token = None
call_stack = -1
# Tree level = depth inside function call tree
# Example: __("0", ["1", "2"], "3")
# Depth __()
# / | \
# 0 "0" [...] "3" <- only 0th level strings matter
# / \
# 1 "1" "2"
tree_level = 0
opening_operators = {"[", "{"}
closing_operators = {"]", "}"}
all_container_operators = opening_operators.union(closing_operators)
dotted = any("." in kw for kw in keywords)
for token in tokenize(
code,
jsx=True,
template_string=options.get("template_string", True),
dotted=dotted,
):
if ( # Turn keyword`foo` expressions into keyword("foo") calls:
funcname
and (last_token and last_token.type == "name") # have a keyword...
and token.type # we've seen nothing after the keyword...
== "template_string" # this is a template string
):
message_lineno = token.lineno
messages = [unquote_string(token.value)]
call_stack = 0
tree_level = 0
token = Token("operator", ")", token.lineno)
if token.type == "operator" and token.value == "(":
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack >= 0 and token.type == "operator" and token.value in all_container_operators:
if token.value in opening_operators:
tree_level += 1
if token.value in closing_operators:
tree_level -= 1
elif call_stack == -1 and token.type == "linecomment" or token.type == "multilinecomment":
pass # ignore comments
elif funcname and call_stack == 0:
if token.type == "operator" and token.value == ")":
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
messages = tuple(messages)
elif messages:
messages = messages[0]
else:
messages = None
if messages is not None:
yield (message_lineno, funcname, messages)
funcname = message_lineno = last_argument = None
concatenate_next = False
messages = []
call_stack = -1
tree_level = 0
elif token.type in ("string", "template_string"):
new_value = unquote_string(token.value)
if tree_level > 0:
pass
elif concatenate_next:
last_argument = (last_argument or "") + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == "operator":
if token.value == ",":
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
if tree_level == 0:
messages.append(None)
concatenate_next = False
elif token.value == "+":
concatenate_next = True
elif call_stack > 0 and token.type == "operator" and token.value == ")":
call_stack -= 1
tree_level = 0
elif funcname and call_stack == -1:
funcname = None
elif (
call_stack == -1
and token.type == "name"
and token.value in keywords
and (last_token is None or last_token.type != "name" or last_token.value != "function")
):
funcname = token.value
last_token = token
| def extract_javascript(code, keywords=("__",), options=None):
"""Extract messages from JavaScript source code.
This is a modified version of babel's JS parser. Reused under BSD license.
License: https://github.com/python-babel/babel/blob/master/LICENSE
Changes from upstream:
- Preserve arguments, babel's parser flattened all values in args,
we need order because we use different syntax for translation
which can contain 2nd arg which is array of many values. If
argument is non-primitive type then value is NOT returned in
args.
E.g. __("0", ["1", "2"], "3") -> ("0", None, "3")
- remove comments support
- changed signature to accept string directly.
:param code: code as string
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param options: a dictionary of additional options (optional)
Supported options are:
* `template_string` -- set to false to disable ES6
template string support.
"""
if options is None:
options = {}
funcname = message_lineno = None
messages = []
last_argument = None
concatenate_next = False
last_token = None
call_stack = -1
# Tree level = depth inside function call tree
# Example: __("0", ["1", "2"], "3")
# Depth __()
# / | \
# 0 "0" [...] "3" <- only 0th level strings matter
# / \
# 1 "1" "2"
tree_level = 0
opening_operators = {"[", "{"}
closing_operators = {"]", "}"}
all_container_operators = opening_operators.union(closing_operators)
dotted = any("." in kw for kw in keywords)
for token in tokenize(
code,
jsx=True,
template_string=options.get("template_string", True),
dotted=dotted,
):
if ( # Turn keyword`foo` expressions into keyword("foo") calls:
funcname
and (last_token and last_token.type == "name") # have a keyword...
and token.type # we've seen nothing after the keyword...
== "template_string" # this is a template string
):
message_lineno = token.lineno
messages = [unquote_string(token.value)]
call_stack = 0
tree_level = 0
token = Token("operator", ")", token.lineno)
if token.type == "operator" and token.value == "(":
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack >= 0 and token.type == "operator" and token.value in all_container_operators:
if token.value in opening_operators:
tree_level += 1
if token.value in closing_operators:
tree_level -= 1
elif call_stack == -1 and token.type == "linecomment" or token.type == "multilinecomment":
pass # ignore comments
elif funcname and call_stack == 0:
if token.type == "operator" and token.value == ")":
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
messages = tuple(messages)
elif messages:
messages = messages[0]
else:
messages = None
if messages is not None:
yield (message_lineno, funcname, messages)
funcname = message_lineno = last_argument = None
concatenate_next = False
messages = []
call_stack = -1
tree_level = 0
elif token.type in ("string", "template_string"):
new_value = unquote_string(token.value)
if tree_level > 0:
pass
elif concatenate_next:
last_argument = (last_argument or "") + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == "operator":
if token.value == ",":
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
if tree_level == 0:
messages.append(None)
concatenate_next = False
elif token.value == "+":
concatenate_next = True
elif call_stack > 0 and token.type == "operator" and token.value == ")":
call_stack -= 1
tree_level = 0
elif funcname and call_stack == -1:
funcname = None
elif (
call_stack == -1
and token.type == "name"
and token.value in keywords
and (last_token is None or last_token.type != "name" or last_token.value != "function")
):
funcname = token.value
last_token = token
|
32,256 | def get_mapping_fields_command() -> GetMappingFieldsResponse:
"""
Returns the list of fields for an incident type.
Args:
client: XSOAR client to use
Returns: Dictionary with keys as field names
"""
incident_type_scheme = SchemeTypeMapping(type_name="incident")
for field in GLPI_ARGS:
incident_type_scheme.add_field(field)
mapping_response = GetMappingFieldsResponse()
mapping_response.add_scheme_type(incident_type_scheme)
return mapping_response
| def get_mapping_fields_command() -> GetMappingFieldsResponse:
"""
Returns the list of fields for an incident type.
Args:
client: XSOAR client to use
Returns: Dictionary with keys as field names
"""
incident_type_scheme = SchemeTypeMapping(type_name="GLPI Ticket")
for field in GLPI_ARGS:
incident_type_scheme.add_field(field)
mapping_response = GetMappingFieldsResponse()
mapping_response.add_scheme_type(incident_type_scheme)
return mapping_response
|
9,080 | def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn("Extracting in %s", tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn("Now working in %s", subdir)
# building an egg
log.warn("Building a Setuptools egg in %s", to_dir)
_python_cmd("setup.py", "-q", "bdist_egg", "--dist-dir", to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError("Could not build the egg.")
| def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn(f"Extracting in {tmpdir}")
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn("Now working in %s", subdir)
# building an egg
log.warn("Building a Setuptools egg in %s", to_dir)
_python_cmd("setup.py", "-q", "bdist_egg", "--dist-dir", to_dir)
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError("Could not build the egg.")
|
27,788 | def test_subclassing_node_with_item_warns() -> None:
with pytest.warns(
PytestWarning, match="SoWrong is a Item subclass and should not be a collector"
):
class SoWrong(nodes.Item, nodes.File):
pass
| def test_subclassing_both_item_and_collector_warns() -> None:
with pytest.warns(
PytestWarning, match="SoWrong is a Item subclass and should not be a collector"
):
class SoWrong(nodes.Item, nodes.File):
pass
|
13,258 | def deprecated_posargs(func: C) -> C:
"""Insert a `*__deprecated_posargs,` shim in place of the `*,` for kwonly args.
This turns out to be fairly tricky to get right with our preferred style of
error handling (exhaustive) and various function-rewriting wrappers.
"""
if READTHEDOCS: # pragma: no cover
# Documentation should show the new signatures without deprecation helpers.
return func
signature = inspect.signature(func)
parameters = list(signature.parameters.values())
vararg = inspect.Parameter(
name="__deprecated_posargs",
kind=inspect.Parameter.VAR_POSITIONAL,
annotation=Any,
)
# If we're passed any VAR_POSITIONAL args, we'll use this sequence of
# params to match them up with the correct KEYWORD_ONLY argument after
# checking that it has it's default value - if you're passing by name,
# can't have also been passing positionally before we deprecated that.
deprecated = []
for i, arg in enumerate(tuple(parameters)):
if arg.kind == inspect.Parameter.KEYWORD_ONLY:
if not deprecated:
parameters.insert(i, vararg)
deprecated.append(arg)
func.__signature__ = signature.replace(parameters=parameters)
@proxies(func)
def accept(*args, **kwargs):
bound = func.__signature__.bind_partial(*args, **kwargs)
bad_posargs = bound.arguments.pop("__deprecated_posargs", None) or ()
if len(bad_posargs) > len(deprecated):
# We have more positional arguments than the wrapped func has parameters,
# so there's no way this ever worked. We know that this bind will fail
# but attempting it will raise a nice descriptive TypeError.
signature.bind_partial(*args, **kwargs)
for param, pos in zip(deprecated, bad_posargs):
# Unfortunately, another layer of our function-wrapping logic passes in
# all the default arguments as explicit arguments. This means that if
# someone explicitly passes some value for a parameter as a positional
# argument and *the default value* as a keyword argument, we'll emit a
# deprecation warning but not an immediate error. Ah well...
if bound.arguments.get(param.name, param.default) != param.default:
raise TypeError(
"Cannot pass {name}={p} positionally and {name}={n} by name!".format(
name=param.name, p=pos, n=bound.arguments[param.name]
)
)
from hypothesis._settings import note_deprecation
note_deprecation(
"%s was passed %s=%r as a positional argument, which will be a "
"keyword-only argument in a future version."
% (qualname(func), param.name, pos),
since="RELEASEDAY",
)
bound.arguments[param.name] = pos
return func(*bound.args, **bound.kwargs)
# We use these in convert_positional_arguments, to ensure that the LazyStrategy
# repr of strategy objects look sensible (and will work without this shim).
accept.__deprecated_posargs = tuple(deprecated)
return accept
| def deprecated_posargs(func: C) -> C:
"""Insert a `*__deprecated_posargs,` shim in place of the `*,` for kwonly args.
This turns out to be fairly tricky to get right with our preferred style of
error handling (exhaustive) and various function-rewriting wrappers.
"""
if READTHEDOCS: # pragma: no cover
# Documentation should show the new signatures without deprecation helpers.
return func
signature = inspect.signature(func)
parameters = list(signature.parameters.values())
vararg = inspect.Parameter(
name="__deprecated_posargs",
kind=inspect.Parameter.VAR_POSITIONAL,
annotation=Any,
)
# If we're passed any VAR_POSITIONAL args, we'll use this sequence of
# params to match them up with the correct KEYWORD_ONLY argument after
# checking that it has its default value - if you're passing by name,
# can't have also been passing positionally before we deprecated that.
deprecated = []
for i, arg in enumerate(tuple(parameters)):
if arg.kind == inspect.Parameter.KEYWORD_ONLY:
if not deprecated:
parameters.insert(i, vararg)
deprecated.append(arg)
func.__signature__ = signature.replace(parameters=parameters)
@proxies(func)
def accept(*args, **kwargs):
bound = func.__signature__.bind_partial(*args, **kwargs)
bad_posargs = bound.arguments.pop("__deprecated_posargs", None) or ()
if len(bad_posargs) > len(deprecated):
# We have more positional arguments than the wrapped func has parameters,
# so there's no way this ever worked. We know that this bind will fail
# but attempting it will raise a nice descriptive TypeError.
signature.bind_partial(*args, **kwargs)
for param, pos in zip(deprecated, bad_posargs):
# Unfortunately, another layer of our function-wrapping logic passes in
# all the default arguments as explicit arguments. This means that if
# someone explicitly passes some value for a parameter as a positional
# argument and *the default value* as a keyword argument, we'll emit a
# deprecation warning but not an immediate error. Ah well...
if bound.arguments.get(param.name, param.default) != param.default:
raise TypeError(
"Cannot pass {name}={p} positionally and {name}={n} by name!".format(
name=param.name, p=pos, n=bound.arguments[param.name]
)
)
from hypothesis._settings import note_deprecation
note_deprecation(
"%s was passed %s=%r as a positional argument, which will be a "
"keyword-only argument in a future version."
% (qualname(func), param.name, pos),
since="RELEASEDAY",
)
bound.arguments[param.name] = pos
return func(*bound.args, **bound.kwargs)
# We use these in convert_positional_arguments, to ensure that the LazyStrategy
# repr of strategy objects look sensible (and will work without this shim).
accept.__deprecated_posargs = tuple(deprecated)
return accept
|