id (int64, 11–59.9k) | original (string, lengths 33–150k) | modified (string, lengths 37–150k) |
---|---|---|
45,191 | def test_drop_duplicates():
frame_data = {
"A": list(range(3)) * 2,
"B": list(range(1, 4)) * 2,
"C": list(range(6)),
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data) # noqa F841
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False),
)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False),
)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False),
)
df_equals(modin_df.drop_duplicates(inplace=False), pandas_df)
modin_df.drop_duplicates(subset=["A", "B"], inplace=True)
df_equals(modin_df, pandas_df.drop_duplicates(subset=["A", "B"], inplace=False))
modin_df = pd.DataFrame(frame_data)
modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=True)
df_equals(modin_df, pandas.DataFrame({"A": [], "B": [], "C": []}))
| def test_drop_duplicates():
frame_data = {
"A": list(range(3)) * 2,
"B": list(range(1, 4)) * 2,
"C": list(range(6)),
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep="first", inplace=False),
)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep="last", inplace=False),
)
df_equals(
modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False),
pandas_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=False),
)
df_equals(modin_df.drop_duplicates(inplace=False), pandas_df)
modin_df.drop_duplicates(subset=["A", "B"], inplace=True)
df_equals(modin_df, pandas_df.drop_duplicates(subset=["A", "B"], inplace=False))
modin_df = pd.DataFrame(frame_data)
modin_df.drop_duplicates(subset=["A", "B"], keep=False, inplace=True)
df_equals(modin_df, pandas.DataFrame({"A": [], "B": [], "C": []}))
|
9,363 | def test_wrap_var_set():
assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe)
for item in wrap_var(set(['foo'])):
assert isinstance(item, AnsibleUnsafe)
| def test_wrap_var_set():
assert isinstance(wrap_var(set(['foo'])), set)
for item in wrap_var(set(['foo'])):
assert isinstance(item, AnsibleUnsafe)
|
24,704 | def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default qos settings of the entity being created, that will be overridden
with the user provided qos parameter overrides.
:param options: Options that indicate which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided qos overrides') + ', are not valid')
| def _declare_qos_parameters(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default qos settings of the entity being created, that will be overridden
with the user provided qos parameter overrides.
:param options: Options that indicate which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided qos overrides') + ', are not valid')
|
3,123 | def test_win_type_freq_return_deprecation():
freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
with tm.assert_produces_warning(FutureWarning):
freq_roll.win_type
| def test_win_type_freq_return_deprecation():
freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")
with tm.assert_produces_warning(FutureWarning):
assert freq_roll.win_type == "freq"
|
19,831 | def populate_counts(sf, schema, objs_cached, logger):
objects_to_count = [objname for objname in objs_cached]
counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count)
errors = transports_errors + salesforce_errors
for error in errors[0:10]:
logger.warning(f"Error counting SObjects: {error}")
if len(errors) > 10:
logger.warning(f"{len(errors)} more counting errors surpressed")
for objname, count in counts.items():
schema[objname].count = count
schema.session.flush()
return counts.items()
| def populate_counts(sf, schema, objs_cached, logger):
objects_to_count = [objname for objname in objs_cached]
counts, transports_errors, salesforce_errors = count_sobjects(sf, objects_to_count)
errors = transports_errors + salesforce_errors
for error in errors[0:10]:
logger.warning(f"Error counting SObjects: {error}")
if len(errors) > 10:
logger.warning(f"{len(errors)} more counting errors suppressed")
for objname, count in counts.items():
schema[objname].count = count
schema.session.flush()
return counts.items()
|
31,982 | def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set_path = option.id_set_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
is_create_dependencies_zip = option.create_dependencies_zip
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
if not prepare_and_zip_pack(pack, signature_key, remove_test_playbooks):
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names)
if is_missing_dependencies:
# If the pack is dependent on a new pack
# (which is not yet in the index.zip as it might not have been iterated yet)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.zip - i.e. the new pack exists now.
# We will go over the pack again to add what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if is_create_dependencies_zip and is_create_dependencies_zip != 'false' and marketplace == 'xsoar':
# handle packs with dependencies zip
upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key,
storage_bucket, storage_base_path, id_set_path, packs_list, marketplace)
| def main():
install_logging('Prepare_Content_Packs_For_Testing.log', logger=logging)
option = option_handler()
packs_artifacts_path = option.packs_artifacts_path
id_set_path = option.id_set_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
marketplace = option.marketplace
is_create_dependencies_zip = option.create_dependencies_zip
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path,
storage_base_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name), marketplace) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names, storage_base_path
)
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket, storage_base_path)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
if not prepare_and_zip_pack(pack, signature_key, remove_test_playbooks):
continue
task_status = pack.upload_integration_images(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, storage_base_path, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_rn_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names)
if is_missing_dependencies:
# If the pack is dependent on a new pack
# (which is not yet in the index.zip as it might not have been iterated yet)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.zip - i.e. the new pack exists now.
# We will go over the pack again to add what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_rn_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status, skipped_upload, _ = pack.upload_to_storage(pack.zip_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified,
storage_base_path)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
if skipped_upload and exists_in_index and pack not in packs_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
os.path.dirname(packs_artifacts_path), storage_base_path, marketplace)
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if is_create_dependencies_zip and marketplace == 'xsoar':
# handle packs with dependencies zip
upload_packs_with_dependencies_zip(extract_destination_path, packs_dependencies_mapping, signature_key,
storage_bucket, storage_base_path, id_set_path, packs_list, marketplace)
|
8,665 | def configure(config):
config.define_section('currency', CurrencySection, validate=False)
config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:')
config.currency.configure_setting('enable_regex', 'automatically respond to regex matches:')
| def configure(config):
config.define_section('currency', CurrencySection, validate=False)
config.currency.configure_setting('fixer_io_key', 'API key for fixer IO. Leave blank to use exchangeratesapi.io:')
config.currency.configure_setting('enable_regex', 'Automatically respond to regex matches?')
|
20,224 | def process_missing(missing_ids):
"""Create missing school and alias objects and dump csv of additions. """
csv_out_data = []
csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory,
datetime.date.today())
missing_data = process_datafiles(add_schools=missing_ids)
for school_id in missing_data:
create_school(int(school_id), missing_data[school_id])
data_row = missing_data[school_id]
data_row['ID'] = school_id
csv_out_data.append(data_row)
header = sorted(csv_out_data[0].keys())
dump_csv(csv_slug, header, csv_out_data)
| def process_missing(missing_ids):
"""Create missing school and alias objects and dump csv of additions."""
csv_out_data = []
csv_slug = '{}/schools_added_on_{}.csv'.format(ipeds_directory,
datetime.date.today())
missing_data = process_datafiles(add_schools=missing_ids)
for school_id in missing_data:
create_school(int(school_id), missing_data[school_id])
data_row = missing_data[school_id]
data_row['ID'] = school_id
csv_out_data.append(data_row)
header = sorted(csv_out_data[0].keys())
dump_csv(csv_slug, header, csv_out_data)
|
30,938 | def write_data(sheet, data_item, data_headers, workbook, bold, border):
if not isinstance(data_item, list):
data_item = [data_item]
if not data_headers:
data_headers = list(data_item[0].keys())
worksheet = workbook.add_worksheet(sheet)
row = 0
col = 0
for key in data_headers:
worksheet.write(row, col, key, bold)
col += 1
for item in data_item:
if len(item) > 0:
col = 0
row += 1
for value in data_headers:
if item.get(value):
worksheet.write(row, col, item.get(value), border)
col += 1
else:
raise ValueError(f'The header "{value}" does not exist in the given data item.')
| def write_data(sheet, data_item, data_headers, workbook, bold, border):
if not isinstance(data_item, list):
data_item = [data_item]
if not data_headers:
data_headers = list(data_item[0].keys())
worksheet = workbook.add_worksheet(sheet)
row = 0
col = 0
for key in data_headers:
worksheet.write(row, col, key, bold)
col += 1
for item in data_item:
if item:
col = 0
row += 1
for value in data_headers:
if item.get(value):
worksheet.write(row, col, item.get(value), border)
col += 1
else:
raise ValueError(f'The header "{value}" does not exist in the given data item.')
|
31,006 | def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]:
"""
Get a packs dir names from a contribution pull request changed files
Args:
branch: The contrib branch
pr_number: The contrib PR
repo: The contrib repo
Returns:
A list of packs dir names, if found.
"""
page = 1
list_packs_dir_names = []
while True:
response = requests.get(f'https://api.github.com/repos/demisto/content/pulls/{pr_number}/files',
params={'page': str(page)})
response.raise_for_status()
files = response.json()
if not files:
break
for pr_file in files:
if pr_file['filename'].startswith('Packs/'):
pack_dir_name = pr_file['filename'].split('/')[1]
if pack_dir_name not in list_packs_dir_names:
list_packs_dir_names.append(pack_dir_name)
page += 1
return list_packs_dir_names
| def get_pack_dir(branch: str, pr_number: str, repo: str) -> List[str]:
"""
Get packs dir names from a contribution pull request changed files
Args:
branch: The contrib branch
pr_number: The contrib PR
repo: The contrib repo
Returns:
A list of packs dir names, if found.
"""
page = 1
list_packs_dir_names = []
while True:
response = requests.get(f'https://api.github.com/repos/demisto/content/pulls/{pr_number}/files',
params={'page': str(page)})
response.raise_for_status()
files = response.json()
if not files:
break
for pr_file in files:
if pr_file['filename'].startswith('Packs/'):
pack_dir_name = pr_file['filename'].split('/')[1]
if pack_dir_name not in list_packs_dir_names:
list_packs_dir_names.append(pack_dir_name)
page += 1
return list_packs_dir_names
|
44,408 | def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array:
"""
Converts the configuration σ to a 64-bit integer labelling the Hilbert Space.
.. Note::
Requires jax >= 0.3.17 and will crash on older versions.
Args:
hilbert: The Hilbert space
σ: A single or a batch of configurations
Returns:
a single integer or a batch of integer indices.
"""
if module_version("jax") < (0, 3, 17):
raise RuntimeError(
"The jitted conversion of bit-strings to hilbert numbers"
"is only supported with jax.__version__ >= 0.3.17, but you "
f"have {module_version('jax')}"
)
if not hilbert.is_indexable:
raise ValueError(
f"Hilbert space {hilbert} is too large to be indexed or "
f"cannot be indexed at all."
)
# calls back into python
return jax.pure_callback(
hilbert.states_to_numbers,
jax.ShapeDtypeStruct(σ.shape[:-1], jnp.int64),
σ,
vectorized=True,
)
| def states_to_numbers(hilbert: DiscreteHilbert, σ: Array) -> Array:
"""
Converts the configuration σ to a 64-bit integer labelling the Hilbert Space.
.. Note::
Requires jax >= 0.3.17 and will raise an exception on older versions.
Args:
hilbert: The Hilbert space
σ: A single or a batch of configurations
Returns:
a single integer or a batch of integer indices.
"""
if module_version("jax") < (0, 3, 17):
raise RuntimeError(
"The jitted conversion of bit-strings to hilbert numbers"
"is only supported with jax.__version__ >= 0.3.17, but you "
f"have {module_version('jax')}"
)
if not hilbert.is_indexable:
raise ValueError(
f"Hilbert space {hilbert} is too large to be indexed or "
f"cannot be indexed at all."
)
# calls back into python
return jax.pure_callback(
hilbert.states_to_numbers,
jax.ShapeDtypeStruct(σ.shape[:-1], jnp.int64),
σ,
vectorized=True,
)
|
6,606 | def get_or_make_bin(item_code, warehouse):
bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse})
if not bin_record:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.insert()
bin_record = bin_obj.name
return bin_record
| def get_or_make_bin(item_code, warehouse) -> str:
bin_record = frappe.db.get_value('Bin', {'item_code': item_code, 'warehouse': warehouse})
if not bin_record:
bin_obj = frappe.get_doc({
"doctype": "Bin",
"item_code": item_code,
"warehouse": warehouse,
})
bin_obj.flags.ignore_permissions = 1
bin_obj.insert()
bin_record = bin_obj.name
return bin_record
|
13,911 | def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BR', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
| def _find_excluded_ranges(
lines: List[Tuple[int, str]],
*,
warnings: _ExclusionRangeWarnings,
exclude_lines_by_pattern: Optional[str] = None,
exclude_branches_by_pattern: Optional[str] = None,
exclude_pattern_prefix: str,
) -> Callable[[int], bool]:
"""
Scan through all lines to find line ranges and branch ranges covered by exclusion markers.
Example:
>>> lines = [(11, '//PREFIX_EXCL_LINE'), (13, '//IGNORE_LINE'), (15, '//PREFIX_EXCL_START'), (18, '//PREFIX_EXCL_STOP'),
... (21, '//PREFIX_EXCL_BR_LINE'), (23, '//IGNORE_BR'), (25, '//PREFIX_EXCL_BR_START'), (28, '//PREFIX_EXCL_BR_STOP')]
>>> [exclude_line, exclude_branch] = _find_excluded_ranges(
... lines, warnings=..., exclude_lines_by_pattern = '.*IGNORE_LINE',
... exclude_branches_by_pattern = '.*IGNORE_BRANCH', exclude_pattern_prefix='PREFIX')
>>> [lineno for lineno in range(30) if exclude_line(lineno)]
[11, 13, 15, 16, 17]
>>> [lineno for lineno in range(30) if exclude_branch(lineno)]
[21, 23, 25, 26, 27]
"""
exclude_lines_by_pattern_regex = None
if exclude_lines_by_pattern:
exclude_lines_by_pattern_regex = re.compile(exclude_lines_by_pattern)
exclude_branches_by_pattern_regex = None
if exclude_branches_by_pattern:
exclude_branches_by_pattern_regex = re.compile(exclude_branches_by_pattern)
# possibly overlapping half-open ranges that are excluded
exclude_line_ranges: List[Tuple[int, int]] = []
exclude_branch_ranges: List[Tuple[int, int]] = []
exclusion_stack_line = []
exclusion_stack_branch = []
for lineno, code in lines:
if _EXCLUDE_FLAG in code:
# process the exclusion marker
#
# header is a marker name like LCOV or GCOVR
#
# START flags are added to the exclusion stack
# STOP flags remove a marker from the exclusion stack
# line exclusion
excl_line_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_LINE_PATTERN_POSTFIX
)
for header, flag in excl_line_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_line:
warnings.line_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_line[-1][1]
)
else:
exclude_line_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_line.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_line:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_line.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_line_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
# branch exclusion
excl_branch_pattern = re.compile(
"(" + exclude_pattern_prefix + ")" + _EXCLUDE_BRANCH_PATTERN_POSTFIX
)
for header, flag in excl_branch_pattern.findall(code):
if flag == "LINE":
if exclusion_stack_branch:
warnings.branch_after_start(
lineno, f"{header}_EXCL_LINE", exclusion_stack_branch[-1][1]
)
else:
exclude_branch_ranges.append((lineno, lineno + 1))
if flag == "START":
exclusion_stack_branch.append((header, lineno))
elif flag == "STOP":
if not exclusion_stack_branch:
warnings.stop_without_start(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
continue
start_header, start_lineno = exclusion_stack_branch.pop()
if header != start_header:
warnings.mismatched_start_stop(
start_lineno,
f"{start_header}_EXCL_START",
lineno,
f"{header}_EXCL_STOP",
)
exclude_branch_ranges.append((start_lineno, lineno))
else: # pragma: no cover
pass
if exclude_lines_by_pattern_regex:
if exclude_lines_by_pattern_regex.match(code):
exclude_line_ranges.append((lineno, lineno + 1))
if exclude_branches_by_pattern_regex:
if exclude_branches_by_pattern_regex.match(code):
exclude_branch_ranges.append((lineno, lineno + 1))
for header, lineno in exclusion_stack_line:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
for header, lineno in exclusion_stack_branch:
warnings.start_without_stop(
lineno, f"{header}_EXCL_START", f"{header}_EXCL_STOP"
)
return [
_make_is_in_any_range(exclude_line_ranges),
_make_is_in_any_range(exclude_branch_ranges),
]
|
5,578 | def parse_metar(metar_text, year, month, station_metadata=station_info):
"""Parse a METAR report in text form into a list of named tuples.
Parameters
----------
metar_text : str
The METAR report
station_metadata : dict
Mapping of station identifiers to station metadata
year : int
Reported year of observation for constructing 'date_time'
month : int
Reported month of observation for constructing 'date_time'
Returns
-------
metar : namedtuple
Named tuple of parsed METAR fields
Notes
-----
Returned data has named tuples with the following attributes:
* 'station_id': Station Identifier (ex. KLOT)
* 'latitude': Latitude of the observation, measured in degrees
* 'longitude': Longitude of the observation, measured in degrees
* 'elevation': Elevation of the observation above sea level, measured in meters
* 'date_time': Date and time of the observation, datetime object
* 'wind_direction': Direction the wind is coming from, measured in degrees
* 'wind_speed': Wind speed, measured in knots
* 'wind_gust': Wind gusts, measured in knots
* 'current_wx1': Current weather (1 of 3)
* 'current_wx2': Current weather (2 of 3)
* 'current_wx3': Current weather (3 of 3)
* 'skyc1': Sky cover (ex. FEW)
* 'skylev1': Height of sky cover 1, measured in feet
* 'skyc2': Sky cover (ex. OVC)
* 'skylev2': Height of sky cover 2, measured in feet
* 'skyc3': Sky cover (ex. FEW)
* 'skylev3': Height of sky cover 3, measured in feet
* 'skyc4': Sky cover (ex. CLR)
* 'skylev4:': Height of sky cover 4, measured in feet
* 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values
* 'temperature': Temperature, measured in degrees Celsius
* 'dewpoint': Dewpoint, measured in degrees Celsius
* 'altimeter': Altimeter value, measured in inches of mercury
* 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'visibility': Visibility distance, measured in meters
* 'remarks': Remarks (unparsed) in the report
"""
from ..plots.wx_symbols import wx_code_to_numeric
# Decode the data using the parser (built using Canopy) the parser utilizes a grammar
# file which follows the format structure dictated by the WMO Handbook, but has the
# flexibility to decode the METAR text when there are missing or incorrectly
# encoded values
tree = parse(metar_text)
# Station ID which is used to find the latitude, longitude, and elevation
station_id = tree.siteid.text.strip()
# Extract the latitude and longitude values from 'master' dictionary
try:
info = station_metadata[station_id]
lat = info.latitude
lon = info.longitude
elev = info.altitude
except KeyError:
lat = np.nan
lon = np.nan
elev = np.nan
# Set the datetime, day, and time_utc
try:
day_time_utc = tree.datetime.text.strip()
day = int(day_time_utc[0:2])
hour = int(day_time_utc[2:4])
minute = int(day_time_utc[4:6])
date_time = datetime(year, month, day, hour, minute)
except ValueError:
date_time = np.nan
# Set the wind values
wind_units = 'kts'
try:
# If there are missing wind values, set wind speed and wind direction to nan
if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''):
wind_dir = np.nan
wind_spd = np.nan
# If the wind direction is variable, set wind direction to nan but keep the wind speed
else:
wind_spd = float(tree.wind.wind_spd.text)
if 'MPS' in tree.wind.text:
wind_units = 'm/s'
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots')
if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'):
wind_dir = np.nan
else:
wind_dir = int(tree.wind.wind_dir.text)
# If there are any errors, return nan
except ValueError:
wind_dir = np.nan
wind_spd = np.nan
# Parse out the wind gust field
if 'G' in tree.wind.text:
wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]),
wind_units).m_as('knots')
else:
wind_gust = np.nan
# Handle visibility
try:
if tree.vis.text.endswith('SM'):
visibility = 0
# Strip off the SM and any whitespace around the value and any leading 'M'
vis_str = tree.vis.text[:-2].strip().lstrip('M')
# Case of e.g. 1 1/4SM
if ' ' in vis_str:
whole, vis_str = vis_str.split(maxsplit=1)
visibility += int(whole)
# Handle fraction regardless
if '/' in vis_str:
num, denom = vis_str.split('/', maxsplit=1)
visibility += int(num) / int(denom)
else: # Should be getting all cases of whole number without fraction
visibility += int(vis_str)
visibility = units.Quantity(visibility, 'miles').m_as('meter')
# CAVOK means vis is "at least 10km" and no significant clouds or weather
elif 'CAVOK' in tree.vis.text:
visibility = 10000
elif not tree.vis.text or tree.vis.text.strip() == '////':
visibility = np.nan
else:
# Only worry about the first 4 characters (digits) and ignore possible 'NDV'
visibility = int(tree.vis.text.strip()[:4])
# If there are any errors, return nan
except ValueError:
visibility = np.nan
# Set the weather symbols
# If the weather symbol is missing, set values to nan
current_wx = []
current_wx_symbol = []
if tree.curwx.text.strip() not in ('', '//', 'NSW'):
current_wx = tree.curwx.text.strip().split()
# Handle having e.g. '+' and 'TSRA' parsed into separate items
if current_wx[0] in ('-', '+') and current_wx[1]:
current_wx[0] += current_wx[1]
current_wx.pop(1)
current_wx_symbol = wx_code_to_numeric(current_wx).tolist()
while len(current_wx) < 3:
current_wx.append(np.nan)
while len(current_wx_symbol) < 3:
current_wx_symbol.append(0)
# Set the sky conditions
skyc = [np.nan] * 4
skylev = [np.nan] * 4
if tree.skyc.text[1:3] == 'VV':
skyc[0] = 'VV'
level = tree.skyc.text.strip()[2:5]
skylev[0] = np.nan if '/' in level else 100 * int(level)
else:
for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)):
cover = part[:3]
level = part[3:6] # Strips off any ending text like in FEW017CB
if '/' not in cover:
skyc[ind] = cover
if level and '/' not in level:
with contextlib.suppress(ValueError):
skylev[ind] = float(level) * 100
# Set the cloud cover variable (measured in oktas)
if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text:
cloudcover = 8
elif 'BKN' in tree.skyc.text:
cloudcover = 6
elif 'SCT' in tree.skyc.text:
cloudcover = 4
elif 'FEW' in tree.skyc.text:
cloudcover = 2
elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text
or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text):
cloudcover = 0
else:
cloudcover = 10
# Set the temperature and dewpoint
temp = np.nan
dewp = np.nan
if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM':
with contextlib.suppress(ValueError):
temp = float(tree.temp_dewp.temp.text[-2:])
if 'M' in tree.temp_dewp.temp.text:
temp *= -1
with contextlib.suppress(ValueError):
dewp = float(tree.temp_dewp.dewp.text[-2:])
if 'M' in tree.temp_dewp.dewp.text:
dewp *= -1
# Set the altimeter value and sea level pressure
if tree.altim.text:
val = float(tree.altim.text.strip()[1:5])
altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg')
else:
altim = np.nan
# Strip off extraneous stuff off the remarks section
remarks = tree.remarks.text.lstrip().rstrip('= ')
if remarks.startswith('RMK'):
remarks = remarks[3:].strip()
# Returns a named tuple with all the relevant variables
return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust,
visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0],
skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp,
dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2],
remarks)
| def parse_metar(metar_text, year, month, station_metadata=station_info):
"""Parse a METAR report in text form into a list of named tuples.
Parameters
----------
metar_text : str
The METAR report
station_metadata : dict
Mapping of station identifiers to station metadata
year : int
Reported year of observation for constructing 'date_time'
month : int
Reported month of observation for constructing 'date_time'
Returns
-------
metar : namedtuple
Named tuple of parsed METAR fields
Notes
-----
Returned data has named tuples with the following attributes:
* 'station_id': Station Identifier (ex. KLOT)
* 'latitude': Latitude of the observation, measured in degrees
* 'longitude': Longitude of the observation, measured in degrees
* 'elevation': Elevation of the observation above sea level, measured in meters
* 'date_time': Date and time of the observation, datetime object
* 'wind_direction': Direction the wind is coming from, measured in degrees
* 'wind_speed': Wind speed, measured in knots
* 'wind_gust': Wind gust, measured in knots
* 'current_wx1': Current weather (1 of 3)
* 'current_wx2': Current weather (2 of 3)
* 'current_wx3': Current weather (3 of 3)
* 'skyc1': Sky cover (ex. FEW)
* 'skylev1': Height of sky cover 1, measured in feet
* 'skyc2': Sky cover (ex. OVC)
* 'skylev2': Height of sky cover 2, measured in feet
* 'skyc3': Sky cover (ex. FEW)
* 'skylev3': Height of sky cover 3, measured in feet
* 'skyc4': Sky cover (ex. CLR)
* 'skylev4:': Height of sky cover 4, measured in feet
* 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values
* 'temperature': Temperature, measured in degrees Celsius
* 'dewpoint': Dewpoint, measured in degrees Celsius
* 'altimeter': Altimeter value, measured in inches of mercury
* 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_
Attachment IV
* 'visibility': Visibility distance, measured in meters
* 'remarks': Remarks (unparsed) in the report
"""
from ..plots.wx_symbols import wx_code_to_numeric
# Decode the data using the parser (built using Canopy) the parser utilizes a grammar
# file which follows the format structure dictated by the WMO Handbook, but has the
# flexibility to decode the METAR text when there are missing or incorrectly
# encoded values
tree = parse(metar_text)
# Station ID which is used to find the latitude, longitude, and elevation
station_id = tree.siteid.text.strip()
# Extract the latitude and longitude values from 'master' dictionary
try:
info = station_metadata[station_id]
lat = info.latitude
lon = info.longitude
elev = info.altitude
except KeyError:
lat = np.nan
lon = np.nan
elev = np.nan
# Set the datetime, day, and time_utc
try:
day_time_utc = tree.datetime.text.strip()
day = int(day_time_utc[0:2])
hour = int(day_time_utc[2:4])
minute = int(day_time_utc[4:6])
date_time = datetime(year, month, day, hour, minute)
except ValueError:
date_time = np.nan
# Set the wind values
wind_units = 'kts'
try:
# If there are missing wind values, set wind speed and wind direction to nan
if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''):
wind_dir = np.nan
wind_spd = np.nan
# If the wind direction is variable, set wind direction to nan but keep the wind speed
else:
wind_spd = float(tree.wind.wind_spd.text)
if 'MPS' in tree.wind.text:
wind_units = 'm/s'
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots')
if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'):
wind_dir = np.nan
else:
wind_dir = int(tree.wind.wind_dir.text)
# If there are any errors, return nan
except ValueError:
wind_dir = np.nan
wind_spd = np.nan
# Parse out the wind gust field
if 'G' in tree.wind.text:
wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]),
wind_units).m_as('knots')
else:
wind_gust = np.nan
# Handle visibility
try:
if tree.vis.text.endswith('SM'):
visibility = 0
# Strip off the SM and any whitespace around the value and any leading 'M'
vis_str = tree.vis.text[:-2].strip().lstrip('M')
# Case of e.g. 1 1/4SM
if ' ' in vis_str:
whole, vis_str = vis_str.split(maxsplit=1)
visibility += int(whole)
# Handle fraction regardless
if '/' in vis_str:
num, denom = vis_str.split('/', maxsplit=1)
visibility += int(num) / int(denom)
else: # Should be getting all cases of whole number without fraction
visibility += int(vis_str)
visibility = units.Quantity(visibility, 'miles').m_as('meter')
# CAVOK means vis is "at least 10km" and no significant clouds or weather
elif 'CAVOK' in tree.vis.text:
visibility = 10000
elif not tree.vis.text or tree.vis.text.strip() == '////':
visibility = np.nan
else:
# Only worry about the first 4 characters (digits) and ignore possible 'NDV'
visibility = int(tree.vis.text.strip()[:4])
# If there are any errors, return nan
except ValueError:
visibility = np.nan
# Set the weather symbols
# If the weather symbol is missing, set values to nan
current_wx = []
current_wx_symbol = []
if tree.curwx.text.strip() not in ('', '//', 'NSW'):
current_wx = tree.curwx.text.strip().split()
# Handle having e.g. '+' and 'TSRA' parsed into separate items
if current_wx[0] in ('-', '+') and current_wx[1]:
current_wx[0] += current_wx[1]
current_wx.pop(1)
current_wx_symbol = wx_code_to_numeric(current_wx).tolist()
while len(current_wx) < 3:
current_wx.append(np.nan)
while len(current_wx_symbol) < 3:
current_wx_symbol.append(0)
# Set the sky conditions
skyc = [np.nan] * 4
skylev = [np.nan] * 4
if tree.skyc.text[1:3] == 'VV':
skyc[0] = 'VV'
level = tree.skyc.text.strip()[2:5]
skylev[0] = np.nan if '/' in level else 100 * int(level)
else:
for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)):
cover = part[:3]
level = part[3:6] # Strips off any ending text like in FEW017CB
if '/' not in cover:
skyc[ind] = cover
if level and '/' not in level:
with contextlib.suppress(ValueError):
skylev[ind] = float(level) * 100
# Set the cloud cover variable (measured in oktas)
if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text:
cloudcover = 8
elif 'BKN' in tree.skyc.text:
cloudcover = 6
elif 'SCT' in tree.skyc.text:
cloudcover = 4
elif 'FEW' in tree.skyc.text:
cloudcover = 2
elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text
or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text):
cloudcover = 0
else:
cloudcover = 10
# Set the temperature and dewpoint
temp = np.nan
dewp = np.nan
if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM':
with contextlib.suppress(ValueError):
temp = float(tree.temp_dewp.temp.text[-2:])
if 'M' in tree.temp_dewp.temp.text:
temp *= -1
with contextlib.suppress(ValueError):
dewp = float(tree.temp_dewp.dewp.text[-2:])
if 'M' in tree.temp_dewp.dewp.text:
dewp *= -1
# Set the altimeter value and sea level pressure
if tree.altim.text:
val = float(tree.altim.text.strip()[1:5])
altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg')
else:
altim = np.nan
# Strip off extraneous stuff off the remarks section
remarks = tree.remarks.text.lstrip().rstrip('= ')
if remarks.startswith('RMK'):
remarks = remarks[3:].strip()
# Returns a named tuple with all the relevant variables
return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust,
visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0],
skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp,
dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2],
remarks)
|
42,005 | def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> Tuple[Dict[complex, Union[int, float]], float]:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_neighbor = np.inf
sum_neighbors = 0
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zmap.get(coord + offset, None)
if neighbor is None:
# off the edge or not filled in
continue
sum_neighbors += neighbor # type: ignore
n_neighbors += 1
if current_val is not None:
max_neighbor = max(max_neighbor, neighbor)
min_neighbor = min(min_neighbor, neighbor)
# fill value is just mean of its neighbors
new_val = sum_neighbors / n_neighbors
if current_val is None:
zmap[coord] = new_val
max_fractional_delta = 1.0
else:
zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val
if max_neighbor > min_neighbor:
fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor)
max_fractional_delta = max(overshoot, fractional_delta)
return zmap, max_fractional_delta
| def _run_iteration(
zmap: Dict[complex, Union[int, float]], coordinates: List[complex], overshoot: float = 0.0
) -> Tuple[Dict[complex, Union[int, float]], float]:
max_fractional_delta = 0.0
for coord in coordinates:
current_val = zmap.get(coord, None)
max_neighbor = -np.inf
min_neighbor = np.inf
sum_neighbors = 0
n_neighbors = 0
for offset in NEIGHBOR_OFFSETS:
neighbor = zmap.get(coord + offset, None)
if neighbor is None:
# off the edge or not filled in
continue
sum_neighbors += neighbor # type: ignore
n_neighbors += 1
if current_val is not None:
max_neighbor = max(max_neighbor, neighbor)
min_neighbor = min(min_neighbor, neighbor)
# fill value is just mean of its neighbors
new_val = sum_neighbors / n_neighbors
if current_val is None:
zmap[coord] = new_val
max_fractional_delta = 1.0
else:
zmap[coord] = (1 + overshoot) * new_val - overshoot * current_val
if max_neighbor > min_neighbor:
fractional_delta = abs(new_val - current_val) / (max_neighbor - min_neighbor)
max_fractional_delta = max(overshoot, fractional_delta)
return max_fractional_delta
|
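The two versions above differ only in what is returned (the map together with the convergence measure versus the measure alone). As a self-contained illustration of the underlying neighbor-averaging relaxation, here is a minimal sketch with hypothetical names; it is not the surrounding module's code.

NEIGHBOR_OFFSETS = (1, -1, 1j, -1j)  # assumed 4-connected grid, as in the loops above

def relax(zmap, coordinates, tol=1e-3, max_iters=1000):
    """Replace each coordinate's value with the mean of its known neighbors
    until the largest fractional change drops below tol."""
    for _ in range(max_iters):
        max_delta = 0.0
        for coord in coordinates:
            neighbors = [zmap[coord + off] for off in NEIGHBOR_OFFSETS
                         if coord + off in zmap]
            if not neighbors:
                continue
            new_val = sum(neighbors) / len(neighbors)
            old_val = zmap.get(coord)
            zmap[coord] = new_val
            if old_val is None:
                max_delta = 1.0  # newly filled cells always count as a full change
            elif max(neighbors) > min(neighbors):
                max_delta = max(max_delta,
                                abs(new_val - old_val) / (max(neighbors) - min(neighbors)))
        if max_delta < tol:
            break
    return zmap

# Fix the left and right edges of a 5x5 grid to 0 and 1, then relax the interior.
grid = {0 + y * 1j: 0.0 for y in range(5)}
grid.update({4 + y * 1j: 1.0 for y in range(5)})
interior = [x + y * 1j for x in range(1, 4) for y in range(5)]
print(relax(grid, interior))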
38,902 | def field_singleton_schema( # noqa: C901 (ignore complexity)
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
This function is indirectly used by ``field_schema()``; you should probably be using that function.
Take a single Pydantic ``Field``, and return its schema and any additional definitions from sub-models.
"""
ref_prefix = ref_prefix or default_prefix
definitions: Dict[str, Any] = {}
if field.sub_fields:
return field_singleton_sub_fields_schema(
field.sub_fields,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models,
)
if field.type_ is Any:
return {}, definitions # no restrictions
if is_callable_type(field.type_):
raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
f_schema: Dict[str, Any] = {}
if field.schema is not None and field.schema.const:
f_schema['const'] = field.default
if issubclass(field.type_, Enum):
f_schema.update({'enum': [item.value for item in field.type_]})
# Don't return immediately, to allow adding specific types
for field_name, schema_name in validation_attribute_to_schema_keyword.items():
field_value = getattr(field.type_, field_name, None)
if field_value is not None:
if field_name == 'regex':
field_value = field_value.pattern
f_schema[schema_name] = field_value
for type_, t_schema in field_class_to_schema_enum_enabled:
if issubclass(field.type_, type_):
f_schema.update(t_schema)
break
# Return schema, with or without enum definitions
if f_schema:
return f_schema, definitions
for type_, t_schema in field_class_to_schema_enum_disabled:
if issubclass(field.type_, type_):
return t_schema, definitions
# Handle dataclass-based models
field_type = field.type_
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), pydantic.BaseModel):
field_type = cast(Type['dataclasses.DataclassType'], field_type)
field_type = field_type.__pydantic_model__
if issubclass(field_type, pydantic.BaseModel):
model_name = model_name_map[field_type]
if field_type not in known_models:
sub_schema, sub_definitions = model_process_schema(
field_type,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
definitions.update(sub_definitions)
definitions[model_name] = sub_schema
else:
definitions[model_name] = None
schema_ref = {'$ref': f'{ref_prefix}{model_name}'}
if not schema_overrides:
return schema_ref, definitions
else:
return {'allOf': [schema_ref]}, definitions
raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
| def field_singleton_schema( # noqa: C901 (ignore complexity)
field: Field,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
This function is indirectly used by ``field_schema()``; you should probably be using that function.
Take a single Pydantic ``Field``, and return its schema and any additional definitions from sub-models.
"""
ref_prefix = ref_prefix or default_prefix
definitions: Dict[str, Any] = {}
if field.sub_fields:
return field_singleton_sub_fields_schema(
field.sub_fields,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models,
)
if field.type_ is Any:
return {}, definitions # no restrictions
if is_callable_type(field.type_):
raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
f_schema: Dict[str, Any] = {}
if field.schema is not None and field.schema.const:
f_schema['const'] = field.default
if issubclass(field.type_, Enum):
f_schema.update({'enum': [item.value for item in field.type_]})
# Don't return immediately, to allow adding specific types
for field_name, schema_name in validation_attribute_to_schema_keyword.items():
field_value = getattr(field.type_, field_name, None)
if field_value is not None:
if field_name == 'regex':
field_value = field_value.pattern
f_schema[schema_name] = field_value
for type_, t_schema in field_class_to_schema_enum_enabled:
if issubclass(field.type_, type_):
f_schema.update(t_schema)
break
# Return schema, with or without enum definitions
if f_schema:
return f_schema, definitions
for type_, t_schema in field_class_to_schema_enum_disabled:
if issubclass(field.type_, type_):
return t_schema, definitions
# Handle dataclass-based models
field_type = field.type_
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), pydantic.BaseModel):
field_type = cast(Type['dataclasses.DataclassType'], field_type)
field_type = field_type.__pydantic_model__
if issubclass(field_type, pydantic.BaseModel):
model_name = model_name_map[field_type]
if field_type not in known_models:
sub_schema, sub_definitions = model_process_schema(
field_type,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
definitions.update(sub_definitions)
definitions[model_name] = sub_schema
else:
definitions[model_name] = None
schema_ref = {'$ref': ref_prefix + model_name}
if not schema_overrides:
return schema_ref, definitions
else:
return {'allOf': [schema_ref]}, definitions
raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
|
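The only change in this pair is how the `$ref` string is assembled. For orientation, a small generic example of the public API that indirectly exercises `field_singleton_schema()`; it assumes a pydantic version exposing `Model.schema()` and is not taken from the surrounding codebase.

from enum import Enum
from pydantic import BaseModel, constr  # assumption: a pydantic install with JSON Schema support

class Color(Enum):
    RED = 'red'
    BLUE = 'blue'

class Item(BaseModel):
    name: constr(max_length=10)
    color: Color

# field_singleton_schema() is reached via field_schema() when the model schema
# is built: the Enum becomes an 'enum' list and the constr() constraint maps to
# the 'maxLength' JSON Schema keyword.
print(Item.schema())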
44,177 | def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="will be deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
| def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="is deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
|
4,560 | def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter="butterworth",
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signals, in
the following order:
- detrend
- low- and high-pass filter
- remove confounds
- standardize
Low-pass filtering improves specificity.
High-pass filtering should be kept small, to keep some
sensitivity.
Filtering is only meaningful on evenly-sampled signals.
According to Lindquist et al. (2018), removal of confounds will be done
orthogonally to temporal filters (low- and/or high-pass filters), if both
are specified.
Parameters
----------
signals: numpy.ndarray
Timeseries. Must have shape (instant number, features number).
This array is not modified.
sessions : numpy array, optional
Add a session level to the cleaning process. Each session will be
cleaned independently. Must be a 1D array of n_samples elements.
confounds: numpy.ndarray, str, DataFrame or list of
Confounds timeseries. Shape must be
(instant number, confound number), or just (instant number,)
The number of time instants in signals and confounds must be
identical (i.e. signals.shape[0] == confounds.shape[0]).
If a string is provided, it is assumed to be the name of a csv file
containing signals as columns, with an optional one-line header.
If a list is provided, all confounds are removed from the input
signal, as if all were in the same array.
t_r: float
Repetition time, in seconds (sampling period). Set to None if not specified.
filter: {'butterworth', False}
Filtering methods.
'butterworth': perform butterworth filtering.
False : Do not perform filtering.
low_pass, high_pass: float
Respectively high and low cutoff frequencies, in Hertz.
detrend: bool
If detrending should be applied on timeseries (before
confound removal)
standardize: {'zscore', 'psc', False}, default is 'zscore'
Strategy to standardize the signal.
'zscore': the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
'psc': Timeseries are shifted to zero mean value and scaled
to percent signal change (as compared to original mean signal).
False : Do not standardize the data.
standardize_confounds: boolean, optional, default is True
If standardize_confounds is True, the confounds are z-scored:
their mean is put to 0 and their variance to 1 in the time dimension.
ensure_finite: bool
If True, the non-finite values (NANs and infs) found in the data
will be replaced by zeros.
Returns
-------
cleaned_signals: numpy.ndarray
Input signals, cleaned. Same shape as `signals`.
Notes
-----
Confounds removal is based on a projection on the orthogonal
of the signal space. See `Friston, K. J., A. P. Holmes,
K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
"Statistical Parametric Maps in Functional Imaging: A General
Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
<http://dx.doi.org/10.1002/hbm.460020402>`_
Orthogonalization between temporal filters and confound removal is based on
suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).
Modular preprocessing pipelines can reintroduce artifacts into fMRI data.
bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_
See Also
--------
nilearn.image.clean_img
"""
# Read confounds and signals
signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite)
# check if filter parameters are satisfied
_ = _check_filter_parameters(filter, low_pass, high_pass, t_r)
# Restrict the signal to the orthogonal of the confounds
if sessions is not None:
signals = _process_session(signals, sessions, detrend, standardize,
confounds, low_pass, high_pass, t_r)
# Detrend
# Detrend and filtering should apply to confounds, if confound presents
# keep filters orthogonal (according to Lindquist et al. (2018))
if detrend:
mean_signals = signals.mean(axis=0)
signals = _standardize(signals, standardize=False, detrend=detrend)
if confounds is not None:
confounds = _standardize(confounds, standardize=False,
detrend=detrend)
# Apply low- and high-pass filters
if filter == "butterworth" and t_r is not None: # this change enticipate extra fltering methods
signals = butterworth(signals, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
if confounds is not None:
# Apply low- and high-pass filters to keep filters orthogonal
# (according to Lindquist et al. (2018))
confounds = butterworth(confounds, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
# if filter == "cosine":
# ...
# Remove confounds
if confounds is not None:
confounds = _standardize(confounds, standardize=standardize_confounds,
detrend=False)
if not standardize_confounds:
# Improve numerical stability by controlling the range of
# confounds. We don't rely on _standardize as it removes any
# constant contribution to confounds.
confound_max = np.max(np.abs(confounds), axis=0)
confound_max[confound_max == 0] = 1
confounds /= confound_max
# Pivoting in qr decomposition was added in scipy 0.10
Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
signals -= Q.dot(Q.T).dot(signals)
# Standardize
if detrend and (standardize == 'psc'):
# If the signal is detrended, we have to know the original mean
# signal to calculate the psc.
signals = _standardize(signals + mean_signals, standardize=standardize,
detrend=False)
else:
signals = _standardize(signals, standardize=standardize,
detrend=False)
return signals
| def clean(signals, sessions=None, detrend=True, standardize='zscore',
confounds=None, standardize_confounds=True, filter='butterworth',
low_pass=None, high_pass=None, t_r=2.5, ensure_finite=False):
"""Improve SNR on masked fMRI signals.
This function can do several things on the input signals, in
the following order:
- detrend
- low- and high-pass filter
- remove confounds
- standardize
Low-pass filtering improves specificity.
High-pass filtering should be kept small, to keep some
sensitivity.
Filtering is only meaningful on evenly-sampled signals.
According to Lindquist et al. (2018), removal of confounds will be done
orthogonally to temporal filters (low- and/or high-pass filters), if both
are specified.
Parameters
----------
signals: numpy.ndarray
Timeseries. Must have shape (instant number, features number).
This array is not modified.
sessions : numpy array, optional
Add a session level to the cleaning process. Each session will be
cleaned independently. Must be a 1D array of n_samples elements.
confounds: numpy.ndarray, str, DataFrame or list of
Confounds timeseries. Shape must be
(instant number, confound number), or just (instant number,)
The number of time instants in signals and confounds must be
identical (i.e. signals.shape[0] == confounds.shape[0]).
If a string is provided, it is assumed to be the name of a csv file
containing signals as columns, with an optional one-line header.
If a list is provided, all confounds are removed from the input
signal, as if all were in the same array.
t_r: float
Repetition time, in seconds (sampling period). Set to None if not specified.
filter: {'butterworth', False}
Filtering methods.
'butterworth': perform butterworth filtering.
False : Do not perform filtering.
low_pass, high_pass: float
Respectively high and low cutoff frequencies, in Hertz.
detrend: bool
If detrending should be applied on timeseries (before
confound removal)
standardize: {'zscore', 'psc', False}, default is 'zscore'
Strategy to standardize the signal.
'zscore': the signal is z-scored. Timeseries are shifted
to zero mean and scaled to unit variance.
'psc': Timeseries are shifted to zero mean value and scaled
to percent signal change (as compared to original mean signal).
False : Do not standardize the data.
standardize_confounds: boolean, optional, default is True
If standardize_confounds is True, the confounds are z-scored:
their mean is put to 0 and their variance to 1 in the time dimension.
ensure_finite: bool
If True, the non-finite values (NANs and infs) found in the data
will be replaced by zeros.
Returns
-------
cleaned_signals: numpy.ndarray
Input signals, cleaned. Same shape as `signals`.
Notes
-----
Confounds removal is based on a projection on the orthogonal
of the signal space. See `Friston, K. J., A. P. Holmes,
K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
"Statistical Parametric Maps in Functional Imaging: A General
Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
<http://dx.doi.org/10.1002/hbm.460020402>`_
Orthogonalization between temporal filters and confound removal is based on
suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).
Modular preprocessing pipelines can reintroduce artifacts into fMRI data.
bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_
See Also
--------
nilearn.image.clean_img
"""
# Read confounds and signals
signals, confounds = _sanitize_inputs(signals, confounds, ensure_finite)
# check if filter parameters are satisfied
_ = _check_filter_parameters(filter, low_pass, high_pass, t_r)
# Restrict the signal to the orthogonal of the confounds
if sessions is not None:
signals = _process_session(signals, sessions, detrend, standardize,
confounds, low_pass, high_pass, t_r)
# Detrend
# Detrend and filtering should apply to confounds, if confound presents
# keep filters orthogonal (according to Lindquist et al. (2018))
if detrend:
mean_signals = signals.mean(axis=0)
signals = _standardize(signals, standardize=False, detrend=detrend)
if confounds is not None:
confounds = _standardize(confounds, standardize=False,
detrend=detrend)
# Apply low- and high-pass filters
if filter == "butterworth" and t_r is not None: # this change enticipate extra fltering methods
signals = butterworth(signals, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
if confounds is not None:
# Apply low- and high-pass filters to keep filters orthogonal
# (according to Lindquist et al. (2018))
confounds = butterworth(confounds, sampling_rate=1. / t_r,
low_pass=low_pass, high_pass=high_pass)
# if filter == "cosine":
# ...
# Remove confounds
if confounds is not None:
confounds = _standardize(confounds, standardize=standardize_confounds,
detrend=False)
if not standardize_confounds:
# Improve numerical stability by controlling the range of
# confounds. We don't rely on _standardize as it removes any
# constant contribution to confounds.
confound_max = np.max(np.abs(confounds), axis=0)
confound_max[confound_max == 0] = 1
confounds /= confound_max
# Pivoting in qr decomposition was added in scipy 0.10
Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
signals -= Q.dot(Q.T).dot(signals)
# Standardize
if detrend and (standardize == 'psc'):
# If the signal is detrended, we have to know the original mean
# signal to calculate the psc.
signals = _standardize(signals + mean_signals, standardize=standardize,
detrend=False)
else:
signals = _standardize(signals, standardize=standardize,
detrend=False)
return signals
|
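A minimal usage sketch of `clean` on synthetic data, assuming the signature shown above (including the `filter` keyword); array shapes follow the docstring.

import numpy as np

rng = np.random.RandomState(0)
signals = rng.standard_normal((200, 10))   # 200 time points, 10 features
confounds = rng.standard_normal((200, 3))  # e.g. motion regressors

cleaned = clean(signals,
                confounds=confounds,
                detrend=True,
                standardize='zscore',
                filter='butterworth',
                low_pass=0.1, high_pass=0.01,
                t_r=2.5)
print(cleaned.shape)  # same shape as `signals`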
14,255 | def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of the result
(one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
``'step'`` means time is already in simulation time steps.
round_mode: String specifying how to handle time values that sit between time steps
(one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``).
Returns:
The number of simulation time steps.
When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot
be accurately represented in terms of simulator time steps.
When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding
rounding function from the standard library will be used to round to a simulator
time step.
.. versionchanged:: 1.5
Support ``'step'`` as the *units* argument to mean "simulator time step".
.. versionchanged:: 1.6
Support rounding modes.
"""
if units not in (None, "step"):
result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision())
else:
result = time
if units is None:
warnings.warn(
'Using units=None is deprecated, use units="step" instead.',
DeprecationWarning, stacklevel=2)
units="step" # don't propagate deprecated value
if round_mode == "error":
result_rounded = math.floor(result)
if result_rounded != result:
precision = _get_simulator_precision()
raise ValueError(
f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}"
)
elif round_mode == "ceil":
result_rounded = math.ceil(result)
elif round_mode == "round":
result_rounded = round(result)
elif round_mode == "floor":
result_rounded = math.floor(result)
else:
raise ValueError(f"invalid round_mode specifier: {round_mode}")
return result_rounded
| def get_sim_steps(
time: Union[Real, Decimal],
units: str = "step",
round_mode: str = "error"
) -> int:
"""Calculates the number of simulation time steps for a given amount of *time*.
Args:
time: The value to convert to simulation time steps.
units: String specifying the units of the result
(one of ``'step'``, ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
``'step'`` means time is already in simulation time steps.
round_mode: String specifying how to handle time values that sit between time steps
(one of ``'error'``, ``'round'``, ``'ceil'``, ``'floor'``).
Returns:
The number of simulation time steps.
When *round_mode* is ``"error"``, a :exc:`ValueError` is thrown if the value cannot
be accurately represented in terms of simulator time steps.
When *round_mode* is ``"round"``, ``"ceil"``, or ``"floor"``, the corresponding
rounding function from the standard library will be used to round to a simulator
time step.
.. versionchanged:: 1.5
Support ``'step'`` as the *units* argument to mean "simulator time step".
.. versionchanged:: 1.6
Support rounding modes.
"""
if units not in (None, "step"):
result = _ldexp10(time, _get_log_time_scale(units) - _get_simulator_precision())
else:
result = time
if units is None:
warnings.warn(
'Using units=None is deprecated, use units="step" instead.',
DeprecationWarning, stacklevel=2)
units="step" # don't propagate deprecated value
if round_mode == "error":
result_rounded = math.floor(result)
if result_rounded != result:
precision = _get_simulator_precision()
raise ValueError(
f"Unable to accurately represent {time}({units}) with the simulator precision of 1e{precision}"
)
elif round_mode == "ceil":
result_rounded = math.ceil(result)
elif round_mode == "round":
result_rounded = round(result)
elif round_mode == "floor":
result_rounded = math.floor(result)
else:
raise ValueError(f"Invalid round_mode specifier: {round_mode}")
return result_rounded
|
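A standalone sketch of just the round-mode dispatch described in the docstring, kept independent of cocotb's simulator-precision machinery; the helper name is illustrative.

import math

def round_to_steps(result, round_mode="error"):
    """Mirror the rounding behaviour documented above for a raw step count."""
    if round_mode == "error":
        rounded = math.floor(result)
        if rounded != result:
            raise ValueError(f"{result} is not an integral number of steps")
        return rounded
    if round_mode == "ceil":
        return math.ceil(result)
    if round_mode == "round":
        return round(result)
    if round_mode == "floor":
        return math.floor(result)
    raise ValueError(f"Invalid round_mode specifier: {round_mode}")

print(round_to_steps(2.0))           # 2
print(round_to_steps(2.4, "round"))  # 2
print(round_to_steps(2.4, "ceil"))   # 3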
14,125 | def _continuous_to_discrete_coords(total_bounds, bounds, p):
"""
Calculates mid points & ranges of geoms and returns
as discrete coords
Parameters
----------
total_bounds : Total bounds of geometries - array
bounds : Bounds of each geometry - array
p : The number of iterations used in constructing the Hilbert curve
Returns
---------
Two one-dimensional numpy arrays
Discrete (x, y) coordinates of the geometry mid points
"""
# Hilbert Side len
side_length = 2 ** p
# Calculate x and y range of total bound coords - returns array
xmin, ymin, xmax, ymax = total_bounds
# Calculate mid points for x and y bound coords - returns array
x_mids = (bounds[:, 0] + bounds[:, 2]) / 2.0
y_mids = (bounds[:, 1] + bounds[:, 3]) / 2.0
# Transform continuous int to discrete int for each dimension
x_int = _continuous_to_discrete(x_mids, (xmin, xmax), side_length)
y_int = _continuous_to_discrete(y_mids, (ymin, ymax), side_length)
return x_int, y_int
| def _continuous_to_discrete_coords(total_bounds, bounds, p):
"""
Calculates mid points & ranges of geoms and returns
as discrete coords
Parameters
----------
total_bounds : Total bounds of geometries - array
bounds : Bounds of each geometry - array
p : The number of iterations used in constructing the Hilbert curve
Returns
---------
Two one-dimensional numpy arrays
Discrete (x, y) coordinates of the geometry mid points
"""
# Hilbert Side len
side_length = 2 ** p
# Calculate x and y range of total bound coords - returns array
xmin, ymin, xmax, ymax = total_bounds
# Calculate mid points for x and y bound coords - returns array
x_mids = (bounds[:, 0] + bounds[:, 2]) / 2.0
y_mids = (bounds[:, 1] + bounds[:, 3]) / 2.0
# Transform continuous value to discrete integer for each dimension
x_int = _continuous_to_discrete(x_mids, (xmin, xmax), side_length)
y_int = _continuous_to_discrete(y_mids, (ymin, ymax), side_length)
return x_int, y_int
|
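A hypothetical implementation of the `_continuous_to_discrete` helper relied on above, mapping a continuous coordinate range onto the 2**p cells of the Hilbert grid; this is a sketch, not the library's exact code.

import numpy as np

def _continuous_to_discrete(vals, val_range, n):
    """Map continuous values into integer cells in [0, n - 1] (illustrative)."""
    lo, hi = val_range
    width = hi - lo
    if width == 0:  # degenerate bounds: put everything in cell 0
        return np.zeros(len(vals), dtype=np.int64)
    res = ((vals - lo) * (n - 1) / width).astype(np.int64)
    return np.clip(res, 0, n - 1)

# Example: mid points spread over [0, 10) with p = 4, i.e. 16 cells per axis.
mids = np.array([0.0, 2.5, 5.0, 9.9])
print(_continuous_to_discrete(mids, (0.0, 10.0), 2 ** 4))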
6,585 | def execute():
click.secho(
"E-Invoicing Integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
"Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance",
fg="yellow",
)
| def execute():
click.secho(
"Indian E-Invoicing integration is moved to a separate app and will be removed from ERPNext in version-14.\n"
"Please install the app to continue using the integration: https://github.com/frappe/erpnext_gst_compliance",
fg="yellow",
)
|
20,458 | def merge_stock_location_path_stock_rule(env):
openupgrade.logged_query(
env.cr, """
INSERT INTO stock_rule (name, active, action, sequence, company_id,
location_id, location_src_id, route_id, procure_method,
route_sequence, picking_type_id, delay, propagate, warehouse_id,
auto, create_uid, create_date, write_uid, write_date, %s)
SELECT name, active, 'push' AS action, sequence, company_id,
location_dest_id, location_from_id, route_id,
'make_to_stock' AS procure_method, route_sequence,
picking_type_id, delay, propagate, warehouse_id, auto,
create_uid, create_date, write_uid, write_date, id
FROM stock_location_path
""", (AsIs(openupgrade.get_legacy_name('loc_path_id')), ),
)
openupgrade.logged_query(
env.cr, """
UPDATE ir_model_data imd
SET model = 'stock.rule', res_id = sr.id
FROM stock_rule sr
WHERE imd.res_id = sr.%s AND model = 'stock.location.path'
""", (AsIs(openupgrade.get_legacy_name('loc_path_id')), ),
)
env.cr.execute(
"""
SELECT DISTINCT sm.rule_id, sr.id
FROM stock_move sm
INNER JOIN stock_rule sr ON sm.%s = sr.%s
WHERE sr.%s IS NOT NULL AND sm.rule_id IS NOT NULL
""", (
AsIs(openupgrade.get_legacy_name('push_rule_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
),
)
rules_to_merge = env.cr.fetchall()
openupgrade.logged_query(
env.cr, """
UPDATE stock_move sm
SET rule_id = sr.id
FROM stock_rule sr
WHERE sm.%s = sr.%s
AND sr.%s IS NOT NULL AND sm.rule_id IS NULL
""", (
AsIs(openupgrade.get_legacy_name('push_rule_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
),
)
for row in rules_to_merge:
openupgrade_merge_records.merge_records(
env, 'stock.rule',
[row[1]],
row[0],
)
pull_push_rule_ids = list(set([r[0] for r in rules_to_merge]))
if pull_push_rule_ids:
openupgrade.logged_query(
env.cr, """
UPDATE stock_rule
SET action = 'pull_push'
WHERE id in %s""", (tuple(pull_push_rule_ids), ),
)
| def merge_stock_location_path_stock_rule(env):
openupgrade.logged_query(
env.cr, """
INSERT INTO stock_rule (name, active, action, sequence, company_id,
location_id, location_src_id, route_id, procure_method,
route_sequence, picking_type_id, delay, propagate, warehouse_id,
auto, create_uid, create_date, write_uid, write_date, %s)
SELECT name, active, 'push' AS action, sequence, company_id,
location_dest_id, location_from_id, route_id,
'make_to_stock' AS procure_method, route_sequence,
picking_type_id, delay, propagate, warehouse_id, auto,
create_uid, create_date, write_uid, write_date, id
FROM stock_location_path
""", (AsIs(openupgrade.get_legacy_name('loc_path_id')), ),
)
openupgrade.logged_query(
env.cr, """
UPDATE ir_model_data imd
SET model = 'stock.rule', res_id = sr.id
FROM stock_rule sr
WHERE imd.res_id = sr.%s AND model = 'stock.location.path'
""", (AsIs(openupgrade.get_legacy_name('loc_path_id')), ),
)
env.cr.execute(
"""
SELECT DISTINCT sm.rule_id, sr.id
FROM stock_move sm
INNER JOIN stock_rule sr ON sm.%s = sr.%s
WHERE sr.%s IS NOT NULL AND sm.rule_id IS NOT NULL
""", (
AsIs(openupgrade.get_legacy_name('push_rule_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
),
)
rules_to_merge = env.cr.fetchall()
openupgrade.logged_query(
env.cr, """
UPDATE stock_move sm
SET rule_id = sr.id
FROM stock_rule sr
WHERE sm.%s = sr.%s
AND sr.%s IS NOT NULL AND sm.rule_id IS NULL
""", (
AsIs(openupgrade.get_legacy_name('push_rule_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
AsIs(openupgrade.get_legacy_name('loc_path_id')),
),
)
for row in rules_to_merge:
openupgrade_merge_records.merge_records(
env, 'stock.rule',
[row[1]],
row[0],
)
pull_push_rule_ids = tuple(set([r[0] for r in rules_to_merge]))
if pull_push_rule_ids:
openupgrade.logged_query(
env.cr, """
UPDATE stock_rule
SET action = 'pull_push'
WHERE id in %s""", (tuple(pull_push_rule_ids), ),
)
|
31,722 | def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
"""
get-remote-data command: Returns an updated incident and entries
If offense's events were updated in the long running container, update the demisto incident.
Args:
client (Client): QRadar client to perform the API calls.
params (Dict): Demisto params.
args (Dict):
id: Offense id to retrieve.
lastUpdate: When was the last time data was retrieved, in Epoch.
Returns:
GetRemoteDataResponse.
"""
remote_args = GetRemoteDataArgs(args)
ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
offense_id = remote_args.remote_incident_id
offense = client.offenses_list(offense_id=offense_id)
offense_last_update = get_time_parameter(offense.get('last_persisted_time'))
mirror_options = params.get('mirror_options')
context_data = get_integration_context()
processed_offenses = print_mirror_events_stats(context_data, f"Starting Get Remote Data For "
f"Offense {str(offense.get('id'))}")
# versions below 6.1 compatibility
last_update = get_time_parameter(args.get('lastUpdate'))
if last_update and last_update > offense_last_update and str(offense.get("id")) not in processed_offenses:
demisto.debug('Nothing new in the ticket')
return GetRemoteDataResponse({'id': offense_id, 'in_mirror_error': ''}, [])
demisto.debug(f'Updating offense. Offense last update was {offense_last_update}')
entries = []
if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)):
demisto.debug(f'Offense is closed: {offense}')
if closing_reason := offense.get('closing_reason_id', ''):
closing_reason = client.closing_reasons_list(closing_reason).get('text')
offense_close_time = offense.get('close_time', '')
closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}',
filter_=f'create_time >= {offense_close_time}')
# In the QRadar UI, when you close an offense, a note is added with the reason and more details. Try to get the
# note if it exists, else fall back to the closing reason only, as closing QRadar through an API call does not create a note.
close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if
note.get('note_text').startswith('This offense was closed with reason:')),
closing_reason)
if not close_reason_with_note:
print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}')
close_reason_with_note = 'Unknown closing reason from QRadar'
else:
close_reason_with_note = f'From QRadar: {close_reason_with_note}'
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': close_reason_with_note
},
'ContentsFormat': EntryFormat.JSON
})
if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
max_retries = MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3)
is_waiting_to_be_updated = True
evented_offense = None
retries = 0
while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries:
if retries != 0:
time.sleep(FAILURE_SLEEP)
ctx = get_integration_context()
context_data = ctx.copy()
print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}")
retries += 1
offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
evented_offense = [evented_offense for evented_offense in offenses_with_updated_events
if str(evented_offense.get('id')) == str(offense.get("id"))]
is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update
if str(waiting_offense.get('id')) == str(offense.get("id"))])
if evented_offense:
demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.")
if evented_offense[0].get('events'):
offense['events'] = evented_offense[0].get('events')
demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {offense.get('events')} "
f"fetched events.")
offenses_with_updated_events.remove(evented_offense[0])
resubmitted_offenses_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy()
if offense.get("id") in resubmitted_offenses_ids:
resubmitted_offenses_ids.remove(offense.get("id"))
context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted_offenses_ids
context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = offenses_with_updated_events
print_mirror_events_stats(context_data, f"Get Remote Data End for id {offense.get('id')}")
set_integration_context(context_data)
enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich)
final_offense_data = sanitize_outputs(enriched_offense)[0]
return GetRemoteDataResponse(final_offense_data, entries)
| def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
"""
get-remote-data command: Returns an updated incident and entries
If offense's events were updated in the long running container, update the demisto incident.
Args:
client (Client): QRadar client to perform the API calls.
params (Dict): Demisto params.
args (Dict):
id: Offense id to retrieve.
lastUpdate: When was the last time data was retrieved, in Epoch.
Returns:
GetRemoteDataResponse.
"""
remote_args = GetRemoteDataArgs(args)
ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
offense_id = remote_args.remote_incident_id
offense = client.offenses_list(offense_id=offense_id)
offense_last_update = get_time_parameter(offense.get('last_persisted_time'))
mirror_options = params.get('mirror_options')
context_data = get_integration_context()
processed_offenses = print_mirror_events_stats(context_data, f"Starting Get Remote Data For "
f"Offense {str(offense.get('id'))}")
# versions below 6.1 compatibility
last_update = get_time_parameter(args.get('lastUpdate'))
if last_update and last_update > offense_last_update and str(offense.get("id")) not in processed_offenses:
demisto.debug('Nothing new in the ticket')
return GetRemoteDataResponse({'id': offense_id, 'in_mirror_error': ''}, [])
demisto.debug(f'Updating offense. Offense last update was {offense_last_update}')
entries = []
if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)):
demisto.debug(f'Offense is closed: {offense}')
if closing_reason := offense.get('closing_reason_id', ''):
closing_reason = client.closing_reasons_list(closing_reason).get('text')
offense_close_time = offense.get('close_time', '')
closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}',
filter_=f'create_time >= {offense_close_time}')
# In the QRadar UI, when you close an offense, a note is added with the reason and more details. Try to get the
# note if it exists, else fall back to the closing reason only, as closing QRadar through an API call does not create a note.
close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if
note.get('note_text').startswith('This offense was closed with reason:')),
closing_reason)
if not close_reason_with_note:
print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}')
close_reason_with_note = 'Unknown closing reason from QRadar'
else:
close_reason_with_note = f'From QRadar: {close_reason_with_note}'
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': close_reason_with_note
},
'ContentsFormat': EntryFormat.JSON
})
if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
max_retries = MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3)
is_waiting_to_be_updated = True
evented_offense = None
retries = 0
while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries:
if retries != 0:
time.sleep(FAILURE_SLEEP)
context_data = get_integration_context().copy()
print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}")
retries += 1
offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
evented_offense = [evented_offense for evented_offense in offenses_with_updated_events
if str(evented_offense.get('id')) == str(offense.get("id"))]
is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update
if str(waiting_offense.get('id')) == str(offense.get("id"))])
if evented_offense:
demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.")
if evented_offense[0].get('events'):
offense['events'] = evented_offense[0].get('events')
demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {offense.get('events')} "
f"fetched events.")
offenses_with_updated_events.remove(evented_offense[0])
resubmitted_offenses_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy()
if offense.get("id") in resubmitted_offenses_ids:
resubmitted_offenses_ids.remove(offense.get("id"))
context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted_offenses_ids
context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = offenses_with_updated_events
print_mirror_events_stats(context_data, f"Get Remote Data End for id {offense.get('id')}")
set_integration_context(context_data)
enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich)
final_offense_data = sanitize_outputs(enriched_offense)[0]
return GetRemoteDataResponse(final_offense_data, entries)
|
6,077 | def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str jobJDL: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime and cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 6. RAM
ram = job.getAttributeInt('RAM')
if ram and ram > int(queueDict['MaxRAM']):
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeString(parameter)
if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0:
noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
if noMatchReasons:
return S_OK({'Match': False, 'Reason': noMatchReasons})
return S_OK({'Match': True, 'Reason': noMatchReasons})
| def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str jobJDL: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime and cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 6. RAM
ram = job.getAttributeInt('RAM')
if ram and ram > int(queueDict['MaxRAM']):
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeString(parameter)
if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0:
noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
if noMatchReasons:
return S_OK({'Match': False, 'Reason': noMatchReasons})
return S_OK({'Match': not bool(noMatchReasons), 'Reason': noMatchReasons})
|
25,968 | def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None,
sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None,
location_mode=None):
logger.debug('Getting data service client service_type=%s', service_type.__name__)
try:
if account_name:
account_name = account_name.split('.', 2)[0]
client_kwargs = {'account_name': account_name,
'account_key': account_key,
'connection_string': connection_string,
'sas_token': sas_token}
if socket_timeout:
client_kwargs['socket_timeout'] = socket_timeout
if token_credential:
client_kwargs['token_credential'] = token_credential
if endpoint_suffix:
client_kwargs['endpoint_suffix'] = endpoint_suffix
client = service_type(**client_kwargs)
if location_mode:
client.location_mode = location_mode
if 'Blob' in service_type.__name__:
service = 'blob'
elif 'File' in service_type.__name__:
service = 'file'
elif 'Queue' in service_type.__name__:
service = 'queue'
elif 'Table' in service_type.__name__:
service = 'table'
else:
raise CLIError("Invalid service type.")
if account_name and len(account_name.split('.', 2)) == 2:
dns = account_name.split('.', 2)[1]
client.primary_endpoint = "{}.{}.{}.{}".format(client.primary_endpoint.split('.', 1)[0], dns, service,
endpoint_suffix)
client.secondary_endpoint = "{}.{}.{}.{}".format(client.secondary_endpoint.split('.', 1)[0], dns, service,
endpoint_suffix)
except ValueError as exc:
_ERROR_STORAGE_MISSING_INFO = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'common._error#_ERROR_STORAGE_MISSING_INFO')
if _ERROR_STORAGE_MISSING_INFO in str(exc):
raise ValueError(exc)
raise CLIError('Unable to obtain data client. Check your connection parameters.')
# TODO: enable Fiddler
client.request_callback = _get_add_headers_callback(cli_ctx)
return client
| def get_data_service_client(cli_ctx, service_type, account_name, account_key, connection_string=None,
sas_token=None, socket_timeout=None, token_credential=None, endpoint_suffix=None,
location_mode=None):
logger.debug('Getting data service client service_type=%s', service_type.__name__)
try:
client_kwargs = {'account_name': account_name.split('.', 2)[0] if account_name else account_name,
'account_key': account_key,
'connection_string': connection_string,
'sas_token': sas_token}
if socket_timeout:
client_kwargs['socket_timeout'] = socket_timeout
if token_credential:
client_kwargs['token_credential'] = token_credential
if endpoint_suffix:
client_kwargs['endpoint_suffix'] = endpoint_suffix
client = service_type(**client_kwargs)
if location_mode:
client.location_mode = location_mode
if 'Blob' in service_type.__name__:
service = 'blob'
elif 'File' in service_type.__name__:
service = 'file'
elif 'Queue' in service_type.__name__:
service = 'queue'
elif 'Table' in service_type.__name__:
service = 'table'
else:
raise CLIError("Invalid service type.")
if account_name and len(account_name.split('.', 2)) == 2:
dns = account_name.split('.', 2)[1]
client.primary_endpoint = "{}.{}.{}.{}".format(client.primary_endpoint.split('.', 1)[0], dns, service,
endpoint_suffix)
client.secondary_endpoint = "{}.{}.{}.{}".format(client.secondary_endpoint.split('.', 1)[0], dns, service,
endpoint_suffix)
except ValueError as exc:
_ERROR_STORAGE_MISSING_INFO = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'common._error#_ERROR_STORAGE_MISSING_INFO')
if _ERROR_STORAGE_MISSING_INFO in str(exc):
raise ValueError(exc)
raise CLIError('Unable to obtain data client. Check your connection parameters.')
# TODO: enable Fiddler
client.request_callback = _get_add_headers_callback(cli_ctx)
return client
|
54,216 | def group_settings_greedy(settings: Iterable[InitObsSetting]) \
-> Dict[InitObsSetting, List[InitObsSetting]]:
"""
Group a list of settings which can be simultaneously measured via
a greedy algorithm.
We construct a dictionary keyed by `max_setting` (see docstrings
for `_max_weight_state` and `_max_weight_observable`) where the value
is a list of settings compatible with `max_setting`. For each new setting,
we try to find an existing group to add it and update `max_setting` for
that group if necessary. Otherwise, we make a new group.
In practice, this greedy algorithm performs comparably to something
more complicated by solving the clique cover problem on a graph
of simultaneously-measurable settings.
Args:
settings: The settings to group.
Returns:
A dictionary keyed by `max_setting` which need not exist in the
input list of settings. Each dictionary value is a list of
settings compatible with `max_setting`.
"""
grouped_settings = {} # type: Dict[InitObsSetting, List[InitObsSetting]]
for setting in settings:
for max_setting, simul_settings in grouped_settings.items():
trial_grouped_settings = simul_settings + [setting]
new_max_weight_state = _max_weight_state(
stg.init_state for stg in trial_grouped_settings)
new_max_weight_obs = _max_weight_observable(
stg.observable for stg in trial_grouped_settings)
# max_weight_xxx returns None if the set of xxx's aren't compatible,
# so the following conditional is True if setting can
# be inserted into the current group.
if (new_max_weight_state is not None and
new_max_weight_obs is not None):
del grouped_settings[max_setting]
new_max_setting = InitObsSetting(new_max_weight_state,
new_max_weight_obs)
grouped_settings[new_max_setting] = trial_grouped_settings
break
else:
# made it through entire dict without finding an existing group
# Strip coefficients before using as key
new_max_weight_obs = setting.observable.with_coefficient(1.0)
new_max_setting = InitObsSetting(setting.init_state,
new_max_weight_obs)
grouped_settings[new_max_setting] = [setting]
return grouped_settings
| def group_settings_greedy(settings: Iterable[InitObsSetting]) \
-> Dict[InitObsSetting, List[InitObsSetting]]:
"""
Group a list of settings which can be simultaneously measured via
a greedy algorithm.
We construct a dictionary keyed by `max_setting` (see docstrings
for `_max_weight_state` and `_max_weight_observable`) where the value
is a list of settings compatible with `max_setting`. For each new setting,
we try to find an existing group to add it and update `max_setting` for
that group if necessary. Otherwise, we make a new group.
In practice, this greedy algorithm performs comparably to something
more complicated by solving the clique cover problem on a graph
of simultaneously-measurable settings.
Args:
settings: The settings to group.
Returns:
A dictionary keyed by `max_setting` which need not exist in the
input list of settings. Each dictionary value is a list of
settings compatible with `max_setting`.
"""
grouped_settings = {} # type: Dict[InitObsSetting, List[InitObsSetting]]
for setting in settings:
for max_setting, simul_settings in grouped_settings.items():
trial_grouped_settings = simul_settings + [setting]
new_max_weight_state = _max_weight_state(
stg.init_state for stg in trial_grouped_settings)
new_max_weight_obs = _max_weight_observable(
stg.observable for stg in trial_grouped_settings)
# max_weight_xxx returns None if the set of xxx's aren't compatible,
# so the following conditional is True if setting can
# be inserted into the current group.
if (new_max_weight_state is not None and
new_max_weight_obs is not None):
del grouped_settings[max_setting]
new_max_setting = InitObsSetting(new_max_weight_state,
new_max_weight_obs)
grouped_settings[new_max_setting] = trial_grouped_settings
break
else:
# made it through entire dict without finding a compatible group,
# thus a new group needs to be created
# Strip coefficients before using as key
new_max_weight_obs = setting.observable.with_coefficient(1.0)
new_max_setting = InitObsSetting(setting.init_state,
new_max_weight_obs)
grouped_settings[new_max_setting] = [setting]
return grouped_settings
|
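The same greedy-grouping idea in a self-contained form, with frozensets of qubit indices standing in for the `_max_weight_*` compatibility checks (disjoint supports are used here as a simplified notion of "simultaneously measurable"); purely illustrative and independent of the surrounding library's types.

from typing import Dict, FrozenSet, List

def group_greedy(supports: List[FrozenSet[int]]) -> Dict[FrozenSet[int], List[FrozenSet[int]]]:
    """Greedily group items whose qubit supports do not overlap."""
    groups: Dict[FrozenSet[int], List[FrozenSet[int]]] = {}
    for support in supports:
        for key, members in groups.items():
            if not key & support:  # compatible: disjoint supports
                del groups[key]    # re-key the group on the enlarged support
                groups[key | support] = members + [support]
                break
        else:  # no compatible group found, start a new one
            groups[support] = [support]
    return groups

print(group_greedy([frozenset({0}), frozenset({1}), frozenset({0, 2})]))
# -> two groups: {0, 1} holding the first two items, {0, 2} holding the third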
20,273 | def unholder(item):
"""Get the held itme of an object holder of list of object holers."""
if isinstance(item, list):
return [i.held_object if hasattr(i, 'held_object') else i for i in item]
if hasattr(item, 'held_object'):
return item.held_object
return item
| def unholder(item):
"""Get the held item of an object holder or list of object holders."""
if isinstance(item, list):
return [i.held_object if hasattr(i, 'held_object') else i for i in item]
if hasattr(item, 'held_object'):
return item.held_object
return item
|
40,426 | def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, col_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(col_dict[key], csc[1])
assert perm_dict[key] is None
| def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, colptr_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(col_dict[key], csc[1])
assert perm_dict[key] is None
|
58,329 | def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
additive noise as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method. Default: 4. When stages=1, this becomes
Euler / Euler-Maruyama.
s : float
    The diffusion coefficient for models with additive noise. Default: 0 for
deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
            W = np.sqrt(dt) * np.random.standard_normal((N_e, N_x))
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
| def rk4(f, x, t, dt, stages=4, s=0.0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
scheme for SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
additive noise as defined below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method.
When stages=1, this becomes the Euler (-Maruyama) scheme.
Default: 4.
s : float
    The diffusion coefficient for models with additive noise. Default: 0 for
deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0.0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
            W = np.sqrt(dt) * np.random.standard_normal((N_e, N_x))
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
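A short driver showing how `rk4` above might be used for a deterministic ODE; the logistic right-hand side, step size, and step count are illustrative choices, not part of the original module.

import numpy as np

def logistic(t, x):
    # dx/dt = x * (1 - x); a convenient scalar test problem
    return x * (1.0 - x)

x = np.array([0.1])
dt = 0.1
for k in range(100):                  # integrate from t = 0 to t = 10
    x = rk4(logistic, x, k * dt, dt)  # deterministic 4-stage step (s = 0.0)
print(x)                              # approaches the fixed point at 1.0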
53,266 | def boris_push_relativistic(x, v, B, E, q, m, dt):
r"""
    The explicit Boris pusher, including relativistic corrections.
Parameters
----------
x : np.ndarray
particle position at full timestep, in SI (meter) units.
v : np.ndarray
particle velocity at half timestep, in SI (meter/second) units.
B : np.ndarray
magnetic field at full timestep, in SI (tesla) units.
E : float
electric field at full timestep, in SI (V/m) units.
q : float
particle charge, in SI (Coulomb) units.
m : float
particle mass, in SI (kg) units.
dt : float
timestep, in SI (second) units.
Notes
----------
The Boris algorithm is the standard energy conserving algorithm for
particle movement in plasma physics. See [1]_ for more details, and
[2]_ for a nice overview.
Conceptually, the algorithm has three phases:
1. Add half the impulse from electric field.
2. Rotate the particle velocity about the direction of the magnetic
field.
3. Add the second half of the impulse from the electric field.
This ends up causing the magnetic field action to be properly "centered" in
time, and the algorithm, being a symplectic integrator, conserves energy.
References
----------
.. [1] C. K. Birdsall, A. B. Langdon, "Plasma Physics via Computer
Simulation", 2004, p. 58-63
.. [2] L. Brieda, "Particle Push in Magnetic Field (Boris Method)",
https://www.particleincell.com/2011/vxb-rotation/
"""
c = constants.c.si.value
γ = 1 / np.sqrt(1 - (v / c) ** 2)
uvel = v * γ
uvel_minus = uvel + q * E * dt / (2 * m)
γ1 = np.sqrt(1 + (uvel_minus / c) ** 2)
    # Birdsall has a factor of c incorrect in the definition of t?
# See this source: https://www.sciencedirect.com/science/article/pii/S163107211400148X
t = q * B * dt / (2 * γ1 * m)
s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True))
uvel_prime = uvel_minus + np.cross(uvel_minus.si.value, t)
uvel_plus = uvel_minus + np.cross(uvel_prime.si.value, s)
uvel_new = uvel_plus + +q * E * dt / (2 * m)
# You can show that this expression is equivalent to calculating
# v_new then calculating γnew using the usual formula
γ2 = np.sqrt(1 + (uvel_new / c) ** 2)
# Update the velocities of the particles that are being pushed
v[...] = uvel_new / γ2
x += v * dt
| def boris_push_relativistic(x, v, B, E, q, m, dt):
r"""
    The explicit Boris pusher, including relativistic corrections.
Parameters
----------
x : np.ndarray
particle position at full timestep, in SI (meter) units.
v : np.ndarray
particle velocity at half timestep, in SI (meter/second) units.
B : np.ndarray
magnetic field at full timestep, in SI (tesla) units.
E : float
electric field at full timestep, in SI (V/m) units.
q : float
particle charge, in SI (Coulomb) units.
m : float
particle mass, in SI (kg) units.
dt : float
timestep, in SI (second) units.
Notes
----------
For the basic overview of this algorithm, see `boris_push`. This
version, based on [1]_, applies relativistic corrections such as
TODO.
Keep in mind that the non-relativistic version will be slightly
faster if you don't encounter velocities in relativistic regimes.
References
----------
.. [1] C. K. Birdsall, A. B. Langdon, "Plasma Physics via Computer
Simulation", 2004, p. 58-63
"""
c = constants.c.si.value
γ = 1 / np.sqrt(1 - (v / c) ** 2)
uvel = v * γ
uvel_minus = uvel + q * E * dt / (2 * m)
γ1 = np.sqrt(1 + (uvel_minus / c) ** 2)
    # Birdsall has a factor of c incorrect in the definition of t?
# See this source: https://www.sciencedirect.com/science/article/pii/S163107211400148X
t = q * B * dt / (2 * γ1 * m)
s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True))
uvel_prime = uvel_minus + np.cross(uvel_minus.si.value, t)
uvel_plus = uvel_minus + np.cross(uvel_prime.si.value, s)
uvel_new = uvel_plus + +q * E * dt / (2 * m)
# You can show that this expression is equivalent to calculating
# v_new then calculating γnew using the usual formula
γ2 = np.sqrt(1 + (uvel_new / c) ** 2)
# Update the velocities of the particles that are being pushed
v[...] = uvel_new / γ2
x += v * dt
|
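The three-phase structure described in the notes above (half electric kick, magnetic rotation, second half electric kick) is easier to see in a stripped-down, non-relativistic sketch with plain NumPy arrays; this is an illustration of the scheme, not the pusher above.

import numpy as np

def boris_push_nonrel(x, v, B, E, q, m, dt):
    # x, v, B, E are (n_particles, 3) arrays; q, m, dt are scalars.
    v_minus = v + q * E * dt / (2 * m)                     # 1) half impulse from E
    t = q * B * dt / (2 * m)                               # 2) rotation about B
    s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True))
    v_prime = v_minus + np.cross(v_minus, t)
    v_plus = v_minus + np.cross(v_prime, s)
    v_new = v_plus + q * E * dt / (2 * m)                  # 3) second half impulse from E
    return x + v_new * dt, v_new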
1,217 | def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
Decorator skipping tests if required directory not present
"""
nibabel_data = get_nibabel_data()
if nibabel_data == '':
return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test")
if subdir is None:
return pytest.mark.skipif(False, reason="todo")
required_path = pjoin(nibabel_data, subdir)
# Path should not be empty (as is the case for not-updated submodules)
have_files = exists(required_path) and len(listdir(required_path)) > 0
return pytest.mark.skipif(not have_files,
reason="Need files in {0} for these tests".format(required_path))
| def needs_nibabel_data(subdir=None):
""" Decorator for tests needing nibabel-data
Parameters
----------
subdir : None or str
Subdirectory we need in nibabel-data directory. If None, only require
nibabel-data directory itself.
Returns
-------
skip_dec : decorator
Decorator skipping tests if required directory not present
"""
nibabel_data = get_nibabel_data()
if nibabel_data == '':
return pytest.mark.skipif(True, reason="Need nibabel-data directory for this test")
if subdir is None:
return pytest.mark.skipif(False, reason="todo")
required_path = pjoin(nibabel_data, subdir)
# Path should not be empty (as is the case for not-updated submodules)
have_files = exists(required_path) and len(listdir(required_path)) > 0
return pytest.mark.skipif(not have_files,
reason="Need files in {0} for these tests".format(required_path))
|
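A usage sketch for the decorator above; the subdirectory name is illustrative.

@needs_nibabel_data('nitest-minc')
def test_something_needing_minc_files():
    # Skipped unless <nibabel-data>/nitest-minc exists and contains files.
    ...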
57,843 | def main() -> None:
try:
arguments = demisto.args()
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/')
verify_certificate = not demisto.params().get('insecure', False)
first_fetch_time = arg_to_timestamp(
arg=demisto.params().get('first_fetch', '1 days'),
arg_name='First fetch time',
required=True
)
assert isinstance(first_fetch_time, int)
proxy = demisto.params().get('proxy', False)
page = arguments.get('page', "1")
page_count_no = arguments.get('max', "25")
demisto.debug(f'Command being called is {demisto.command()}')
params = {'page': page, 'max': page_count_no}
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
result = test_module_command(client)
return_results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
fetch_incident_command = demisto.params().get('fetch_incident_command')
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_time,
command_type=fetch_incident_command
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'gra-fetch-users':
fetch_records(client, '/users', 'Gra.Users', 'employeeId', params)
elif demisto.command() == 'gra-fetch-accounts':
fetch_records(client, '/accounts', 'Gra.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-active-resource-accounts':
resource_name = arguments.get('resource_name', 'Windows Security')
active_resource_url = '/resources/' + resource_name + '/accounts'
fetch_records(client, active_resource_url, 'Gra.Active.Resource.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-user-accounts':
employee_id = arguments.get('employee_id')
user_account_url = '/users/' + employee_id + '/accounts'
fetch_records(client, user_account_url, 'Gra.User.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-resource-highrisk-accounts':
res_name = arguments.get('Resource_name', 'Windows Security')
high_risk_account_resource_url = '/resources/' + res_name + '/accounts/highrisk'
fetch_records(client, high_risk_account_resource_url, 'Gra.Resource.Highrisk.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-hpa':
fetch_records(client, '/accounts/highprivileged', 'Gra.Hpa', 'id', params)
elif demisto.command() == 'gra-fetch-resource-hpa':
resource_name = arguments.get('Resource_name', 'Windows Security')
resource_hpa = '/resources/' + resource_name + '/accounts/highprivileged'
fetch_records(client, resource_hpa, 'Gra.Resource.Hpa', 'id', params)
elif demisto.command() == 'gra-fetch-orphan-accounts':
fetch_records(client, '/accounts/orphan', 'Gra.Orphan.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-resource-orphan-accounts':
resource_name = arguments.get('resource_name', 'Windows Security')
resource_orphan = '/resources/' + resource_name + '/accounts/orphan'
fetch_records(client, resource_orphan, 'Gra.Resource.Orphan.Accounts', 'id', params)
elif demisto.command() == 'gra-user-activities':
employee_id = arguments.get('employee_id')
user_activities_url = '/user/' + employee_id + '/activity'
fetch_records(client, user_activities_url, 'Gra.User.Activity', 'employee_id', params)
elif demisto.command() == 'gra-fetch-users-details':
employee_id = arguments.get('employee_id')
fetch_records(client, '/users/' + employee_id, 'Gra.User', 'employeeId', params)
elif demisto.command() == 'gra-highRisk-users':
fetch_records(client, '/users/highrisk', 'Gra.Highrisk.Users', 'employeeId', params)
elif demisto.command() == 'gra-cases':
status = arguments.get('status')
cases_url = '/cases/' + status
fetch_records(client, cases_url, 'Gra.Cases', 'caseId', params)
elif demisto.command() == 'gra-user-anomalies':
employee_id = arguments.get('employee_id')
anomaly_url = '/users/' + employee_id + '/anomalies/'
fetch_records(client, anomaly_url, 'Gra.User.Anomalies', 'anomaly_name', params)
elif demisto.command() == 'gra-case-action':
action = arguments.get('action')
caseId = arguments.get('caseId')
subOption = arguments.get('subOption')
caseComment = arguments.get('caseComment')
riskAcceptDate = arguments.get('riskAcceptDate')
cases_url = '/cases/' + action
if action == 'riskManageCase':
post_url = '{"caseId":' + caseId + ',"subOption":"' + subOption + '","caseComment":"' + caseComment +\
'","riskAcceptDate":"' + riskAcceptDate + '"}'
else:
post_url = '{"caseId":' + caseId + ',"subOption":"' + subOption + '","caseComment":"' + \
caseComment + '"}'
fetch_post_records(client, cases_url, 'Gra.Case.Action', 'caseId', params, post_url)
elif demisto.command() == 'gra-case-action-anomaly':
action = arguments.get('action')
caseId = arguments.get('caseId')
anomalyNames = arguments.get('anomalyNames')
subOption = arguments.get('subOption')
caseComment = arguments.get('caseComment')
riskAcceptDate = arguments.get('riskAcceptDate')
cases_url = '/cases/' + action
if action == 'riskAcceptCaseAnomaly':
post_url = '{"caseId":' + caseId + ',"anomalyNames":' + anomalyNames + ',"subOption":"' + subOption +\
'","caseComment":"' + caseComment + '","riskAcceptDate":"' + riskAcceptDate + '"}'
else:
post_url = '{"caseId":' + caseId + ',"anomalyNames":"' + anomalyNames + '","subOption":"' + \
subOption + '","caseComment":"' + caseComment + '"}'
fetch_post_records(client, cases_url, 'Gra.Cases.Action.Anomaly', 'caseId', params, post_url)
elif demisto.command() == 'gra-investigate-anomaly-summary':
fromDate = arguments.get('fromDate')
toDate = arguments.get('toDate')
modelName = arguments.get('modelName')
if fromDate is not None and toDate is not None:
investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName + '?fromDate=' + fromDate \
+ ' 00:00:00&toDate=' + toDate + ' 23:59:59'
else:
investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName
fetch_records(client, investigateAnomaly_url, 'Gra.Investigate.Anomaly.Summary', 'modelId', params)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
| def main() -> None:
try:
arguments = demisto.args()
api_key = demisto.params().get('apikey')
base_url = urljoin(demisto.params()['url'], '/api/')
verify_certificate = not demisto.params().get('insecure', False)
first_fetch_time = arg_to_timestamp(
arg=demisto.params().get('first_fetch', '1 days'),
arg_name='First fetch time',
required=True
)
assert isinstance(first_fetch_time, int)
proxy = demisto.params().get('proxy', False)
page = arguments.get('page', "1")
page_count_no = arguments.get('max', "25")
demisto.debug(f'Command being called is {demisto.command()}')
params = {'page': page, 'max': page_count_no}
headers = {
'Authorization': f'Bearer {api_key}'
}
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
result = test_module_command(client)
return_results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
fetch_incident_command = demisto.params().get('fetch_incident_command')
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
next_run, incidents = fetch_incidents(
client=client,
max_results=max_results,
last_run=demisto.getLastRun(), # getLastRun() gets the last run dict
first_fetch_time=first_fetch_time,
command_type=fetch_incident_command
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'gra-fetch-users':
fetch_records(client, '/users', 'Gra.Users', 'employeeId', params)
elif demisto.command() == 'gra-fetch-accounts':
fetch_records(client, '/accounts', 'Gra.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-active-resource-accounts':
resource_name = arguments.get('resource_name', 'Windows Security')
active_resource_url = '/resources/' + resource_name + '/accounts'
fetch_records(client, active_resource_url, 'Gra.Active.Resource.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-user-accounts':
employee_id = arguments.get('employee_id')
user_account_url = '/users/' + employee_id + '/accounts'
fetch_records(client, user_account_url, 'Gra.User.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-resource-highrisk-accounts':
res_name = arguments.get('Resource_name', 'Windows Security')
high_risk_account_resource_url = '/resources/' + res_name + '/accounts/highrisk'
fetch_records(client, high_risk_account_resource_url, 'Gra.Resource.Highrisk.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-hpa':
fetch_records(client, '/accounts/highprivileged', 'Gra.Hpa', 'id', params)
elif demisto.command() == 'gra-fetch-resource-hpa':
resource_name = arguments.get('Resource_name', 'Windows Security')
resource_hpa = '/resources/' + resource_name + '/accounts/highprivileged'
fetch_records(client, resource_hpa, 'Gra.Resource.Hpa', 'id', params)
elif demisto.command() == 'gra-fetch-orphan-accounts':
fetch_records(client, '/accounts/orphan', 'Gra.Orphan.Accounts', 'id', params)
elif demisto.command() == 'gra-fetch-resource-orphan-accounts':
resource_name = arguments.get('resource_name', 'Windows Security')
resource_orphan = '/resources/' + resource_name + '/accounts/orphan'
fetch_records(client, resource_orphan, 'Gra.Resource.Orphan.Accounts', 'id', params)
elif demisto.command() == 'gra-user-activities':
employee_id = arguments.get('employee_id')
user_activities_url = '/user/' + employee_id + '/activity'
fetch_records(client, user_activities_url, 'Gra.User.Activity', 'employee_id', params)
elif demisto.command() == 'gra-fetch-users-details':
employee_id = arguments.get('employee_id')
fetch_records(client, '/users/' + employee_id, 'Gra.User', 'employeeId', params)
elif demisto.command() == 'gra-highRisk-users':
fetch_records(client, '/users/highrisk', 'Gra.Highrisk.Users', 'employeeId', params)
elif demisto.command() == 'gra-cases':
status = arguments.get('status')
cases_url = '/cases/' + status
fetch_records(client, cases_url, 'Gra.Cases', 'caseId', params)
elif demisto.command() == 'gra-user-anomalies':
employee_id = arguments.get('employee_id')
anomaly_url = '/users/' + employee_id + '/anomalies/'
fetch_records(client, anomaly_url, 'Gra.User.Anomalies', 'anomaly_name', params)
elif demisto.command() == 'gra-case-action':
action = arguments.get('action')
caseId = arguments.get('caseId')
subOption = arguments.get('subOption')
caseComment = arguments.get('caseComment')
riskAcceptDate = arguments.get('riskAcceptDate')
cases_url = '/cases/' + action
post_url = {
"caseId": caseId,
"subOption": subOption,
"caseComment": caseComment,
}
if action == 'riskManageCase':
post_url["riskAcceptDate"] = riskAcceptDate
fetch_post_records(client, cases_url, 'Gra.Case.Action', 'caseId', params, json.dumps(post_url))
elif demisto.command() == 'gra-case-action-anomaly':
action = arguments.get('action')
caseId = arguments.get('caseId')
anomalyNames = arguments.get('anomalyNames')
subOption = arguments.get('subOption')
caseComment = arguments.get('caseComment')
riskAcceptDate = arguments.get('riskAcceptDate')
cases_url = '/cases/' + action
if action == 'riskAcceptCaseAnomaly':
post_url = '{"caseId":' + caseId + ',"anomalyNames":' + anomalyNames + ',"subOption":"' + subOption +\
'","caseComment":"' + caseComment + '","riskAcceptDate":"' + riskAcceptDate + '"}'
else:
post_url = '{"caseId":' + caseId + ',"anomalyNames":"' + anomalyNames + '","subOption":"' + \
subOption + '","caseComment":"' + caseComment + '"}'
fetch_post_records(client, cases_url, 'Gra.Cases.Action.Anomaly', 'caseId', params, post_url)
elif demisto.command() == 'gra-investigate-anomaly-summary':
fromDate = arguments.get('fromDate')
toDate = arguments.get('toDate')
modelName = arguments.get('modelName')
if fromDate is not None and toDate is not None:
investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName + '?fromDate=' + fromDate \
+ ' 00:00:00&toDate=' + toDate + ' 23:59:59'
else:
investigateAnomaly_url = '/investigateAnomaly/anomalySummary/' + modelName
fetch_records(client, investigateAnomaly_url, 'Gra.Investigate.Anomaly.Summary', 'modelId', params)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
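The substantive change in the gra-case-action branch above is replacing hand-concatenated JSON strings with a dict serialized by json.dumps. A small sketch of the difference, with purely illustrative argument values:

import json

caseId, subOption, caseComment, riskAcceptDate = '17', 'riskManageCase', 'done', '2021-01-01'

# Hand-built string (fragile: breaks if caseComment contains a double quote).
post_url_str = '{"caseId":' + caseId + ',"subOption":"' + subOption + \
               '","caseComment":"' + caseComment + '"}'

# Dict plus json.dumps, as in the modified command handler.
payload = {"caseId": caseId, "subOption": subOption, "caseComment": caseComment}
if riskAcceptDate:
    payload["riskAcceptDate"] = riskAcceptDate
post_url = json.dumps(payload)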
57,765 | def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: GreatHorn client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_policy()
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
| def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: GreatHorn client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_policy()
except DemistoException as e:
if 'Forbidden' in str(e):
            raise DemistoException('Authorization Error: make sure API Key is correctly set')
else:
raise e
return 'ok'
|
31,228 | def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
url_suffix = '/connectors/%s/connector_runs' % connector_id
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
| def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = str(args.get("connector_id"))
url_suffix = '/connectors/%s/connector_runs' % connector_id
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
|
31,366 | def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
        (bool) True if there is at least one private pack that should be uploaded.
        False otherwise (i.e. there are no private packs that should be uploaded).
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
| def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index.zip is located.
Returns:
        (bool) True if there is at least one private pack that should be uploaded.
        False otherwise (i.e. there are no private packs that should be uploaded).
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
5,862 | def _dirstats_preprocessing(samples, normalize, axis):
"""
Preprocessing of input for directional stats functions. Performs
input validation and, if necessary, normalization. Used by
directionalvar and directionalmean.
Parameters
----------
samples : array
Input array. Must be at least two-dimensional, and the last axis of the
input must correspond with the dimensionality of the vector space.
axis : int
Axis along which the directional mean is computed.
normalize: boolean
If True, normalize the input to ensure that each observation is a
        unit vector. If the observations are already unit vectors, consider
setting this to False to avoid unnecessary computation.
"""
samples = np.asarray(samples)
if samples.ndim < 2:
raise ValueError("samples must at least be two-dimensional. "
f"Instead samples has shape: {samples.shape!r}")
samples = np.moveaxis(samples, axis, 0)
if normalize:
samples = samples/np.linalg.norm(samples, axis=-1, keepdims=True)
return samples
| def _dirstats_preprocessing(samples, normalize, axis):
"""
Preprocessing of input for directional stats functions. Performs
input validation and, if necessary, normalization. Used by
directionalvar and directionalmean.
Parameters
----------
samples : array
Input array. Must be at least two-dimensional, and the last axis of the
input must correspond with the dimensionality of the vector space.
axis : int
Axis along which the directional statistic is computed.
normalize: boolean
If True, normalize the input to ensure that each observation is a
        unit vector. If the observations are already unit vectors, consider
setting this to False to avoid unnecessary computation.
"""
samples = np.asarray(samples)
if samples.ndim < 2:
raise ValueError("samples must at least be two-dimensional. "
f"Instead samples has shape: {samples.shape!r}")
samples = np.moveaxis(samples, axis, 0)
if normalize:
samples = samples/np.linalg.norm(samples, axis=-1, keepdims=True)
return samples
|
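A sketch of how the preprocessed samples might feed a directional mean (the normalized mean resultant vector); the wrapper function name is an assumption for illustration, and only `_dirstats_preprocessing` comes from the module above.

import numpy as np

def directional_mean_sketch(samples, axis=0, normalize=True):
    # Sketch only: average the unit observations and re-normalize the result.
    samples = _dirstats_preprocessing(samples, normalize, axis)
    mean = samples.mean(axis=0)
    return mean / np.linalg.norm(mean)

obs = np.array([[1.0, 0.0], [0.0, 1.0]])   # two unit vectors in the plane
print(directional_mean_sketch(obs))        # ~[0.707, 0.707]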
42,827 | def backup_packages(backup_path, dry_run: bool = False, skip=False):
"""
Creates `packages` directory and places install list text files there.
"""
def run_cmd_if_no_dry_run(command, dest, dry_run) -> int:
if dry_run:
print_dry_run_copy_info(f"$ {command}", dest)
# Return -1 for any processes depending on chained successful commands (npm)
return -1
else:
return run_cmd_write_stdout(command, dest)
print_section_header("PACKAGES", Fore.BLUE)
if not dry_run:
overwrite_dir_prompt_if_needed(backup_path, skip)
for mgr in ["gem"]:
# deal with package managers that have spaces in them.
print_pkg_mgr_backup(mgr)
command = f"{mgr} list"
dest = f"{backup_path}/{mgr.replace(' ', '-')}_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# brew
print_pkg_mgr_backup("brew")
command = f"brew bundle dump --file {backup_path}/brew_list.txt"
dest = f"{backup_path}/brew_list.txt"
if not dry_run:
ret = run_cmd(command)
if not ret:
print_yellow("Package manager not present.")
# cargo
print_pkg_mgr_backup("cargo")
command = "ls {}".format(home_prefix(".cargo/bin/"))
dest = f"{backup_path}/cargo_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# pip
print_pkg_mgr_backup("pip")
command = "pip list --format=freeze"
dest = f"{backup_path}/pip_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# pip3
print_pkg_mgr_backup("pip3")
command = "pip3 list --format=freeze"
dest = f"{backup_path}/pip3_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# npm
print_pkg_mgr_backup("npm")
command = "npm ls --global --parseable=true --depth=0"
temp_file_path = f"{backup_path}/npm_temp_list.txt"
# If command is successful, go to the next parsing step.
npm_backup_cmd_success = run_cmd_if_no_dry_run(command, temp_file_path, dry_run) == 0
if npm_backup_cmd_success:
npm_dest_file = f"{backup_path}/npm_list.txt"
# Parse npm output
with open(temp_file_path, mode="r+") as temp_file:
# Skip first line of file
temp_file.seek(1)
with open(npm_dest_file, mode="w+") as dest:
for line in temp_file:
dest.write(line.split("/")[-1])
os.remove(temp_file_path)
# atom package manager
print_pkg_mgr_backup("Atom")
command = "apm list --installed --bare"
dest = f"{backup_path}/apm_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# vscode extensions
print_pkg_mgr_backup("VSCode")
command = "code --list-extensions --show-versions"
dest = f"{backup_path}/vscode_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# macports
print_pkg_mgr_backup("macports")
command = "port installed requested"
dest = f"{backup_path}/macports_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# system installs
print_pkg_mgr_backup("System Applications")
applications_path = get_applications_dir()
command = "ls {}".format(applications_path)
dest = f"{backup_path}/system_apps_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
| def backup_packages(backup_path, dry_run: bool = False, skip=False):
"""
Creates `packages` directory and places install list text files there.
"""
def run_cmd_if_no_dry_run(command, dest, dry_run) -> int:
if dry_run:
print_dry_run_copy_info(f"$ {command}", dest)
# Return -1 for any processes depending on chained successful commands (npm)
return -1
else:
return run_cmd_write_stdout(command, dest)
print_section_header("PACKAGES", Fore.BLUE)
if not dry_run:
overwrite_dir_prompt_if_needed(backup_path, skip)
for mgr in ["gem"]:
# deal with package managers that have spaces in them.
print_pkg_mgr_backup(mgr)
command = f"{mgr} list"
dest = f"{backup_path}/{mgr.replace(' ', '-')}_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# brew
print_pkg_mgr_backup("brew")
command = f"brew bundle dump --file {backup_path}/brew_list.txt"
dest = f"{backup_path}/brew_list.txt"
if not dry_run:
ret = run_cmd(command)
if not ret:
print_yellow("brew package manager not found.")
# cargo
print_pkg_mgr_backup("cargo")
command = "ls {}".format(home_prefix(".cargo/bin/"))
dest = f"{backup_path}/cargo_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# pip
print_pkg_mgr_backup("pip")
command = "pip list --format=freeze"
dest = f"{backup_path}/pip_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# pip3
print_pkg_mgr_backup("pip3")
command = "pip3 list --format=freeze"
dest = f"{backup_path}/pip3_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# npm
print_pkg_mgr_backup("npm")
command = "npm ls --global --parseable=true --depth=0"
temp_file_path = f"{backup_path}/npm_temp_list.txt"
# If command is successful, go to the next parsing step.
npm_backup_cmd_success = run_cmd_if_no_dry_run(command, temp_file_path, dry_run) == 0
if npm_backup_cmd_success:
npm_dest_file = f"{backup_path}/npm_list.txt"
# Parse npm output
with open(temp_file_path, mode="r+") as temp_file:
# Skip first line of file
temp_file.seek(1)
with open(npm_dest_file, mode="w+") as dest:
for line in temp_file:
dest.write(line.split("/")[-1])
os.remove(temp_file_path)
# atom package manager
print_pkg_mgr_backup("Atom")
command = "apm list --installed --bare"
dest = f"{backup_path}/apm_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# vscode extensions
print_pkg_mgr_backup("VSCode")
command = "code --list-extensions --show-versions"
dest = f"{backup_path}/vscode_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# macports
print_pkg_mgr_backup("macports")
command = "port installed requested"
dest = f"{backup_path}/macports_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
# system installs
print_pkg_mgr_backup("System Applications")
applications_path = get_applications_dir()
command = "ls {}".format(applications_path)
dest = f"{backup_path}/system_apps_list.txt"
run_cmd_if_no_dry_run(command, dest, dry_run)
|
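A minimal usage sketch for the backup routine above: a dry run prints the commands and destination files without invoking any package manager (the path is illustrative).

backup_packages('/tmp/shallow-backup/packages', dry_run=True)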
38,427 | def register_keys(web3: Web3, keys: Optional[list]):
def not_none(x):
return x if x is not None else []
for key in not_none(keys):
register_key(web3, key)
| def register_keys(web3: Web3, keys: Optional[list]):
def not_none(x):
return x if x is not None else []
for key in keys or []:
register_key(web3, key)
|
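A tiny illustration of the `keys or []` idiom that replaces the `not_none` helper above: a falsy argument (None, empty list) degrades to an empty iteration, while a populated list passes through unchanged.

for key in None or []:
    print('never reached')
for key in ['0xillustrative'] or []:   # placeholder key string, not a real one
    print('would register', key)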
39,301 | def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape((-1, 3))
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
| def vtk_points(points, deep=True):
"""Convert numpy array or array-like to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape((-1, 3))
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
|
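A usage sketch for `vtk_points` above: wrap an (N, 3) coordinate array in a vtkPoints object (deep-copied by default) and query it.

import numpy as np

pts = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.5, 0.5, 0.0]])
vtkpts = vtk_points(pts)
print(vtkpts.GetNumberOfPoints())   # 3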
2,233 | def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance', a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3 whiten='unit-variance' will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
| def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten="warn",
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance', a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3 whiten='unit-variance' will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
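A usage sketch for the `fastica` function above on two synthetic mixed sources; the signals, mixing matrix, and noise level are illustrative, and `whiten` is passed explicitly to sidestep the deprecation path described in the docstring.

import numpy as np

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 2000)
S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]            # two independent sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])                      # mixing matrix
X = S @ A.T + 0.02 * rng.standard_normal((2000, 2))         # observed mixtures
K, W, S_est = fastica(X, n_components=2, whiten='unit-variance', random_state=0)
print(W.shape, S_est.shape)                                 # (2, 2) (2000, 2)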
27,900 | def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from device to device and from device to host.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable to be copied.
dst: Target device specifier.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> import chainer.backends.cuda as cuda
>>> x_arr = np.random.uniform(-1, 1, (5, 10))
>>> x = chainer.Variable(x_arr)
>>> x.device
'@numpy'
>>> y = F.copy(x, '@cupy:0') # from CPU (NumPy) to GPU 0 (CuPy)
>>> y.device
'@cupy:0'
.. note::
Copies between non-ChainerX devices and ChainerX devices are not
supported.
"""
in_device = backend.get_device_from_array(
x.array if isinstance(x, chainer.Variable) else x)
out_device = chainer.get_device(dst)
is_chainerx = in_device.xp is chainerx
if is_chainerx != (out_device.xp is chainerx):
raise RuntimeError(
'F.copy does not support a copy between a non-ChainerX device and '
'a ChainerX device.\n'
'From: {}\n'
'To: {}'.format(in_device, out_device))
y, = Copy(in_device, out_device).apply((x,))
return y
| def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from device to device and from device to host.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable to be copied.
dst: Target device specifier.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> import chainer.backends.cuda as cuda
>>> x_arr = np.random.uniform(-1, 1, (5, 10))
>>> x = chainer.Variable(x_arr)
>>> x.device
'@numpy'
>>> y = F.copy(x, '@cupy:0') # from CPU (NumPy) to GPU 0 (CuPy)
>>> y.device
'@cupy:0'
.. note::
Copies between non-ChainerX devices and ChainerX devices are not
supported.
"""
in_device = backend.get_device_from_array(
x.array if isinstance(x, chainer.Variable) else x)
out_device = chainer.get_device(dst)
is_chainerx = in_device.xp is chainerx
if is_chainerx != (out_device.xp is chainerx):
raise RuntimeError(
'F.copy does not support copies between non-ChainerX devices and '
'a ChainerX device.\n'
'From: {}\n'
'To: {}'.format(in_device, out_device))
y, = Copy(in_device, out_device).apply((x,))
return y
|
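A usage sketch for the `copy` function above (assuming the surrounding chainer.functions module context): a host-to-host copy via the '@numpy' device specifier, mirroring the docstring example.

import numpy as np
import chainer

x = chainer.Variable(np.random.uniform(-1, 1, (5, 10)).astype(np.float32))
y = copy(x, '@numpy')           # same data, new Variable on the host device
print(y.shape, y.device)        # (5, 10) @numpy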
35,864 | def fetch_nearest_dataset_names(X,y=None, **kwargs):
"""
X: numpy array
an n_samples x n_features array of independent variables
y: numpy array or None (default: None)
a n_samples array of dependent variables
"""
    df = pd.DataFrame({**{'x_' + str(i): x for i, x in enumerate(X.transpose())},
                       **{'target': y}})
return fetch_nearest_dataset_names(df, **kwargs)
| def nearest_datasets(X, y=None, **kwargs):
"""
X: numpy array
an n_samples x n_features array of independent variables
y: numpy array or None (default: None)
a n_samples array of dependent variables
"""
    df = pd.DataFrame({**{'x_' + str(i): x for i, x in enumerate(X.transpose())},
                       **{'target': y}})
return fetch_nearest_dataset_names(df, **kwargs)
|
57,213 | def regenerate_missing_stats_for_exploration(
exp_id: str
) -> Tuple[list[str], list[str], int, int]:
"""Regenerates missing ExplorationStats models and entries for all
corresponding states in an exploration.
Args:
exp_id: str. The ID of the exp.
Returns:
4-tuple(missing_exp_stats, missing_state_stats, num_valid_exp_stats,
        num_valid_state_stats), where:
missing_exp_stats: list(str). List of missing exploration stats.
missing_state_stats: list(str). List of missing state stats.
num_valid_exp_stats: int. Number of valid exploration stats.
num_valid_state_stats: int. Number of valid state stats.
Raises:
Exception. Fetching exploration versions failed.
Exception. No ExplorationStatsModels found.
Exception. Exploration snapshots contain invalid commit_cmds.
Exception. Exploration does not have a given state.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
num_valid_state_stats = 0
num_valid_exp_stats = 0
exp_versions = list(range(1, exploration.version + 1))
missing_exp_stats_indices = []
exp_stats_list = stats_services.get_multiple_exploration_stats_by_version(
exp_id, exp_versions)
exp_list = (
exp_fetchers
.get_multiple_versioned_exp_interaction_ids_mapping_by_version(
exp_id, exp_versions))
if all(exp_stats is None for exp_stats in exp_stats_list):
for index, version in enumerate(exp_versions):
exp_stats_for_version = (
stats_services.get_stats_for_new_exploration(
exp_id, version,
list(exp_list[index].state_interaction_ids_dict.keys())))
stats_services.create_stats_model(exp_stats_for_version)
raise Exception('No ExplorationStatsModels found')
snapshots = exp_models.ExplorationModel.get_snapshots_metadata(
exp_id, exp_versions)
change_lists = []
for snapshot in snapshots:
try:
change_lists.append([
exp_domain.ExplorationChange(commit_cmd)
for commit_cmd in snapshot['commit_cmds']
])
except utils.ValidationError as e:
raise Exception(
'Exploration(id=%r) snapshots contain invalid commit_cmds: %r'
% (exp_id, snapshot['commit_cmds'])) from e
missing_exp_stats = []
missing_state_stats = []
zipped_items = list(
zip(exp_stats_list, exp_list, change_lists))
revert_commit_cmd = exp_models.ExplorationModel.CMD_REVERT_COMMIT
for i, (exp_stats, exp, change_list) in enumerate(zipped_items):
revert_to_version = next(
(
int(change.version_number) for change in change_list
if change.cmd == revert_commit_cmd
), None)
new_exp_version = None
if revert_to_version is not None:
exp_versions_diff = None
# We subtract 2 from revert_to_version to get the index of the
# previous exploration version because exp_stats_list and
# prev_exp start with version 1 in the 0th index.
prev_exp_version_index = revert_to_version - 2
prev_exp_stats = exp_stats_list[prev_exp_version_index]
prev_exp = exp_list[prev_exp_version_index]
new_exp_version = revert_to_version
else:
exp_versions_diff = exp_domain.ExplorationVersionsDiff(
change_list)
# We subtract 2 from exp.version to get the index of the
# previous exploration version because exp_stats_list and
# prev_exp start with version 1 in the 0th index.
prev_exp_version_index = exp.version - 2
prev_exp_stats = exp_stats_list[prev_exp_version_index]
prev_exp = exp_list[prev_exp_version_index]
new_exp_version = exp.version
# Fill missing Exploration-level stats.
if exp_stats:
num_valid_exp_stats += 1
elif exp.version == 1:
new_exploration_stats = (
stats_services.get_stats_for_new_exploration(
exp_id, exp.version,
list(exp.state_interaction_ids_dict.keys())))
stats_services.create_stats_model(new_exploration_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
num_valid_state_stats += len(
new_exploration_stats.state_stats_mapping)
continue
else:
exp_stats = prev_exp_stats and prev_exp_stats.clone()
if exp_stats is None:
new_exploration_stats = (
stats_services.get_stats_for_new_exploration(
exp_id, exp.version,
list(exp.state_interaction_ids_dict.keys())))
stats_services.create_stats_model(new_exploration_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
num_valid_state_stats += len(
new_exploration_stats.state_stats_mapping)
continue
if exp_versions_diff:
exp_stats = stats_services.advance_version_of_exp_stats(
new_exp_version, exp_versions_diff, exp_stats, None,
None)
else:
exp_stats.exp_version = exp.version
stats_services.create_stats_model(exp_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
# Fill missing State-level stats.
state_stats_mapping = exp_stats.state_stats_mapping
for state_name in exp.state_interaction_ids_dict.keys():
if state_name in state_stats_mapping:
num_valid_state_stats += 1
continue
if exp_versions_diff:
prev_state_name = (
exp_versions_diff.new_to_old_state_names.get(
state_name, state_name))
else:
prev_state_name = state_name
try:
prev_interaction_id = (
prev_exp.state_interaction_ids_dict[prev_state_name]
if prev_state_name in prev_exp.state_interaction_ids_dict
else None)
current_interaction_id = (
exp.state_interaction_ids_dict[state_name])
exp_stats_list_item = exp_stats_list[i]
assert exp_stats_list_item is not None
# In early schema versions of ExplorationModel, the END
# card was a persistent, implicit state present in every
# exploration. The snapshots of these old explorations have
# since been migrated but they do not have corresponding state
# stats models for the END state. So for such versions, a
# default state stats model should be created.
if current_interaction_id != prev_interaction_id or (
current_interaction_id == 'EndExploration' and
prev_state_name == 'END'):
exp_stats_list_item.state_stats_mapping[state_name] = (
stats_domain.StateStats.create_default()
)
else:
assert prev_exp_stats is not None
exp_stats_list_item.state_stats_mapping[state_name] = (
prev_exp_stats.state_stats_mapping[
prev_state_name].clone()
)
missing_state_stats.append(
'StateStats(exp_id=%r, exp_version=%r, '
'state_name=%r)' % (exp_id, exp.version, state_name))
except Exception as e:
assert exp_versions_diff is not None
raise Exception(
'Exploration(id=%r, exp_version=%r) has no '
'State(name=%r): %r' % (
exp_id, exp_stats.exp_version, prev_state_name, {
'added_state_names': (
exp_versions_diff.added_state_names),
'deleted_state_names': (
exp_versions_diff.deleted_state_names),
'new_to_old_state_names': (
exp_versions_diff.new_to_old_state_names),
'old_to_new_state_names': (
exp_versions_diff.old_to_new_state_names),
'prev_exp.states': (
prev_exp.state_interaction_ids_dict.keys()),
'prev_exp_stats': prev_exp_stats
})) from e
for index, exp_stats in enumerate(exp_stats_list):
if index not in missing_exp_stats_indices:
assert exp_stats is not None
stats_services.save_stats_model(exp_stats)
return (
missing_exp_stats, missing_state_stats,
num_valid_exp_stats, num_valid_state_stats
)
| def regenerate_missing_stats_for_exploration(
exp_id: str
) -> Tuple[list[str], list[str], int, int]:
"""Regenerates missing ExplorationStats models and entries for all
corresponding states in an exploration.
Args:
exp_id: str. The ID of the exp.
Returns:
4-tuple(missing_exp_stats, missing_state_stats, num_valid_exp_stats,
num_valid_state_stats), where:
missing_exp_stats: list(str). List of missing exploration stats.
missing_state_stats: list(str). List of missing state stats.
num_valid_exp_stats: int. Number of valid exploration stats.
num_valid_state_stats: int. Number of valid state stats.
Raises:
Exception. Fetching exploration versions failed.
Exception. No ExplorationStatsModels found.
Exception. Exploration snapshots contain invalid commit_cmds.
Exception. Exploration does not have a given state.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
num_valid_state_stats = 0
num_valid_exp_stats = 0
exp_versions = list(range(1, exploration.version + 1))
missing_exp_stats_indices = []
exp_stats_list = stats_services.get_multiple_exploration_stats_by_version(
exp_id, exp_versions)
exp_list = (
exp_fetchers
.get_multiple_versioned_exp_interaction_ids_mapping_by_version(
exp_id, exp_versions))
if all(exp_stats is None for exp_stats in exp_stats_list):
for index, version in enumerate(exp_versions):
exp_stats_for_version = (
stats_services.get_stats_for_new_exploration(
exp_id, version,
list(exp_list[index].state_interaction_ids_dict.keys())))
stats_services.create_stats_model(exp_stats_for_version)
raise Exception('No ExplorationStatsModels found')
snapshots = exp_models.ExplorationModel.get_snapshots_metadata(
exp_id, exp_versions)
change_lists = []
for snapshot in snapshots:
try:
change_lists.append([
exp_domain.ExplorationChange(commit_cmd)
for commit_cmd in snapshot['commit_cmds']
])
except utils.ValidationError as e:
raise Exception(
'Exploration(id=%r) snapshots contain invalid commit_cmds: %r'
% (exp_id, snapshot['commit_cmds'])) from e
missing_exp_stats = []
missing_state_stats = []
zipped_items = list(
zip(exp_stats_list, exp_list, change_lists))
revert_commit_cmd = exp_models.ExplorationModel.CMD_REVERT_COMMIT
for i, (exp_stats, exp, change_list) in enumerate(zipped_items):
revert_to_version = next(
(
int(change.version_number) for change in change_list
if change.cmd == revert_commit_cmd
), None)
new_exp_version = None
if revert_to_version is not None:
exp_versions_diff = None
# We subtract 2 from revert_to_version to get the index of the
# previous exploration version because exp_stats_list and
# prev_exp start with version 1 in the 0th index.
prev_exp_version_index = revert_to_version - 2
prev_exp_stats = exp_stats_list[prev_exp_version_index]
prev_exp = exp_list[prev_exp_version_index]
new_exp_version = revert_to_version
else:
exp_versions_diff = exp_domain.ExplorationVersionsDiff(
change_list)
# We subtract 2 from exp.version to get the index of the
# previous exploration version because exp_stats_list and
# prev_exp start with version 1 in the 0th index.
prev_exp_version_index = exp.version - 2
prev_exp_stats = exp_stats_list[prev_exp_version_index]
prev_exp = exp_list[prev_exp_version_index]
new_exp_version = exp.version
# Fill missing Exploration-level stats.
if exp_stats:
num_valid_exp_stats += 1
elif exp.version == 1:
new_exploration_stats = (
stats_services.get_stats_for_new_exploration(
exp_id, exp.version,
list(exp.state_interaction_ids_dict.keys())))
stats_services.create_stats_model(new_exploration_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
num_valid_state_stats += len(
new_exploration_stats.state_stats_mapping)
continue
else:
exp_stats = prev_exp_stats and prev_exp_stats.clone()
if exp_stats is None:
new_exploration_stats = (
stats_services.get_stats_for_new_exploration(
exp_id, exp.version,
list(exp.state_interaction_ids_dict.keys())))
stats_services.create_stats_model(new_exploration_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
num_valid_state_stats += len(
new_exploration_stats.state_stats_mapping)
continue
if exp_versions_diff:
exp_stats = stats_services.advance_version_of_exp_stats(
new_exp_version, exp_versions_diff, exp_stats, None,
None)
else:
exp_stats.exp_version = exp.version
stats_services.create_stats_model(exp_stats)
missing_exp_stats_indices.append(i)
missing_exp_stats.append(
'ExplorationStats(exp_id=%r, exp_version=%r)'
% (exp_id, exp.version))
# Fill missing State-level stats.
state_stats_mapping = exp_stats.state_stats_mapping
for state_name in exp.state_interaction_ids_dict.keys():
if state_name in state_stats_mapping:
num_valid_state_stats += 1
continue
if exp_versions_diff:
prev_state_name = (
exp_versions_diff.new_to_old_state_names.get(
state_name, state_name))
else:
prev_state_name = state_name
try:
prev_interaction_id = (
prev_exp.state_interaction_ids_dict[prev_state_name]
if prev_state_name in prev_exp.state_interaction_ids_dict
else None)
current_interaction_id = (
exp.state_interaction_ids_dict[state_name])
exp_stats_list_item = exp_stats_list[i]
assert exp_stats_list_item is not None
# In early schema versions of ExplorationModel, the END
# card was a persistent, implicit state present in every
# exploration. The snapshots of these old explorations have
# since been migrated but they do not have corresponding state
# stats models for the END state. So for such versions, a
# default state stats model should be created.
if (
current_interaction_id != prev_interaction_id or
(
current_interaction_id == 'EndExploration' and
prev_state_name == 'END'
)
):
exp_stats_list_item.state_stats_mapping[state_name] = (
stats_domain.StateStats.create_default()
)
else:
assert prev_exp_stats is not None
exp_stats_list_item.state_stats_mapping[state_name] = (
prev_exp_stats.state_stats_mapping[
prev_state_name].clone()
)
missing_state_stats.append(
'StateStats(exp_id=%r, exp_version=%r, '
'state_name=%r)' % (exp_id, exp.version, state_name))
except Exception as e:
assert exp_versions_diff is not None
raise Exception(
'Exploration(id=%r, exp_version=%r) has no '
'State(name=%r): %r' % (
exp_id, exp_stats.exp_version, prev_state_name, {
'added_state_names': (
exp_versions_diff.added_state_names),
'deleted_state_names': (
exp_versions_diff.deleted_state_names),
'new_to_old_state_names': (
exp_versions_diff.new_to_old_state_names),
'old_to_new_state_names': (
exp_versions_diff.old_to_new_state_names),
'prev_exp.states': (
prev_exp.state_interaction_ids_dict.keys()),
'prev_exp_stats': prev_exp_stats
})) from e
for index, exp_stats in enumerate(exp_stats_list):
if index not in missing_exp_stats_indices:
assert exp_stats is not None
stats_services.save_stats_model(exp_stats)
return (
missing_exp_stats, missing_state_stats,
num_valid_exp_stats, num_valid_state_stats
)
|
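A small worked example of the index arithmetic relied on in the comments above; the version numbers are illustrative.

# exp_stats_list[0] holds the stats for version 1, so the stats of the
# version *preceding* version v live at index v - 2.
exp_versions = [1, 2, 3, 4]   # stored at indices 0..3
v = 3
prev_index = v - 2            # -> index 1, i.e. version 2
assert exp_versions[prev_index] == v - 1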
44,200 | def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
| def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Reshape the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
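As an illustration of the first-level factorization described in the docstring, the following self-contained NumPy sketch builds a toy tensor with the required structure and checks that eigendecomposition-based factors reconstruct it; the toy construction and the 1e-10 cutoff are assumptions for the example, not part of the library.

import numpy as np

n, rank = 2, 3
rng = np.random.default_rng(0)
# Build a toy two-electron-like tensor from known symmetric factors.
L_true = np.array([(m + m.T) / 2 for m in rng.normal(size=(rank, n, n))])
V = np.einsum('rij,rkl->ijkl', L_true, L_true)

# Steps 1-3 above: reshape to an n^2 x n^2 matrix, diagonalize, keep significant modes.
mat = V.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(mat)
keep = np.abs(eigvals) > 1e-10
vectors = eigvecs[:, keep] * np.sqrt(np.abs(eigvals[keep]))
factors = vectors.T.reshape(-1, n, n)

# The recovered factors differ from L_true by an orthogonal mixing, but they
# reproduce the original tensor: V_{ijkl} = sum_r L_{ij}^{(r)} L_{kl}^{(r)}.
assert np.allclose(V, np.einsum('rij,rkl->ijkl', factors, factors))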
13,161 | def wait_for_deleted(
*,
pg_manager: gitlab.base.RESTManager,
object_id: int,
description: str,
hard_delete: bool = False,
) -> None:
"""Ensure the object specified can not be retrieved. If object still exists after
timeout period, fail the test"""
max_iterations = int(TIMEOUT / SLEEP_INTERVAL)
for _ in range(max_iterations):
try:
object = pg_manager.get(object_id)
except gitlab.exceptions.GitlabGetError:
return
try:
if hard_delete:
object.delete(hard_delete=True)
else:
object.delete()
except gitlab.exceptions.GitlabDeleteError:
pass
time.sleep(SLEEP_INTERVAL)
pytest.fail(f"{description} {object_id} was not deleted")
| def wait_for_deleted(
*,
manager: gitlab.base.RESTManager,
object_id: int,
description: str,
hard_delete: bool = False,
) -> None:
"""Ensure the object specified can not be retrieved. If object still exists after
timeout period, fail the test"""
max_iterations = int(TIMEOUT / SLEEP_INTERVAL)
for _ in range(max_iterations):
try:
object = pg_manager.get(object_id)
except gitlab.exceptions.GitlabGetError:
return
try:
if hard_delete:
object.delete(hard_delete=True)
else:
object.delete()
except gitlab.exceptions.GitlabDeleteError:
pass
time.sleep(SLEEP_INTERVAL)
pytest.fail(f"{description} {object_id} was not deleted")
|
41,702 | def test_generate_packages_json(tmp_path):
# Set up directory to store dummy package files for SHA-256 hash verification
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.file_name = pkg.file_name or pkg.name + ".file"
# Write dummy package file for SHA-256 hash verification
with open(os.path.join(tmp_path, pkg.file_name), "w") as f:
f.write(pkg.name)
package_data = buildall.generate_packages_json(tmp_path, pkg_map)
assert set(package_data.keys()) == {"info", "packages"}
assert package_data["info"] == {"arch": "wasm32", "platform": "Emscripten-1.0"}
assert set(package_data["packages"]) == {
"pkg_1",
"pkg_1_1",
"pkg_2",
"pkg_3",
"pkg_3_1",
}
assert package_data["packages"]["pkg_1"] == {
"name": "pkg_1",
"version": "1.0.0",
"file_name": "pkg_1.file",
"depends": ["pkg_1_1", "pkg_3"],
"imports": ["pkg_1"],
"install_dir": "site",
"sha_256": "c1e38241013b5663e902fff97eb8585e98e6df446585da1dcf2ad121b52c2143",
}
| def test_generate_packages_json(tmp_path):
# Set up directory to store dummy package files for SHA-256 hash verification
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
pkg_map = buildall.generate_dependency_graph(PACKAGES_DIR, {"pkg_1", "pkg_2"})
for pkg in pkg_map.values():
pkg.file_name = pkg.file_name or pkg.name + ".file"
# Write dummy package file for SHA-256 hash verification
with open(tmp_path / pkg.file_name, "w") as f:
f.write(pkg.name)
package_data = buildall.generate_packages_json(tmp_path, pkg_map)
assert set(package_data.keys()) == {"info", "packages"}
assert package_data["info"] == {"arch": "wasm32", "platform": "Emscripten-1.0"}
assert set(package_data["packages"]) == {
"pkg_1",
"pkg_1_1",
"pkg_2",
"pkg_3",
"pkg_3_1",
}
assert package_data["packages"]["pkg_1"] == {
"name": "pkg_1",
"version": "1.0.0",
"file_name": "pkg_1.file",
"depends": ["pkg_1_1", "pkg_3"],
"imports": ["pkg_1"],
"install_dir": "site",
"sha_256": "c1e38241013b5663e902fff97eb8585e98e6df446585da1dcf2ad121b52c2143",
}
|
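For context, a minimal stand-alone sketch of the SHA-256 verification idea used in the test above; only hashlib and a temporary file are involved, and the file name merely mirrors the dummy package.

import hashlib
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    pkg_file = Path(tmp) / "pkg_1.file"
    pkg_file.write_text("pkg_1")
    # The digest of the file equals the digest of the string written into it.
    digest = hashlib.sha256(pkg_file.read_bytes()).hexdigest()
    assert digest == hashlib.sha256(b"pkg_1").hexdigest()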
40,165 | def compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
X_layers_key
if not None, will use this in adata.layers[] for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
assert batch_key in adata.obs_keys(), "batch_key not valid key in obs dataframe"
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if X_layers_key is not None:
assert (
X_layers_key in adata.layers.keys()
), "X_layers_key not a valid key for adata.layers"
data = adata[idx_batch].layers[X_layers_key]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch],) = compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
| def compute_library_size_batch(
adata,
batch_key: str,
local_l_mean_key_added: str = None,
local_l_var_key: str = None,
X_layers_key=None,
copy: bool = False,
):
"""Computes the library size
Parameters
----------
adata
anndata object containing counts
batch_key
key in obs for batch information
local_l_mean_key
key in obs to save the local log mean
local_l_var_key
key in obs to save the local log variance
X_layers_key
if not None, will use this in adata.layers[] for X
copy
if True, returns a copy of the adata
Returns
-------
type
anndata.AnnData if copy was True, else None
"""
assert batch_key in adata.obs_keys(), "batch_key not valid key in obs dataframe"
local_means = np.zeros((adata.shape[0], 1))
local_vars = np.zeros((adata.shape[0], 1))
batch_indices = adata.obs[batch_key]
for i_batch in np.unique(batch_indices):
idx_batch = np.squeeze(batch_indices == i_batch)
if X_layers_key is not None:
assert (
X_layers_key in adata.layers.keys()
), "X_layers_key not a valid key for adata.layers"
data = adata[idx_batch].layers[X_layers_key]
else:
data = adata[idx_batch].X
(local_means[idx_batch], local_vars[idx_batch],) = compute_library_size(data)
if local_l_mean_key is None:
local_l_mean_key = "_scvi_local_l_mean"
if local_l_var_key is None:
local_l_var_key = "_scvi_local_l_var"
if copy:
copy = adata.copy()
copy.obs[local_l_mean_key] = local_means
copy.obs[local_l_var_key] = local_vars
return copy
else:
adata.obs[local_l_mean_key] = local_means
adata.obs[local_l_var_key] = local_vars
|
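A plausible sketch of the per-batch statistic being stored, assuming the compute_library_size helper returns the mean and variance of the log total counts per cell; the names and toy counts below are illustrative, not the library's API.

import numpy as np

def log_library_stats(counts):
    """Mean and variance of log total counts per cell for one batch of shape (n_cells, n_genes)."""
    library_sizes = counts.sum(axis=1)
    log_sizes = np.log(library_sizes)
    return log_sizes.mean(), log_sizes.var()

rng = np.random.default_rng(0)
counts = rng.poisson(5, size=(100, 20)) + 1   # +1 keeps every library size positive in this toy example
mean, var = log_library_stats(counts)
print(mean, var)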
23,742 | def _get_lvm_cmdline(cmd):
''' Build command line for :program:`lvm` call.
The purpose of this function is to keep all the detailed lvm options in
one place.
:param cmd: array of str, where cmd[0] is action and the rest are arguments
:return array of str appropriate for subprocess.Popen
'''
action = cmd[0]
if action == 'remove':
assert len(cmd) == 2, 'wrong number of arguments for remove'
assert not cmd[1].startswith('/'), 'absolute path to ‘remove’???'
lvm_cmd = ['lvremove', '--force', '--', cmd[1]]
elif action == 'clone':
assert len(cmd) == 3, 'wrong number of arguments for clone'
lvm_cmd = ['lvcreate', '--setactivationskip=n', '--activate=y',
'--snapshot', '--type=thin', '--name=' + cmd[2],
'--', cmd[1]]
elif action == 'create':
assert len(cmd) == 4, 'wrong number of arguments for create'
lvm_cmd = ['lvcreate', '--thin', '--setactivationskip=n',
'--activate=y', '--name=' + cmd[2],
'--virtualsize=' + cmd[3] + 'B', '--', cmd[1]]
elif action == 'resize':
assert len(cmd) == 3, 'wrong number of arguments for extend'
lvm_cmd = ["lvresize", "--force", "--size=" + cmd[2] + 'B', '--', cmd[1]]
elif action == 'activate':
assert len(cmd) == 2, 'wrong number of arguments for activate'
lvm_cmd = ['lvchange', '--activate=y', '--', cmd[1]]
elif action == 'rename':
assert len(cmd) == 3, 'wrong number of arguments for rename'
lvm_cmd = ['lvrename', '--', cmd[1], cmd[2]]
else:
raise NotImplementedError('unsupported action: ' + action)
if os.getuid() != 0:
cmd = [_sudo, _lvm] + lvm_cmd
else:
cmd = [_lvm] + lvm_cmd
return cmd
| def _get_lvm_cmdline(cmd):
''' Build command line for :program:`lvm` call.
The purpose of this function is to keep all the detailed lvm options in
one place.
:param cmd: array of str, where cmd[0] is action and the rest are arguments
:return array of str appropriate for subprocess.Popen
'''
action = cmd[0]
if action == 'remove':
assert len(cmd) == 2, 'wrong number of arguments for remove'
assert not cmd[1].startswith('/'), 'absolute path to ‘remove’???'
lvm_cmd = ['lvremove', '--force', '--', cmd[1]]
elif action == 'clone':
assert len(cmd) == 3, 'wrong number of arguments for clone'
lvm_cmd = ['lvcreate', '--setactivationskip=n', '--activate=y',
'--snapshot', '--type=thin', '--name=' + cmd[2],
'--', cmd[1]]
elif action == 'create':
assert len(cmd) == 4, 'wrong number of arguments for create'
lvm_cmd = ['lvcreate', '--thin', '--setactivationskip=n',
'--activate=y', '--name=' + cmd[2],
'--virtualsize=' + cmd[3] + 'B', '--', cmd[1]]
elif action == 'resize':
assert len(cmd) == 3, 'wrong number of arguments for resize'
lvm_cmd = ["lvresize", "--force", "--size=" + cmd[2] + 'B', '--', cmd[1]]
elif action == 'activate':
assert len(cmd) == 2, 'wrong number of arguments for activate'
lvm_cmd = ['lvchange', '--activate=y', '--', cmd[1]]
elif action == 'rename':
assert len(cmd) == 3, 'wrong number of arguments for rename'
lvm_cmd = ['lvrename', '--', cmd[1], cmd[2]]
else:
raise NotImplementedError('unsupported action: ' + action)
if os.getuid() != 0:
cmd = [_sudo, _lvm] + lvm_cmd
else:
cmd = [_lvm] + lvm_cmd
return cmd
|
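A hedged usage illustration of the 'create' branch above; the volume-group and volume names are made up, and '/sbin/lvm' stands in for whatever _lvm points to when running as root.

cmd = _get_lvm_cmdline(['create', 'qubes_dom0/vm-pool', 'vm-work-root', '10737418240'])
# Expected result (as root, with _lvm == '/sbin/lvm'):
# ['/sbin/lvm', 'lvcreate', '--thin', '--setactivationskip=n',
#  '--activate=y', '--name=vm-work-root',
#  '--virtualsize=10737418240B', '--', 'qubes_dom0/vm-pool']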
1,795 | def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
np.random.seed(1)
n_samples = 1000
n_features = 2
n_half_samples = int(n_samples / 2)
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two features' importances should equal 1 in expectation (when using
# mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches infinity, the ratio of
# the two features' importances should equal 2 in expectation (when using
# mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
| def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
np.random.seed(1)
n_samples = 1000
n_features = 2
n_half_samples = n_samples // 2
x = np.random.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two features' importances should equal 1 in expectation (when using
# mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w_none, 2) == 1.00
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == x1_x2_imp_ratio_w_none
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches infinity, the ratio of
# the two features' importances should equal 2 in expectation (when using
# mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=1000,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert np.round(x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none, 2) == 2.00
|
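A condensed, runnable sketch of the weighting argument in the test above; it assumes scikit-learn's LinearRegression and a permutation_importance that accepts sample_weight, exactly as the test does.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.inspection import permutation_importance

rng = np.random.RandomState(1)
n = 1000
X = rng.normal(0.0, 1.0, (n, 2))
# First half: y = 2*x0 + x1; second half: y = x0 + 2*x1.
y = np.r_[2 * X[:n // 2, 0] + X[:n // 2, 1],
          X[n // 2:, 0] + 2 * X[n // 2:, 1]]
# Weights that overwhelmingly favour the first half make the model see y = 2*x0 + x1.
w = np.r_[np.full(n // 2, 1e10), np.full(n // 2, 1.0)]

lr = LinearRegression(fit_intercept=False).fit(X, y, sample_weight=w)
pi = permutation_importance(lr, X, y, scoring="neg_mean_absolute_error",
                            n_repeats=50, random_state=1, sample_weight=w)
print(pi.importances_mean[0] / pi.importances_mean[1])  # roughly 2 in expectation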
32,146 | def get_vulnerability_command():
vuln_id = demisto.args()['vulnerability_id']
scan_results_id = demisto.args()['scan_results_id']
page = int(demisto.args().get('page'))
limit = int(demisto.args().get('limit'))
if limit > 200:
limit = 200
vuln_filter = [{
'filterName': 'pluginID',
'operator': '=',
'value': vuln_id
}]
query = {
'scanID': scan_results_id,
'filters': vuln_filter,
'tool': 'vulndetails',
'type': 'vuln',
'startOffset': page, # Lower bound for the results list (must be specified)
'endOffset': page + limit # Upper bound for the results list (must be specified)
}
analysis = get_analysis(query, scan_results_id)
if not analysis or 'response' not in analysis:
return_error('Error: Could not get vulnerability analysis')
results = analysis['response']['results']
if not results or len(results) == 0:
return_error('Error: Vulnerability not found in the scan results')
vuln_response = get_vulnerability(vuln_id)
if not vuln_response or 'response' not in vuln_response:
return_message('Vulnerability not found')
vuln = vuln_response['response']
vuln['severity'] = results[0]['severity'] # The vulnerability severity is the same in all the results
hosts = get_vulnerability_hosts_from_analysis(results)
cves = None
cves_output = [] # type: List[dict]
if vuln.get('xrefs'):
# Extract CVE
cve_filter = list(filter(lambda x: x.strip().startswith('CVE'), vuln['xrefs'].split(',')))
if cve_filter and len(cve_filter) > 0:
cves = list(map(lambda c: c.replace('CVE:', '').strip(), cve_filter))
cves_output += map(lambda c: {
'ID': c
}, cves)
mapped_vuln = {
'ID': vuln['id'],
'Name': vuln['name'],
'Description': vuln['description'],
'Type': vuln['type'],
'Severity': vuln['severity'].get('name'),
'Synopsis': vuln['synopsis'],
'Solution': vuln['solution']
}
vuln_info = {
'Published': timestamp_to_utc(vuln['vulnPubDate']),
'CPE': vuln['cpe'],
'CVE': cves
}
exploit_info = {
'ExploitAvailable': vuln['exploitAvailable'],
'ExploitEase': vuln['exploitEase']
}
risk_info = {
'RiskFactor': vuln['riskFactor'],
'CVSSBaseScore': vuln['baseScore'],
'CVSSTemporalScore': vuln['temporalScore'],
'CVSSVector': vuln['cvssVector']
}
plugin_details = {
'Family': vuln['family'].get('name'),
'Published': timestamp_to_utc(vuln['pluginPubDate']),
'Modified': timestamp_to_utc(vuln['pluginModDate']),
'CheckType': vuln['checkType']
}
hr = '## Vulnerability: {} ({})\n'.format(mapped_vuln['Name'], mapped_vuln['ID'])
hr += '### Synopsis\n{}\n### Description\n{}\n### Solution\n{}\n'.format(
mapped_vuln['Synopsis'], mapped_vuln['Description'], mapped_vuln['Solution'])
hr += tableToMarkdown('Hosts', hosts, removeNull=True)
hr += tableToMarkdown('Risk Information', risk_info, removeNull=True)
hr += tableToMarkdown('Exploit Information', exploit_info, removeNull=True)
hr += tableToMarkdown('Plugin Details', plugin_details, removeNull=True)
hr += tableToMarkdown('Vulnerability Information', vuln_info, removeNull=True)
mapped_vuln.update(vuln_info)
mapped_vuln.update(exploit_info)
mapped_vuln.update(risk_info)
mapped_vuln['PluginDetails'] = plugin_details
mapped_vuln['Host'] = hosts
scan_result = {
'ID': scan_results_id,
'Vulnerability': mapped_vuln,
}
context = {}
context['TenableSC.ScanResults.Vulnerability(val.ID=={})'.format(vuln_id)] = createContext(scan_result['Vulnerability'], removeNull=True)
if len(cves_output) > 0:
context['CVE(val.ID===obj.ID)'] = createContext(cves_output)
demisto.results({
'Type': entryTypes['note'],
'Contents': vuln_response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': context
})
| def get_vulnerability_command():
vuln_id = demisto.args()['vulnerability_id']
scan_results_id = demisto.args()['scan_results_id']
page = int(demisto.args().get('page'))
limit = int(demisto.args().get('limit'))
if limit > 200:
limit = 200
vuln_filter = [{
'filterName': 'pluginID',
'operator': '=',
'value': vuln_id
}]
query = {
'scanID': scan_results_id,
'filters': vuln_filter,
'tool': 'vulndetails',
'type': 'vuln',
'startOffset': page, # Lower bound for the results list (must be specified)
'endOffset': page + limit # Upper bound for the results list (must be specified)
}
analysis = get_analysis(query, scan_results_id)
if not analysis or 'response' not in analysis:
return_error('Error: Could not get vulnerability analysis')
results = analysis['response']['results']
if not results or len(results) == 0:
return_error('Error: Vulnerability not found in the scan results')
vuln_response = get_vulnerability(vuln_id)
if not vuln_response or 'response' not in vuln_response:
return_message('Vulnerability not found')
vuln = vuln_response['response']
vuln['severity'] = results[0]['severity'] # The vulnerability severity is the same in all the results
hosts = get_vulnerability_hosts_from_analysis(results)
cves = None
cves_output = [] # type: List[dict]
if vuln.get('xrefs'):
# Extract CVE
cve_filter = list(filter(lambda x: x.strip().startswith('CVE'), vuln['xrefs'].split(',')))
if cve_filter and len(cve_filter) > 0:
cves = list(map(lambda c: c.replace('CVE:', '').strip(), cve_filter))
cves_output += map(lambda c: {
'ID': c
}, cves)
mapped_vuln = {
'ID': vuln['id'],
'Name': vuln['name'],
'Description': vuln['description'],
'Type': vuln['type'],
'Severity': vuln['severity'].get('name'),
'Synopsis': vuln['synopsis'],
'Solution': vuln['solution']
}
vuln_info = {
'Published': timestamp_to_utc(vuln['vulnPubDate']),
'CPE': vuln['cpe'],
'CVE': cves
}
exploit_info = {
'ExploitAvailable': vuln['exploitAvailable'],
'ExploitEase': vuln['exploitEase']
}
risk_info = {
'RiskFactor': vuln['riskFactor'],
'CVSSBaseScore': vuln['baseScore'],
'CVSSTemporalScore': vuln['temporalScore'],
'CVSSVector': vuln['cvssVector']
}
plugin_details = {
'Family': vuln['family'].get('name'),
'Published': timestamp_to_utc(vuln['pluginPubDate']),
'Modified': timestamp_to_utc(vuln['pluginModDate']),
'CheckType': vuln['checkType']
}
hr = '## Vulnerability: {} ({})\n'.format(mapped_vuln['Name'], mapped_vuln['ID'])
hr += '### Synopsis\n{}\n### Description\n{}\n### Solution\n{}\n'.format(
mapped_vuln['Synopsis'], mapped_vuln['Description'], mapped_vuln['Solution'])
hr += tableToMarkdown('Hosts', hosts, removeNull=True)
hr += tableToMarkdown('Risk Information', risk_info, removeNull=True)
hr += tableToMarkdown('Exploit Information', exploit_info, removeNull=True)
hr += tableToMarkdown('Plugin Details', plugin_details, removeNull=True)
hr += tableToMarkdown('Vulnerability Information', vuln_info, removeNull=True)
mapped_vuln.update(vuln_info)
mapped_vuln.update(exploit_info)
mapped_vuln.update(risk_info)
mapped_vuln['PluginDetails'] = plugin_details
mapped_vuln['Host'] = hosts
scan_result = {
'ID': scan_results_id,
'Vulnerability': mapped_vuln,
}
context = {}
context['TenableSC.ScanResults.Vulnerability(val.ID===obj.ID)'] = createContext(scan_result['Vulnerability'], removeNull=True)
if len(cves_output) > 0:
context['CVE(val.ID===obj.ID)'] = createContext(cves_output)
demisto.results({
'Type': entryTypes['note'],
'Contents': vuln_response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': context
})
|
23,830 | def _capture_scm_auto_fields(conanfile, conanfile_dir, recipe_layout, ignore_dirty):
"""Deduce the values for the scm auto fields or functions assigned to 'url' or 'revision'
and replace the conanfile.py contents.
Returns a tuple with (scm_data, path_to_scm_local_directory)"""
scm_data = get_scm_data(conanfile)
if not scm_data:
return None, None
# Resolve SCMData in the user workspace (someone may want to access CVS or import some py)
scm = SCM(scm_data, conanfile_dir, conanfile.output)
captured = scm_data.capture_origin or scm_data.capture_revision
if not captured:
# We replace not only "auto" values, but also evaluated functions (e.g. from a python_require)
_add_scmdata_to_conandata_yml(recipe_layout, scm_data)
return scm_data, None
if not scm.is_pristine() and not ignore_dirty:
conanfile.output.warning("There are uncommitted changes, skipping the replacement of 'scm.url' and "
"'scm.revision' auto fields. Use --ignore-dirty to force it. The 'conan "
"upload' command will prevent uploading recipes with 'auto' values in these "
"fields.")
origin = scm.get_qualified_remote_url(remove_credentials=True)
local_src_path = scm.get_local_path_to_url(origin)
return scm_data, local_src_path
if scm_data.url == "auto":
origin = scm.get_qualified_remote_url(remove_credentials=True)
if not origin:
conanfile.output.warning(
"Repo origin cannot be deduced, 'auto' fields won't be replaced."
" 'conan upload' command will prevent uploading recipes with 'auto'"
" values in these fields.")
local_src_path = scm.get_local_path_to_url(origin)
return scm_data, local_src_path
if scm.is_local_repository():
conanfile.output.warning("Repo origin looks like a local path: %s" % origin)
conanfile.output.success("Repo origin deduced by 'auto': %s" % origin)
scm_data.url = origin
if scm_data.revision == "auto":
# If it is pristine by default we don't replace the "auto" unless forcing
# This prevents the recipe to get uploaded pointing to an invalid commit
scm_data.revision = scm.get_revision()
conanfile.output.success("Revision deduced by 'auto': %s" % scm_data.revision)
local_src_path = scm.get_local_path_to_url(scm_data.url)
_add_scmdata_to_conandata_yml(recipe_layout, scm_data)
return scm_data, local_src_path
| def _capture_scm_auto_fields(conanfile, conanfile_dir, recipe_layout, ignore_dirty):
"""Deduce the values for the scm auto fields or functions assigned to 'url' or 'revision'
and replace the conanfile.py contents.
Returns a tuple with (scm_data, path_to_scm_local_directory)"""
scm_data = get_scm_data(conanfile)
if not scm_data:
return None, None
# Resolve SCMData in the user workspace (someone may want to access CVS or import some py)
scm = SCM(scm_data, conanfile_dir, conanfile.output)
captured = scm_data.capture_origin or scm_data.capture_revision
if not captured:
_add_scmdata_to_conandata_yml(recipe_layout, scm_data)
return scm_data, None
if not scm.is_pristine() and not ignore_dirty:
conanfile.output.warning("There are uncommitted changes, skipping the replacement of 'scm.url' and "
"'scm.revision' auto fields. Use --ignore-dirty to force it. The 'conan "
"upload' command will prevent uploading recipes with 'auto' values in these "
"fields.")
origin = scm.get_qualified_remote_url(remove_credentials=True)
local_src_path = scm.get_local_path_to_url(origin)
return scm_data, local_src_path
if scm_data.url == "auto":
origin = scm.get_qualified_remote_url(remove_credentials=True)
if not origin:
conanfile.output.warning(
"Repo origin cannot be deduced, 'auto' fields won't be replaced."
" 'conan upload' command will prevent uploading recipes with 'auto'"
" values in these fields.")
local_src_path = scm.get_local_path_to_url(origin)
return scm_data, local_src_path
if scm.is_local_repository():
conanfile.output.warning("Repo origin looks like a local path: %s" % origin)
conanfile.output.success("Repo origin deduced by 'auto': %s" % origin)
scm_data.url = origin
if scm_data.revision == "auto":
# If it is pristine by default we don't replace the "auto" unless forcing
# This prevents the recipe to get uploaded pointing to an invalid commit
scm_data.revision = scm.get_revision()
conanfile.output.success("Revision deduced by 'auto': %s" % scm_data.revision)
local_src_path = scm.get_local_path_to_url(scm_data.url)
_add_scmdata_to_conandata_yml(recipe_layout, scm_data)
return scm_data, local_src_path
|
56,277 | def export(
w: Union[Shape, Workplane],
fname: str,
exportType: Optional[ExportLiterals] = None,
tolerance: float = 0.1,
angularTolerance: float = 0.1,
opt=None,
):
"""
Export Workplane or Shape to file. Multiple entities are converted to compound.
:param w: Shape or Workplane to be exported.
:param fname: output filename.
:param exportType: the exportFormat to use. If None will be inferred from the extension. Default: None.
:param tolerance: the deflection tolerance, in model units. Default 0.1.
:param angularTolerance: the angular tolerance, in radians. Default 0.1.
:param opt: additional options passed to the specific exporter. Default None.
"""
shape: Shape
f: IO
if isinstance(w, Workplane):
shape = toCompound(w)
else:
shape = w
if exportType is None:
t = fname.split(".")[-1].upper()
if t in ExportTypes.__dict__.values():
exportType = cast(ExportLiterals, t)
else:
raise ValueError("Unknown extensions, specify export type explicitly")
if exportType == ExportTypes.TJS:
tess = shape.tessellate(tolerance, angularTolerance)
mesher = JsonMesh()
# add vertices
for v in tess[0]:
mesher.addVertex(v.x, v.y, v.z)
# add triangles
for ixs in tess[1]:
mesher.addTriangleFace(*ixs)
with open(fname, "w") as f:
f.write(mesher.toJson())
elif exportType == ExportTypes.SVG:
with open(fname, "w") as f:
f.write(getSVG(shape, opt))
elif exportType == ExportTypes.AMF:
tess = shape.tessellate(tolerance, angularTolerance)
aw = AmfWriter(tess)
with open(fname, "wb") as f:
aw.writeAmf(f)
elif exportType == ExportTypes.DXF:
if isinstance(w, Workplane):
exportDXF(w, fname)
else:
raise ValueError("Only Workplanes can be exported as DXF")
elif exportType == ExportTypes.STEP:
if opt:
shape.exportStep(fname, **opt)
else:
shape.exportStep(fname)
elif exportType == ExportTypes.STL:
if hasascii := [
opt.get(k) for k in ["ascii", "ASCII"] if opt is not None and k in opt
]:
shape.exportStl(fname, tolerance, angularTolerance, hasascii.pop())
else:
shape.exportStl(fname, tolerance, angularTolerance)
elif exportType == ExportTypes.VRML:
shape.mesh(tolerance, angularTolerance)
VrmlAPI.Write_s(shape.wrapped, fname)
elif exportType == ExportTypes.VTP:
exportVTP(shape, fname, tolerance, angularTolerance)
else:
raise ValueError("Unknown export type")
| def export(
w: Union[Shape, Workplane],
fname: str,
exportType: Optional[ExportLiterals] = None,
tolerance: float = 0.1,
angularTolerance: float = 0.1,
opt=None,
):
"""
Export Workplane or Shape to file. Multiple entities are converted to compound.
:param w: Shape or Workplane to be exported.
:param fname: output filename.
:param exportType: the exportFormat to use. If None will be inferred from the extension. Default: None.
:param tolerance: the deflection tolerance, in model units. Default 0.1.
:param angularTolerance: the angular tolerance, in radians. Default 0.1.
:param opt: additional options passed to the specific exporter. Default None.
"""
shape: Shape
f: IO
if isinstance(w, Workplane):
shape = toCompound(w)
else:
shape = w
if exportType is None:
t = fname.split(".")[-1].upper()
if t in ExportTypes.__dict__.values():
exportType = cast(ExportLiterals, t)
else:
raise ValueError("Unknown extensions, specify export type explicitly")
if exportType == ExportTypes.TJS:
tess = shape.tessellate(tolerance, angularTolerance)
mesher = JsonMesh()
# add vertices
for v in tess[0]:
mesher.addVertex(v.x, v.y, v.z)
# add triangles
for ixs in tess[1]:
mesher.addTriangleFace(*ixs)
with open(fname, "w") as f:
f.write(mesher.toJson())
elif exportType == ExportTypes.SVG:
with open(fname, "w") as f:
f.write(getSVG(shape, opt))
elif exportType == ExportTypes.AMF:
tess = shape.tessellate(tolerance, angularTolerance)
aw = AmfWriter(tess)
with open(fname, "wb") as f:
aw.writeAmf(f)
elif exportType == ExportTypes.DXF:
if isinstance(w, Workplane):
exportDXF(w, fname)
else:
raise ValueError("Only Workplanes can be exported as DXF")
elif exportType == ExportTypes.STEP:
if opt:
shape.exportStep(fname, **opt)
else:
shape.exportStep(fname)
elif exportType == ExportTypes.STL:
if opt:
useascii = opt.get("ascii", False) or opt.get("ASCII", False)
else:
useascii = False
shape.exportStl(fname, tolerance, angularTolerance, useascii)
elif exportType == ExportTypes.VRML:
shape.mesh(tolerance, angularTolerance)
VrmlAPI.Write_s(shape.wrapped, fname)
elif exportType == ExportTypes.VTP:
exportVTP(shape, fname, tolerance, angularTolerance)
else:
raise ValueError("Unknown export type")
|
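A brief usage sketch of the extension-inference path described above, assuming a CadQuery environment where this exporter is exposed as cadquery.exporters and supports the opt-based ASCII switch shown in the second version; the file names are arbitrary.

import cadquery as cq
from cadquery import exporters

box = cq.Workplane("XY").box(1, 2, 3)

exporters.export(box, "box.step")                            # type inferred from ".step"
exporters.export(box, "box.stl", tolerance=0.01)             # binary STL by default
exporters.export(box, "box_ascii.stl", opt={"ascii": True})  # ASCII STL via opt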
24,851 | def my_func(self): # [missing-return-doc]
"""This is a docstring.
Returns:
bool:
"""
return False
| def my_func(self): # [missing-return-doc]
"""warn_partial_google_returns_type
Returns:
bool:
"""
return False
|
7,867 | def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)
np.testing.assert_allclose(n, n_ref)
# Check alternate units
volume = res[0].volume["1"]
t_days, n_cm3 = res.get_atoms("1", "Xe135", nuc_units="atoms/cm^3", time_units="d")
assert t_days == pytest.approx(t_ref / (60 * 60 * 24))
assert n_cm3 == pytest.approx(n_ref / volume)
_t, n_bcm = res.get_atoms("1", "Xe135", nuc_units="atoms/b/cm")
assert n_bcm == pytest.approx(n_cm3 * 1e-24)
| def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)
np.testing.assert_allclose(n, n_ref)
# Check alternate units
volume = res[0].volume["1"]
t_days, n_cm3 = res.get_atoms("1", "Xe135", nuc_units="atom/cm3", time_units="d")
assert t_days == pytest.approx(t_ref / (60 * 60 * 24))
assert n_cm3 == pytest.approx(n_ref / volume)
_t, n_bcm = res.get_atoms("1", "Xe135", nuc_units="atoms/b/cm")
assert n_bcm == pytest.approx(n_cm3 * 1e-24)
|
12,030 | def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coords in each region cubes, containing the
mesh location indices.
Result:
* result_cube
An unstructured region_cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result.
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim != mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
# Mesh dim is already the last one : no transposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
| def recombine_regions(
full_mesh_cube: Cube,
region_cubes: Iterable[Cube],
index_coord_name: AnyStr = "i_mesh_index",
) -> Cube:
"""
Put data from regional sub-meshes back onto the original full mesh.
The result is a region_cube identical to 'full_mesh_cube', but with its data
replaced by a combination of data from the provided 'region_cubes'.
The result metadata, including name and units, are also replaced by those
of the 'region_cubes' (which must all be the same).
Args:
* full_mesh_cube
Describes the full mesh and mesh-location to which the region data
refers, and acts as a template for the result.
Must have a :class:`~iris.experimental.ugrid.mesh.Mesh`.
* region_cubes
Contain data on a subset of the 'full_mesh_cube' mesh locations.
The region cubes do not need to have a mesh. There must be at least
1 of them, to determine the result phenomenon.
Their shapes and dimension-coords must all match those of
'full_mesh_cube', except in the mesh dimension, which can have
different sizes between the regions, and from the 'full_mesh_cube'.
The mesh dimension of each region cube must have a 1-D coord named by
'index_coord_name'. Although these region index coords can vary in
length, they must all have matching metadata (names, units and
attributes), and must also match the coord of that name in the
'full_mesh_cube', if there is one.
The ".points" values of the region index coords specify, for each
datapoint, its location in the original mesh -- i.e. they are indices
into the relevant mesh-location dimension.
* index_coord_name
Coord name of the index coords in each region cubes, containing the
mesh location indices.
Result:
* result_cube
An unstructured region_cube identical to 'full_mesh_cube', and with the
same mesh and location, but with its data and ".metadata" replaced by
that from the 'region_cubes'.
Where regions overlap, the result data comes from the last-listed of the
original region cubes which contain that location.
Where no region contains a datapoint, it will be masked in the result.
HINT: alternatively, values covered by no region can be taken from the
original 'full_mesh_cube' data, if 'full_mesh_cube' is *also* passed
as the first of the 'region_cubes'.
"""
if not region_cubes:
raise ValueError("'region_cubes' must be non-empty.")
mesh_dim = full_mesh_cube.mesh_dim()
if mesh_dim is None:
raise ValueError("'full_mesh_cube' has no \".mesh\".")
# Check the basic required properties of the input.
mesh_dim_coords = full_mesh_cube.coords(
dim_coords=True, dimensions=(mesh_dim,)
)
if not mesh_dim_coords:
err = (
"'full_mesh_cube' has no dim-coord on the mesh dimension, "
f"(dimension {mesh_dim})."
)
raise ValueError(err)
#
# Perform consistency checks on all the region-cubes.
#
def metadata_no_varname(cube_or_coord):
# Get a metadata object but omit any var_name.
metadata = cube_or_coord.metadata
fields = metadata._asdict()
fields["var_name"] = None
result = metadata.__class__(**fields)
return result
n_regions = len(region_cubes)
n_dims = full_mesh_cube.ndim
regioncube_metadata = None
indexcoord_metadata = None
for i_region, region_cube in enumerate(region_cubes):
reg_cube_str = (
f'Region cube #{i_region + 1}/{n_regions}, "{region_cube.name()}"'
)
reg_ndims = region_cube.ndim
# Check dimensionality.
if reg_ndims != n_dims:
err = (
f"{reg_cube_str} has {reg_ndims} dimensions, but "
f"'full_mesh_cube' has {n_dims}."
)
raise ValueError(err)
# Get region_cube metadata, which will apply to the result..
region_cube_metadata = metadata_no_varname(region_cube)
if regioncube_metadata is None:
# Store the first region-cube metadata as a reference
regioncube_metadata = region_cube_metadata
elif region_cube_metadata != regioncube_metadata:
# Check subsequent region-cubes metadata against the first.
err = (
f"{reg_cube_str} has metadata {region_cube_metadata}, "
"which does not match that of the first region region_cube, "
f'"{region_cubes[0].name()}", '
f"which is {regioncube_metadata}."
)
raise ValueError(err)
# For each dim, check that coords match other regions, and full-cube.
for i_dim in range(full_mesh_cube.ndim):
if i_dim == mesh_dim:
# mesh dim : look for index coords (by name).
fulldim = full_mesh_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
regdim = region_cube.coords(
name_or_coord=index_coord_name, dimensions=(i_dim,)
)
else:
# non-mesh dims : look for dim-coords (only)
fulldim = full_mesh_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
regdim = region_cube.coords(
dim_coords=True, dimensions=(i_dim,)
)
if fulldim:
(fulldim,) = fulldim
full_dimname = fulldim.name()
fulldim_metadata = metadata_no_varname(fulldim)
if regdim:
(regdim,) = regdim
reg_dimname = regdim.name()
regdim_metadata = metadata_no_varname(regdim)
err = None
# N.B. checks for mesh- and non-mesh-dims are different.
if i_dim != mesh_dim:
# i_dim != mesh_dim : checks for non-mesh dims.
if fulldim and not regdim:
err = (
f"{reg_cube_str} has no dim-coord for dimension "
"{i_dim}, to match the 'full_mesh_cube' dimension "
f'"{full_dimname}".'
)
elif regdim and not fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, but 'full_mesh_cube' has none."
)
elif regdim != fulldim:
err = (
f'{reg_cube_str} has a dim-coord "{reg_dimname}" for '
f"dimension {i_dim}, which does not match that "
f"of 'full_mesh_cube', \"{full_dimname}\"."
)
else:
# i_dim == mesh_dim : different rules for this one
if not regdim:
# Must have an index coord on the mesh dimension
err = (
f'{reg_cube_str} has no "{index_coord_name}" coord on '
f"the mesh dimension (dimension {mesh_dim})."
)
elif fulldim and regdim_metadata != fulldim_metadata:
# May *not* have full-cube index, but if so it must match
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
"match that on 'full_mesh_cube' : "
f"{regdim_metadata} != {fulldim_metadata}."
)
# At this point, we know we *have* an index coord, and it does not
# conflict with the one on 'full_mesh_cube' (if any).
# Now check for matches between the region cubes.
if indexcoord_metadata is None:
# Store first occurrence (from first region-cube)
indexcoord_metadata = regdim_metadata
elif regdim_metadata != indexcoord_metadata:
# Compare subsequent occurrences (from other region-cubes)
err = (
f"{reg_cube_str} has an index coord "
f'"{index_coord_name}" whose ".metadata" does not '
f"match that of the first region-cube : "
f"{regdim_metadata} != {indexcoord_metadata}."
)
if err:
raise ValueError(err)
# Use the mesh_dim to transpose inputs + outputs, if required, as it is
# simpler for all the array operations to always have the mesh dim *last*.
if mesh_dim == full_mesh_cube.ndim - 1:
# Mesh dim is already the last one : no transposes required
untranspose_dims = None
else:
dim_range = np.arange(full_mesh_cube.ndim, dtype=int)
# Transpose all inputs to mesh-last order.
tranpose_dims = [i_dim for i_dim in dim_range if i_dim != mesh_dim] + [
mesh_dim
] # chop out mesh_dim + put it at the end.
def transposed_copy(cube, dim_order):
cube = cube.copy()
cube.transpose(dim_order)
return cube
full_mesh_cube = transposed_copy(full_mesh_cube, tranpose_dims)
region_cubes = [
transposed_copy(region_cube, tranpose_dims)
for region_cube in region_cubes
]
# Also prepare for transforming the output back to the original order.
untranspose_dims = dim_range.copy()
# Neat trick to produce the reverse operation.
untranspose_dims[tranpose_dims] = dim_range
#
# Here's the core operation..
#
def fill_region(target, regiondata, regioninds):
if not target.flags.writeable:
# The initial input can be a section of a da.zeros(), which has no
# real array "behind" it. This means that real arrays created in
# memory are only chunk-sized, but it also means that 'target' may
# not be writeable. So take a copy to fix that, where needed.
target = target.copy()
# N.B. Indices are basically 1D, but may have leading *1 dims for
# alignment, to satisfy da.map_blocks
assert all(size == 1 for size in regioninds.shape[:-1])
inds = regioninds.flatten()
# Assign blocks with indexing on the last dim only.
target[..., inds] = regiondata
return target
# Create an initially 'empty' (all-masked) dask array matching the input.
# N.B. this does not use the full_mesh_cube.lazy_data() array, but only its
# shape and dtype, since the data itself is not used in the calculation.
# N.B. chunking matches the input cube, allowing performance control.
input_data = full_mesh_cube.lazy_data()
result_array = da.ma.masked_array(
da.zeros(
input_data.shape,
dtype=input_data.dtype,
chunks=input_data.chunksize,
),
True,
)
# Wrap this repeatedly with a lazy operation to assign each region.
# It is done this way because we couldn't get map_blocks to correctly wrap
# a function which does all regions in a single operation.
# TODO: replace with a single-stage solution: Probably better, if possible.
# Notes on resultant calculation properties:
# 1. map_blocks is chunk-mapped, so it is parallelisable and space-saving
# 2. However, fetching less than a whole chunk is not efficient
for region_cube in region_cubes:
# Lazy data array from the region cube
datarr = region_cube.lazy_data()
# Lazy indices from the mesh-dim coord.
mesh_dimcoord = region_cube.coord(
name_or_coord=index_coord_name, dimensions=region_cube.ndim - 1
)
indarr = mesh_dimcoord.lazy_points()
# Extend indarr dimensions to align it with the 'target' array dims.
assert indarr.ndim == 1
shape = (1,) * (region_cube.ndim - 1) + indarr.shape
indarr = indarr.reshape(shape)
# Apply the operation to paste from one region into the target.
# N.B. replacing 'result_array' each time around the loop.
result_array = da.map_blocks(
fill_region,
result_array,
datarr,
indarr,
dtype=result_array.dtype,
meta=np.ndarray,
)
# Construct the result cube.
result_cube = full_mesh_cube.copy()
result_cube.data = result_array
# Copy names, units + attributes from region data (N.B. but not var_name)
result_cube.metadata = regioncube_metadata
if untranspose_dims:
# Re-order dims as in the original input.
result_cube.transpose(untranspose_dims)
return result_cube
|
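Editor's note: the heart of both versions above is the repeated `da.map_blocks` wrapping that scatters each region's values into an all-masked target by index. Below is a minimal, self-contained sketch of that pattern using plain dask and numpy only (no iris cubes); the array sizes, chunking and index values are illustrative, and the target is held as a single chunk so its block structure lines up one-to-one with each region's.

```python
import dask.array as da
import numpy as np

def fill_region(target, regiondata, regioninds):
    # Blocks handed over by map_blocks may be read-only views; copy if needed.
    if not target.flags.writeable:
        target = target.copy()
    target[..., regioninds.flatten()] = regiondata
    return target

# An all-masked "full mesh" of 10 points, as a single dask chunk.
full = da.ma.masked_array(da.zeros(10, dtype=float, chunks=10), True)

# Two regions; where they overlap (index 2) the later one wins.
regions = [
    (np.array([1.0, 2.0, 3.0]), np.array([0, 1, 2])),
    (np.array([9.0, 8.0]), np.array([2, 3])),
]
for data, inds in regions:
    full = da.map_blocks(
        fill_region,
        full,
        da.from_array(data, chunks=-1),
        da.from_array(inds, chunks=-1),
        dtype=full.dtype,
    )

print(full.compute())
# [1.0 2.0 9.0 8.0 -- -- -- -- -- --]
```

Because each `map_blocks` call wraps the previous result, later regions overwrite earlier ones where they overlap, matching the "last-listed region wins" behaviour described in the docstring.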
22,389 | def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if "type" not in param_attrib:
lint_ctx.error("Found param input with no type specified.")
has_errors = True
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
has_errors = True
if has_errors:
continue
param_type = param_attrib["type"]
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn("Param input [%s] is not a valid Cheetah placeholder.", param_name)
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn("Param input [%s] with no format specified - 'data' format will be assumed.", param_name)
elif param_type == "select":
dynamic_options = param.get("dynamic_options", None)
if dynamic_options is None:
dynamic_options = param.find("options")
select_options = param.findall('./option')
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error("Select [%s] has option without value", param_name)
if dynamic_options is None and len(select_options) == 0:
message = "No options defined for select [%s]" % param_name
lint_ctx.warn(message)
if param_attrib["display"] == "radio":
if param_attrib["optional"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with optional="true"', param_name)
if param_attrib["multiple"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with multiple="true"', param_name)
# TODO: Validate type, much more...
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool uses this, no child elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error("Conditional '%s' has no child <param>" % conditional_name)
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn("Conditional '%s' first param should have type=\"select\" /> or type=\"boolean\"" % conditional_name)
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn("Conditional test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error("When without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"No <when /> block found for {first_param_type} option '{option_id}' inside conditional '{conditional_name}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"No <option /> found for when block '{when_id}' inside conditional '{conditional_name}'")
else:
lint_ctx.warn(f"No truevalue/falsevalue found for when block '{when_id}' inside conditional '{conditional_name}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info("%s tag usually present in data sources" % datasource_tag)
if num_inputs:
lint_ctx.info("Found %d input parameters.", num_inputs)
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
| def lint_inputs(tool_xml, lint_ctx):
"""Lint parameters in a tool's inputs block."""
datasource = is_datasource(tool_xml)
inputs = tool_xml.findall("./inputs//param")
num_inputs = 0
for param in inputs:
num_inputs += 1
param_attrib = param.attrib
has_errors = False
if "type" not in param_attrib:
lint_ctx.error("Found param input with no type specified.")
has_errors = True
if "name" not in param_attrib and "argument" not in param_attrib:
lint_ctx.error("Found param input with no name specified.")
has_errors = True
if has_errors:
continue
param_type = param_attrib["type"]
param_name = _parse_name(param_attrib.get("name"), param_attrib.get("argument"))
if not is_valid_cheetah_placeholder(param_name):
lint_ctx.warn("Param input [%s] is not a valid Cheetah placeholder.", param_name)
if param_type == "data":
if "format" not in param_attrib:
lint_ctx.warn("Param input [%s] with no format specified - 'data' format will be assumed.", param_name)
elif param_type == "select":
dynamic_options = param.get("dynamic_options", None)
if dynamic_options is None:
dynamic_options = param.find("options")
select_options = param.findall('./option')
if any(['value' not in option.attrib for option in select_options]):
lint_ctx.error("Select [%s] has option without value", param_name)
if dynamic_options is None and len(select_options) == 0:
message = "No options defined for select [%s]" % param_name
lint_ctx.warn(message)
if param_attrib.get("display", "checkboxes") == "radio":
if param_attrib["optional"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with optional="true"', param_name)
if param_attrib["multiple"] == "true":
lint_ctx.error('Select [%s] display="radio" is incompatible with multiple="true"', param_name)
# TODO: Validate type, much more...
conditional_selects = tool_xml.findall("./inputs//conditional")
for conditional in conditional_selects:
conditional_name = conditional.get('name')
if not conditional_name:
lint_ctx.error("Conditional without a name")
if conditional.get("value_from"):
# Probably only the upload tool uses this, no child elements
continue
first_param = conditional.find("param")
if first_param is None:
lint_ctx.error("Conditional '%s' has no child <param>" % conditional_name)
continue
first_param_type = first_param.get('type')
if first_param_type not in ['select', 'boolean']:
lint_ctx.warn("Conditional '%s' first param should have type=\"select\" /> or type=\"boolean\"" % conditional_name)
continue
if first_param_type == 'select':
select_options = _find_with_attribute(first_param, 'option', 'value')
option_ids = [option.get('value') for option in select_options]
else: # boolean
option_ids = [
first_param.get('truevalue', 'true'),
first_param.get('falsevalue', 'false')
]
if string_as_bool(first_param.get('optional', False)):
lint_ctx.warn("Conditional test parameter cannot be optional")
whens = conditional.findall('./when')
if any('value' not in when.attrib for when in whens):
lint_ctx.error("When without value")
when_ids = [w.get('value') for w in whens]
for option_id in option_ids:
if option_id not in when_ids:
lint_ctx.warn(f"No <when /> block found for {first_param_type} option '{option_id}' inside conditional '{conditional_name}'")
for when_id in when_ids:
if when_id not in option_ids:
if first_param_type == 'select':
lint_ctx.warn(f"No <option /> found for when block '{when_id}' inside conditional '{conditional_name}'")
else:
lint_ctx.warn(f"No truevalue/falsevalue found for when block '{when_id}' inside conditional '{conditional_name}'")
if datasource:
for datasource_tag in ('display', 'uihints'):
if not any([param.tag == datasource_tag for param in inputs]):
lint_ctx.info("%s tag usually present in data sources" % datasource_tag)
if num_inputs:
lint_ctx.info("Found %d input parameters.", num_inputs)
else:
if datasource:
lint_ctx.info("No input parameters, OK for data sources")
else:
lint_ctx.warn("Found no input parameters.")
|
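Editor's note: the conditional/when consistency check is the densest part of this linter. A small stand-alone sketch of that check is shown below, using only `xml.etree.ElementTree`; the tool XML and the returned list of warning strings are illustrative and are not Galaxy's actual `lint_ctx` API.

```python
import xml.etree.ElementTree as ET

TOOL_XML = """
<tool>
  <inputs>
    <conditional name="mode">
      <param name="mode_select" type="select">
        <option value="basic">Basic</option>
        <option value="advanced">Advanced</option>
      </param>
      <when value="basic"/>
      <!-- note: no <when value="advanced"/> block -->
    </conditional>
  </inputs>
</tool>
"""

def check_conditionals(tool_xml):
    warnings = []
    for conditional in tool_xml.findall("./inputs//conditional"):
        name = conditional.get("name")
        first_param = conditional.find("param")
        if first_param is None or first_param.get("type") != "select":
            continue
        option_ids = [o.get("value") for o in first_param.findall("option")]
        when_ids = [w.get("value") for w in conditional.findall("./when")]
        for option_id in option_ids:
            if option_id not in when_ids:
                warnings.append(
                    f"No <when /> block found for select option '{option_id}' "
                    f"inside conditional '{name}'"
                )
    return warnings

print(check_conditionals(ET.fromstring(TOOL_XML)))
# ["No <when /> block found for select option 'advanced' inside conditional 'mode'"]
```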
1,342 | def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
If True, for binary y_true, the score function is supposed to accpet
1d y_pred (i.e., probability of the positive class, shape
``(n_samples,)``).
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
If True, for binary y_true, the score function is supposed to accpet
1d y_pred (i.e., probability of the positive class or the decision
function, shape ``(n_samples,)``).
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
| def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
If True, for binary y_true, the score function is supposed to accept
1d y_pred (i.e., probability of the positive class, shape
``(n_samples,)``).
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
If True, for binary y_true, the score function is supposed to accept
1d y_pred (i.e., probability of the positive class or the decision
function, shape ``(n_samples,)``).
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
|
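Editor's note: `make_scorer` is part of scikit-learn's public API, so a short usage sketch may help; it assumes a scikit-learn version in which `needs_proba`/`needs_threshold` still exist as keyword arguments.

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=200, random_state=0)

# greater_is_better=True (the default): fbeta_score is a score, not a loss.
ftwo_scorer = make_scorer(fbeta_score, beta=2)

scores = cross_val_score(LogisticRegression(max_iter=1000), X, y,
                         scoring=ftwo_scorer, cv=5)
print(scores.mean())
```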
2,714 | def inplace_swap_column(X, m, n):
"""
To swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
inplace_swap_row_csr(X, m, n)
elif isinstance(X, sp.csr_matrix):
inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
| def inplace_swap_column(X, m, n):
"""
Swap two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix whose two columns are to be swapped. It should be of
CSR or CSC format.
m : int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
inplace_swap_row_csr(X, m, n)
elif isinstance(X, sp.csr_matrix):
inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
|
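Editor's note: the implementation above relies on the CSC/CSR duality (the columns of a CSC matrix are laid out exactly like the rows of a CSR matrix), which is why column swapping dispatches to the row-swap helpers. A quick demonstration with scikit-learn's public helper of the same name; it assumes scikit-learn and SciPy are installed.

```python
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_swap_column

X = sp.csc_matrix(np.array([[0., 1., 2.],
                            [3., 0., 4.],
                            [5., 6., 0.]]))
inplace_swap_column(X, 0, 2)   # swap first and last column in place
print(X.toarray())
# [[2. 1. 0.]
#  [4. 0. 3.]
#  [0. 6. 5.]]
```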
20,523 | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a NIFTI file with "
"labels corresponding to the discrete centerline, and (ii) a csv file containing the float (more precise) "
"coordinates of the centerline in the RPI orientation. \n"
"\n"
"Reference: C Gros, B De Leener, et al. Automatic spinal cord localization, robust to MRI contrast using "
"global curve optimization (2017). doi.org/10.1016/j.media.2017.12.001"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
"-c",
choices=['t1', 't2', 't2s', 'dwi'],
help="Type of image contrast. Only with method=optic."
)
optional.add_argument(
"-method",
choices=['optic', 'viewer', 'fitseg'],
default='optic',
help=("Method used for extracting the centerline.\n"
" - optic: automatic spinal cord detection method\n"
" - viewer: manual selection a few points followed by interpolation\n"
" - fitseg: fit a regularized centerline on an already-existing cord segmentation. It will "
"interpolate if slices are missing and extrapolate beyond the segmentation boundaries (i.e., every "
"axial slice will exhibit a centerline pixel).")
)
optional.add_argument(
"-centerline-algo",
choices=['polyfit', 'bspline', 'linear', 'nurbs'],
default='bspline',
help="Show this help message and exit."
)
optional.add_argument(
"-centerline-smooth",
metavar=Metavar.int,
type=int,
default=30,
help="Degree of smoothing for centerline fitting. Only for -centerline-algo {bspline, linear}."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help=("File name (without extension) for the centerline output files. By default, output file will be the "
"input with suffix '_centerline'. Example: 'centerline_optic'")
)
optional.add_argument(
"-gap",
metavar=Metavar.float,
type=float,
default=20.0,
help="Gap in mm between manually selected points. Only with method=viewer."
)
optional.add_argument(
"-igt",
metavar=Metavar.file,
help="File name of ground-truth centerline or segmentation (binary nifti)."
)
optional.add_argument(
"-v",
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
"-qc",
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
"-qc-dataset",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
"-qc-subject",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
| def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a NIFTI file with "
"labels corresponding to the discrete centerline, and (ii) a csv file containing the float (more precise) "
"coordinates of the centerline in the RPI orientation. \n"
"\n"
"Reference: C Gros, B De Leener, et al. Automatic spinal cord localization, robust to MRI contrast using "
"global curve optimization (2017). doi.org/10.1016/j.media.2017.12.001"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
"-c",
choices=['t1', 't2', 't2s', 'dwi'],
help="Type of image contrast. Only with method=optic."
)
optional.add_argument(
"-method",
choices=['optic', 'viewer', 'fitseg'],
default='optic',
help=("Method used for extracting the centerline.\n"
" - optic: automatic spinal cord detection method\n"
" - viewer: manual selection a few points followed by interpolation\n"
" - fitseg: fit a regularized centerline on an already-existing cord segmentation. It will "
"interpolate if slices are missing and extrapolate beyond the segmentation boundaries (i.e., every "
"axial slice will exhibit a centerline pixel).")
)
optional.add_argument(
"-centerline-algo",
choices=['polyfit', 'bspline', 'linear', 'nurbs'],
default='bspline',
help="Show this help message and exit."
)
optional.add_argument(
"-centerline-smooth",
metavar=Metavar.int,
type=int,
default=30,
help="Degree of smoothing for centerline fitting. Only for -centerline-algo {bspline, linear}."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help=("File name (without extension) for the centerline output files. By default, output file will be the "
"input with suffix '_centerline'. Example: 'centerline_optic'")
)
optional.add_argument(
"-gap",
metavar=Metavar.float,
type=float,
default=20.0,
help="Gap in mm between manually selected points. Only with method=viewer."
)
optional.add_argument(
"-igt",
metavar=Metavar.file,
help="File name of ground-truth centerline or segmentation (binary nifti)."
)
optional.add_argument(
"-v",
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
"-qc",
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
"-qc-dataset",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
"-qc-subject",
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
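Editor's note: the parser above depends on Spinal Cord Toolbox helpers (`SmartFormatter`, `Metavar`, `ActionCreateFolder`). A stripped-down sketch of the same mandatory/optional argument-group pattern with plain `argparse` is shown below; option names and help strings are abbreviated for illustration only.

```python
import argparse

def get_parser():
    parser = argparse.ArgumentParser(
        description="Extract the spinal cord centerline.",
        add_help=False,
    )
    mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
    mandatory.add_argument("-i", required=True, help="Input image. Example: t1.nii.gz")

    optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
    optional.add_argument("-h", "--help", action="help",
                          help="Show this help message and exit.")
    optional.add_argument("-method", choices=["optic", "viewer", "fitseg"],
                          default="optic", help="Centerline extraction method.")
    optional.add_argument("-centerline-smooth", type=int, default=30,
                          help="Degree of smoothing for centerline fitting.")
    return parser

# Internal dashes in option names become underscores in the parsed namespace.
args = get_parser().parse_args(["-i", "t2.nii.gz", "-method", "fitseg"])
print(args.i, args.method, args.centerline_smooth)
# t2.nii.gz fitseg 30
```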
51,564 | def merge(datasets, bounds=None, res=None, nodata=None, precision=7, indexes=None,
method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order using the reverse
painter's algorithm. If the output file exists, its values will be
overwritten by input values.
Geospatial bounds and resolution of a new output file in the
units of the input file coordinate reference system may be provided
and are otherwise taken from the first input file.
Parameters
----------
datasets: list of dataset objects opened in 'r' mode
source datasets to be merged.
bounds: tuple, optional
Bounds of the output image (left, bottom, right, top).
If not set, bounds are determined from bounds of input rasters.
res: tuple, optional
Output resolution in units of coordinate reference system. If not set,
the resolution of the first raster is used. If a single value is passed,
output pixels will be square.
nodata: float, optional
nodata value to use in output file. If not set, uses the nodata value
in the first input raster.
indexes : list of ints or a single int, optional
bands to read and merge
method : str or callable
pre-defined or custom merging method
callable must have signature
(old_data, new_data, old_nodata, new_nodata)
Returns
-------
tuple
Two elements:
dest: numpy ndarray
Contents of all input rasters in single array
out_transform: affine.Affine()
Information for mapping pixel coordinates in `dest` to another
coordinate system
"""
first = datasets[0]
first_res = first.res
nodataval = first.nodatavals[0]
dtype = first.dtypes[0]
if method not in MERGE_METHODS and not callable(method):
raise ValueError('Unknown method {0}, must be one of {1} or callable'
.format(method, MERGE_METHODS))
# Determine output band count
if indexes is None:
output_count = first.count
elif isinstance(indexes, int):
output_count = 1
else:
output_count = len(indexes)
# Extent from option or extent of all inputs
if bounds:
dst_w, dst_s, dst_e, dst_n = bounds
else:
# scan input files
xs = []
ys = []
for src in datasets:
left, bottom, right, top = src.bounds
xs.extend([left, right])
ys.extend([bottom, top])
dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
output_transform = Affine.translation(dst_w, dst_n)
logger.debug("Output transform, before scaling: %r", output_transform)
# Resolution/pixel size
if not res:
res = first_res
elif not np.iterable(res):
res = (res, res)
elif len(res) == 1:
res = (res[0], res[0])
output_transform *= Affine.scale(res[0], -res[1])
logger.debug("Output transform, after scaling: %r", output_transform)
# Compute output array shape. We guarantee it will cover the output
# bounds completely
output_width = int(math.ceil((dst_e - dst_w) / res[0]))
output_height = int(math.ceil((dst_n - dst_s) / res[1]))
# Adjust bounds to fit
dst_e, dst_s = output_transform * (output_width, output_height)
logger.debug("Output width: %d, height: %d", output_width, output_height)
logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))
# create destination array
dest = np.zeros((output_count, output_height, output_width), dtype=dtype)
if nodata is not None:
nodataval = nodata
logger.debug("Set nodataval: %r", nodataval)
if nodataval is not None:
# Only fill if the nodataval is within dtype's range
inrange = False
if np.dtype(dtype).kind in ('i', 'u'):
info = np.iinfo(dtype)
inrange = (info.min <= nodataval <= info.max)
elif np.dtype(dtype).kind == 'f':
info = np.finfo(dtype)
if np.isnan(nodataval):
inrange = True
else:
inrange = (info.min <= nodataval <= info.max)
if inrange:
dest.fill(nodataval)
else:
warnings.warn(
"Input file's nodata value, %s, is beyond the valid "
"range of its data type, %s. Consider overriding it "
"using the --nodata option for better results." % (
nodataval, dtype))
else:
nodataval = 0
if method == 'first':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'last':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = ~new_nodata
old_data[mask] = new_data[mask]
elif method == 'min':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.minimum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'max':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.maximum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif callable(method):
copyto = method
else:
raise ValueError(method)
for src in datasets:
# Real World (tm) use of boundless reads.
# This approach uses the maximum amount of memory to solve the
# problem. Making it more efficient is a TODO.
# 1. Compute spatial intersection of destination and source
src_w, src_s, src_e, src_n = src.bounds
int_w = src_w if src_w > dst_w else dst_w
int_s = src_s if src_s > dst_s else dst_s
int_e = src_e if src_e < dst_e else dst_e
int_n = src_n if src_n < dst_n else dst_n
# 2. Compute the source window
src_window = windows.from_bounds(
int_w, int_s, int_e, int_n, src.transform, precision=precision)
logger.debug("Src %s window: %r", src.name, src_window)
src_window = src_window.round_shape()
# 3. Compute the destination window
dst_window = windows.from_bounds(
int_w, int_s, int_e, int_n, output_transform, precision=precision)
# 4. Read data in source window into temp
trows, tcols = (
int(round(dst_window.height)), int(round(dst_window.width)))
temp_shape = (output_count, trows, tcols)
temp = src.read(out_shape=temp_shape, window=src_window,
boundless=False, masked=True, indexes=indexes)
# 5. Copy elements of temp into dest
roff, coff = (
int(round(dst_window.row_off)), int(round(dst_window.col_off)))
region = dest[:, roff:roff + trows, coff:coff + tcols]
if np.isnan(nodataval):
region_nodata = np.isnan(region)
temp_nodata = np.isnan(temp)
else:
region_nodata = region == nodataval
temp_nodata = temp.mask
copyto(region, temp, region_nodata, temp_nodata)
return dest, output_transform
| def merge(datasets, bounds=None, res=None, nodata=None, precision=7, indexes=None,
method='first'):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order using the reverse
painter's algorithm. If the output file exists, its values will be
overwritten by input values.
Geospatial bounds and resolution of a new output file in the
units of the input file coordinate reference system may be provided
and are otherwise taken from the first input file.
Parameters
----------
datasets: list of dataset objects opened in 'r' mode
source datasets to be merged.
bounds: tuple, optional
Bounds of the output image (left, bottom, right, top).
If not set, bounds are determined from bounds of input rasters.
res: tuple, optional
Output resolution in units of coordinate reference system. If not set,
the resolution of the first raster is used. If a single value is passed,
output pixels will be square.
nodata: float, optional
nodata value to use in output file. If not set, uses the nodata value
in the first input raster.
indexes : list of ints or a single int, optional
bands to read and merge
method : str or callable
pre-defined or custom merging method
callable must have signature
function(old_data, new_data, old_nodata, new_nodata)
Parameters
-----------
old_data : array_like
...
Returns
-------
tuple
Two elements:
dest: numpy ndarray
Contents of all input rasters in single array
out_transform: affine.Affine()
Information for mapping pixel coordinates in `dest` to another
coordinate system
"""
first = datasets[0]
first_res = first.res
nodataval = first.nodatavals[0]
dtype = first.dtypes[0]
if method not in MERGE_METHODS and not callable(method):
raise ValueError('Unknown method {0}, must be one of {1} or callable'
.format(method, MERGE_METHODS))
# Determine output band count
if indexes is None:
output_count = first.count
elif isinstance(indexes, int):
output_count = 1
else:
output_count = len(indexes)
# Extent from option or extent of all inputs
if bounds:
dst_w, dst_s, dst_e, dst_n = bounds
else:
# scan input files
xs = []
ys = []
for src in datasets:
left, bottom, right, top = src.bounds
xs.extend([left, right])
ys.extend([bottom, top])
dst_w, dst_s, dst_e, dst_n = min(xs), min(ys), max(xs), max(ys)
logger.debug("Output bounds: %r", (dst_w, dst_s, dst_e, dst_n))
output_transform = Affine.translation(dst_w, dst_n)
logger.debug("Output transform, before scaling: %r", output_transform)
# Resolution/pixel size
if not res:
res = first_res
elif not np.iterable(res):
res = (res, res)
elif len(res) == 1:
res = (res[0], res[0])
output_transform *= Affine.scale(res[0], -res[1])
logger.debug("Output transform, after scaling: %r", output_transform)
# Compute output array shape. We guarantee it will cover the output
# bounds completely
output_width = int(math.ceil((dst_e - dst_w) / res[0]))
output_height = int(math.ceil((dst_n - dst_s) / res[1]))
# Adjust bounds to fit
dst_e, dst_s = output_transform * (output_width, output_height)
logger.debug("Output width: %d, height: %d", output_width, output_height)
logger.debug("Adjusted bounds: %r", (dst_w, dst_s, dst_e, dst_n))
# create destination array
dest = np.zeros((output_count, output_height, output_width), dtype=dtype)
if nodata is not None:
nodataval = nodata
logger.debug("Set nodataval: %r", nodataval)
if nodataval is not None:
# Only fill if the nodataval is within dtype's range
inrange = False
if np.dtype(dtype).kind in ('i', 'u'):
info = np.iinfo(dtype)
inrange = (info.min <= nodataval <= info.max)
elif np.dtype(dtype).kind == 'f':
info = np.finfo(dtype)
if np.isnan(nodataval):
inrange = True
else:
inrange = (info.min <= nodataval <= info.max)
if inrange:
dest.fill(nodataval)
else:
warnings.warn(
"Input file's nodata value, %s, is beyond the valid "
"range of its data type, %s. Consider overriding it "
"using the --nodata option for better results." % (
nodataval, dtype))
else:
nodataval = 0
if method == 'first':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'last':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = ~new_nodata
old_data[mask] = new_data[mask]
elif method == 'min':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.minimum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif method == 'max':
def copyto(old_data, new_data, old_nodata, new_nodata):
mask = np.logical_and(~old_nodata, ~new_nodata)
old_data[mask] = np.maximum(old_data[mask], new_data[mask])
mask = np.logical_and(old_nodata, ~new_nodata)
old_data[mask] = new_data[mask]
elif callable(method):
copyto = method
else:
raise ValueError(method)
for src in datasets:
# Real World (tm) use of boundless reads.
# This approach uses the maximum amount of memory to solve the
# problem. Making it more efficient is a TODO.
# 1. Compute spatial intersection of destination and source
src_w, src_s, src_e, src_n = src.bounds
int_w = src_w if src_w > dst_w else dst_w
int_s = src_s if src_s > dst_s else dst_s
int_e = src_e if src_e < dst_e else dst_e
int_n = src_n if src_n < dst_n else dst_n
# 2. Compute the source window
src_window = windows.from_bounds(
int_w, int_s, int_e, int_n, src.transform, precision=precision)
logger.debug("Src %s window: %r", src.name, src_window)
src_window = src_window.round_shape()
# 3. Compute the destination window
dst_window = windows.from_bounds(
int_w, int_s, int_e, int_n, output_transform, precision=precision)
# 4. Read data in source window into temp
trows, tcols = (
int(round(dst_window.height)), int(round(dst_window.width)))
temp_shape = (output_count, trows, tcols)
temp = src.read(out_shape=temp_shape, window=src_window,
boundless=False, masked=True, indexes=indexes)
# 5. Copy elements of temp into dest
roff, coff = (
int(round(dst_window.row_off)), int(round(dst_window.col_off)))
region = dest[:, roff:roff + trows, coff:coff + tcols]
if np.isnan(nodataval):
region_nodata = np.isnan(region)
temp_nodata = np.isnan(temp)
else:
region_nodata = region == nodataval
temp_nodata = temp.mask
copyto(region, temp, region_nodata, temp_nodata)
return dest, output_transform
|
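Editor's note: both versions accept a callable `method` with the signature `(old_data, new_data, old_nodata, new_nodata)` that must update `old_data` in place. A hedged sketch of a custom method following that contract is shown below; the rule (keep whichever valid pixel has the larger magnitude) and the function name are made up for illustration.

```python
import numpy as np

def copy_max_abs(old_data, new_data, old_nodata, new_nodata):
    """Keep whichever valid pixel has the larger magnitude (illustrative rule)."""
    both_valid = np.logical_and(~old_nodata, ~new_nodata)
    take_new = np.logical_and(both_valid, np.abs(new_data) > np.abs(old_data))
    old_data[take_new] = new_data[take_new]
    # Where only the new data is valid, always take it.
    only_new = np.logical_and(old_nodata, ~new_nodata)
    old_data[only_new] = new_data[only_new]

# Usage (datasets being rasterio readers opened in 'r' mode):
#   dest, out_transform = merge(datasets, method=copy_max_abs)
```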
478 | def form_session_handler(v, text, msg):
"""
The form session handler will use the inbound text to answer the next question
in the open SQLXformsSession for the associated contact. If no session is open,
the handler passes. If multiple sessions are open, they are all closed and an
error message is displayed to the user.
"""
with critical_section_for_smsforms_sessions(v.owner_id):
if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(v.domain):
channel = get_channel_for_contact(v.owner_id, v.phone_number)
running_session_info = XFormsSessionSynchronization.get_running_session_info_for_channel(channel)
if running_session_info.session_id:
session = SQLXFormsSession.by_session_id(running_session_info.session_id)
if not session.session_is_open:
# This should never happen. But if it does we should set the channel free
# and act like there was no available session
notify_error("The supposedly running session was not open and was released. "
'No known way for this to happen, so worth investigating.')
XFormsSessionSynchronization.clear_stale_channel_claim(channel)
session = None
else:
session = None
else:
multiple, session = get_single_open_session_or_close_multiple(v.domain, v.owner_id)
if multiple:
send_sms_to_verified_number(v, get_message(MSG_MULTIPLE_SESSIONS, v))
return True
if session:
session.phone_number = v.phone_number
session.modified_time = datetime.utcnow()
session.save()
# fetch subevent pk to link inbound sms to
try:
subevent_id = MessagingSubEvent.objects.get(xforms_session_id=session.pk).pk
except MessagingSubEvent.DoesNotExist:
subevent_id = None
# Metadata to be applied to the inbound message
inbound_metadata = MessageMetadata(
workflow=session.workflow,
reminder_id=session.reminder_id,
xforms_session_couch_id=session._id,
messaging_subevent_id=subevent_id,
)
add_msg_tags(msg, inbound_metadata)
msg.save()
try:
answer_next_question(v, text, msg, session, subevent_id)
except Exception:
# Catch any touchforms errors
log_sms_exception(msg)
send_sms_to_verified_number(v, get_message(MSG_TOUCHFORMS_DOWN, v))
return True
else:
return False
| def form_session_handler(v, text, msg):
"""
The form session handler will use the inbound text to answer the next question
in the open SQLXformsSession for the associated contact. If no session is open,
the handler passes. If multiple sessions are open, they are all closed and an
error message is displayed to the user.
"""
with critical_section_for_smsforms_sessions(v.owner_id):
if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(v.domain):
channel = get_channel_for_contact(v.owner_id, v.phone_number)
running_session_info = XFormsSessionSynchronization.get_running_session_info_for_channel(channel)
if running_session_info.session_id:
session = SQLXFormsSession.by_session_id(running_session_info.session_id)
if not session.session_is_open:
# This should never happen. But if it does we should set the channel free
# and act like there was no available session
notify_error("The supposedly running session was not open and was released. "
'No known way for this to happen, so worth investigating.')
XFormsSessionSynchronization.clear_stale_channel_claim(channel)
session = None
else:
session = None
else:
multiple, session = get_single_open_session_or_close_multiple(v.domain, v.owner_id)
if multiple:
send_sms_to_verified_number(v, get_message(MSG_MULTIPLE_SESSIONS, v))
return True
if session:
session.phone_number = v.phone_number
session.modified_time = datetime.utcnow()
session.save()
# fetch subevent pk to link inbound sms to
try:
subevent_id = MessagingSubEvent.objects.values_list("id", flat=True).get(xforms_session_id=session.pk)
except MessagingSubEvent.DoesNotExist:
subevent_id = None
# Metadata to be applied to the inbound message
inbound_metadata = MessageMetadata(
workflow=session.workflow,
reminder_id=session.reminder_id,
xforms_session_couch_id=session._id,
messaging_subevent_id=subevent_id,
)
add_msg_tags(msg, inbound_metadata)
msg.save()
try:
answer_next_question(v, text, msg, session, subevent_id)
except Exception:
# Catch any touchforms errors
log_sms_exception(msg)
send_sms_to_verified_number(v, get_message(MSG_TOUCHFORMS_DOWN, v))
return True
else:
return False
|
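Editor's note: the only functional change in this record is the subevent lookup, which switches from hydrating a full `MessagingSubEvent` row just to read `.pk` to selecting only the id column with `values_list("id", flat=True)`. A sketch of the difference with a generic Django model is below; it assumes a configured Django project, and the model and field names are taken from the snippet above.

```python
# Assumes a configured Django project where MessagingSubEvent is importable
# (import path omitted); the model class is passed in here only to keep the
# sketch self-contained.
def get_subevent_id(MessagingSubEvent, session_pk):
    """Return the pk of the subevent linked to an xforms session, or None."""
    try:
        # Old form - loads every column of the row just to read .pk:
        #   return MessagingSubEvent.objects.get(xforms_session_id=session_pk).pk
        # New form - issues a SELECT for the id column only:
        return MessagingSubEvent.objects.values_list("id", flat=True).get(
            xforms_session_id=session_pk)
    except MessagingSubEvent.DoesNotExist:
        return None
```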
30,344 | def make_edit_request_for_an_object(obj_id, obj_type, params):
# Remove items with empty values:
params = {k: v for k, v in params.items() if v is not None}
url_suffix = '/{0}/{1}?with=attributes,sources'.format(OBJ_DIRECTORY[obj_type], obj_id)
if obj_type == 'indicator':
url_suffix += ',score'
res = tq_request('PUT', url_suffix, params)
raw = data_to_demisto_format(res['data'], obj_type)
entry_context = {CONTEXT_PATH[obj_type]: createContext(raw, removeNull=True)}
readable_title = 'Successfully edited {0} with ID {1}'.format(obj_type, obj_id)
readable = build_readable(readable_title, obj_type, raw)
return_outputs(readable, entry_context, raw)
| def make_edit_request_for_an_object(obj_id, obj_type, params):
# Remove items with empty values.
params = {k: v for k, v in params.items() if v is not None}
url_suffix = '/{0}/{1}?with=attributes,sources'.format(OBJ_DIRECTORY[obj_type], obj_id)
if obj_type == 'indicator':
url_suffix += ',score'
res = tq_request('PUT', url_suffix, params)
raw = data_to_demisto_format(res['data'], obj_type)
entry_context = {CONTEXT_PATH[obj_type]: createContext(raw, removeNull=True)}
readable_title = 'Successfully edited {0} with ID {1}'.format(obj_type, obj_id)
readable = build_readable(readable_title, obj_type, raw)
return_outputs(readable, entry_context, raw)
|
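Editor's note: apart from the comment punctuation, the interesting line in this record is the first one, which drops `None`-valued parameters before issuing the PUT. A tiny illustration of that pattern (the values and object type are arbitrary):

```python
params = {"title": "New name", "description": None, "status": None}
params = {k: v for k, v in params.items() if v is not None}
url_suffix = '/{0}/{1}?with=attributes,sources'.format("indicators", 42)
print(params)      # {'title': 'New name'}
print(url_suffix)  # /indicators/42?with=attributes,sources
```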
31,865 | def get_assignee(client: Client, args) -> str:
return client.live_assign_get(args)
| def get_assignee(client: Client, args) -> CommandResults:
return client.live_assign_get(args)
|
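Editor's note: the change here is only the return annotation, from `str` to `CommandResults`. In Cortex XSOAR integrations, command functions conventionally return a `CommandResults` object so that `return_results()` can populate context data. The sketch below assumes the platform-provided `CommonServerPython` module is importable; the output prefix and the shape of `client.live_assign_get()`'s response are assumptions.

```python
# Sketch only: CommonServerPython is supplied by the XSOAR platform at runtime;
# 'Example.Assignee' and the dict returned by live_assign_get() are assumptions.
from CommonServerPython import CommandResults

def get_assignee(client, args) -> CommandResults:
    raw = client.live_assign_get(args)  # assumed to return a JSON-like dict
    return CommandResults(
        outputs_prefix='Example.Assignee',
        outputs_key_field='id',
        outputs=raw,
        raw_response=raw,
    )
```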
30,639 | def get_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
result = client.get_report_request(report_id)
headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
ioc_contents = []
contents = {
'ID': result.get('id'),
'Timestamp': convert_unix_to_timestamp(result.get('timestamp')),
'Title': result.get('title'),
'Description': result.get('description'),
'Severity': result.get('severity'),
'Link': result.get('link'),
'Tags': result.get('tags'),
'Visibility': result.get('visibility')
}
context = {
'ID': result.get('id'),
'Timestamp': convert_unix_to_timestamp(result.get('timestamp')),
'Title': result.get('title'),
'Description': result.get('description'),
'Severity': result.get('severity'),
'Link': result.get('link'),
'Tags': result.get('tags'),
'IOCs': result.get('iocs_v2'),
'Visibility': result.get('visibility')
}
iocs = result.get('iocs_v2')
for ioc in iocs:
ioc_contents.append({
'ID': ioc.get('id'),
'Match_type': ioc.get('match_type'),
'Values': ioc.get('values'),
'Field': ioc.get('field'),
'Link': ioc.get('link')
})
readable_output = tableToMarkdown(f'Report "{report_id}" information', contents, headers, removeNull=True)
ioc_output = tableToMarkdown(f'The IOCs for the report', ioc_contents, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Report',
outputs_key_field='id',
outputs=context,
readable_output=readable_output + ioc_output,
raw_response=result
)
return results
| def get_report_command(client: Client, args: dict) -> CommandResults:
report_id = args.get('report_id')
result = client.get_report_request(report_id)
headers = ['ID', 'Title', 'Timestamp', 'Description', 'Severity', 'Link', 'IOCs_v2', 'Tags', 'Visibility']
ioc_contents = []
contents = {
'ID': result.get('id'),
'Timestamp': convert_unix_to_timestamp(result.get('timestamp')),
'Title': result.get('title'),
'Description': result.get('description'),
'Severity': result.get('severity'),
'Link': result.get('link'),
'Tags': result.get('tags'),
'Visibility': result.get('visibility')
}
context = {
'ID': result.get('id'),
'Timestamp': convert_unix_to_timestamp(result.get('timestamp')),
'Title': result.get('title'),
'Description': result.get('description'),
'Severity': result.get('severity'),
'Link': result.get('link'),
'Tags': result.get('tags'),
'IOCs': result.get('iocs_v2'),
'Visibility': result.get('visibility')
}
iocs = result.get('iocs_v2', [])
for ioc in iocs:
ioc_contents.append({
'ID': ioc.get('id'),
'Match_type': ioc.get('match_type'),
'Values': ioc.get('values'),
'Field': ioc.get('field'),
'Link': ioc.get('link')
})
readable_output = tableToMarkdown(f'Report "{report_id}" information', contents, headers, removeNull=True)
ioc_output = tableToMarkdown(f'The IOCs for the report', ioc_contents, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Report',
outputs_key_field='id',
outputs=context,
readable_output=readable_output + ioc_output,
raw_response=result
)
return results
|
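Editor's note: the functional change in this record is `result.get('iocs_v2')` becoming `result.get('iocs_v2', [])`, which avoids iterating over `None` when the API response has no IOCs. A tiny demonstration with an illustrative dictionary:

```python
result = {"id": "r-1", "title": "Example report"}   # no 'iocs_v2' key

# result.get("iocs_v2") would return None, and iterating None raises
# "TypeError: 'NoneType' object is not iterable".
for ioc in result.get("iocs_v2", []):    # safe: iterates an empty list instead
    print(ioc)
```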
32,403 | def main() -> None:
"""main function, parses params and runs command_func functions
:return:
:rtype:
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate = not params.get('insecure', False)
SERVER_URL = params.get('serverUrl', '') + '/api/v2'
proxy = params.get('proxy', False)
demisto_command = demisto.command()
demisto.debug(f'Command being called is {demisto_command}')
try:
headers: Dict = {
'api-key': demisto.params().get('credentials', {}).get('password'),
'User-Agent': 'Falcon Sandbox'
}
client = Client(
base_url=SERVER_URL,
verify=verify_certificate,
headers=headers,
proxy=proxy)
command_func: Callable
if demisto_command in ['test-module']:
command_func = test_module
elif demisto_command in ['cs-falcon-sandbox-search', 'crowdstrike-search']:
command_func = crowdstrike_search_command
elif demisto_command in ['cs-falcon-sandbox-scan', 'crowdstrike-scan', 'file']:
command_func = crowdstrike_scan_command
elif demisto_command in ['crowdstrike-get-environments', 'cs-falcon-sandbox-get-environments']:
command_func = crowdstrike_get_environments_command
elif demisto_command in ['cs-falcon-sandbox-get-screenshots', 'crowdstrike-get-screenshots']:
command_func = crowdstrike_get_screenshots_command
elif demisto_command in ['cs-falcon-sandbox-result', 'crowdstrike-result']:
command_func = crowdstrike_result_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview']:
command_func = crowdstrike_analysis_overview_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-summary']:
command_func = crowdstrike_analysis_overview_summary_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-refresh']:
command_func = crowdstrike_analysis_overview_refresh_command
elif demisto_command in ['crowdstrike-submit-sample', 'cs-falcon-sandbox-submit-sample']:
command_func = crowdstrike_submit_sample_command
elif demisto_command in ['cs-falcon-sandbox-submit-url', 'crowdstrike-submit-url']:
command_func = crowdstrike_submit_url_command
elif demisto_command in ['cs-falcon-sandbox-sample-download']:
command_func = crowdstrike_sample_download_command
elif demisto_command in ['cs-falcon-sandbox-report-state']:
command_func = crowdstrike_report_state_command
else:
raise NotImplementedError(f'Command not implemented: {demisto_command}')
return_results(command_func(client, args))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto_command} command_func.\nError:\n{str(e)}')
| def main() -> None:
"""main function, parses params and runs command_func functions
:return:
:rtype:
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate = not params.get('insecure', False)
server_url = params.get('serverUrl', '') + '/api/v2'
proxy = params.get('proxy', False)
demisto_command = demisto.command()
demisto.debug(f'Command being called is {demisto_command}')
try:
headers: Dict = {
'api-key': demisto.params().get('credentials', {}).get('password'),
'User-Agent': 'Falcon Sandbox'
}
client = Client(
            base_url=server_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
command_func: Callable
if demisto_command in ['test-module']:
command_func = test_module
elif demisto_command in ['cs-falcon-sandbox-search', 'crowdstrike-search']:
command_func = crowdstrike_search_command
elif demisto_command in ['cs-falcon-sandbox-scan', 'crowdstrike-scan', 'file']:
command_func = crowdstrike_scan_command
elif demisto_command in ['crowdstrike-get-environments', 'cs-falcon-sandbox-get-environments']:
command_func = crowdstrike_get_environments_command
elif demisto_command in ['cs-falcon-sandbox-get-screenshots', 'crowdstrike-get-screenshots']:
command_func = crowdstrike_get_screenshots_command
elif demisto_command in ['cs-falcon-sandbox-result', 'crowdstrike-result']:
command_func = crowdstrike_result_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview']:
command_func = crowdstrike_analysis_overview_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-summary']:
command_func = crowdstrike_analysis_overview_summary_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-refresh']:
command_func = crowdstrike_analysis_overview_refresh_command
elif demisto_command in ['crowdstrike-submit-sample', 'cs-falcon-sandbox-submit-sample']:
command_func = crowdstrike_submit_sample_command
elif demisto_command in ['cs-falcon-sandbox-submit-url', 'crowdstrike-submit-url']:
command_func = crowdstrike_submit_url_command
elif demisto_command in ['cs-falcon-sandbox-sample-download']:
command_func = crowdstrike_sample_download_command
elif demisto_command in ['cs-falcon-sandbox-report-state']:
command_func = crowdstrike_report_state_command
else:
raise NotImplementedError(f'Command not implemented: {demisto_command}')
return_results(command_func(client, args))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto_command} command_func.\nError:\n{str(e)}')
|
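The dispatcher above maps several command aliases onto one handler through a long if/elif chain. A sketch of the same idea as a lookup table, with stand-in handlers (this is not the integration's actual code):
from typing import Callable, Dict

def test_module(client, args): return 'ok'
def crowdstrike_search_command(client, args): return 'search results'

COMMANDS: Dict[str, Callable] = {
    'test-module': test_module,
    'cs-falcon-sandbox-search': crowdstrike_search_command,
    'crowdstrike-search': crowdstrike_search_command,     # alias and canonical name share one entry
}

def dispatch(command: str, client, args):
    try:
        handler = COMMANDS[command]
    except KeyError:
        raise NotImplementedError(f'Command not implemented: {command}')
    return handler(client, args)

print(dispatch('crowdstrike-search', client=None, args={}))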
20,522 | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a NIFTI file with "
"labels corresponding to the discrete centerline, and (ii) a csv file containing the float (more precise) "
"coordinates of the centerline in the RPI orientation. \n"
"\n"
"Reference: C Gros, B De Leener, et al. Automatic spinal cord localization, robust to MRI contrast using "
"global curve optimization (2017). doi.org/10.1016/j.media.2017.12.001"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
"-c",
choices=['t1', 't2', 't2s', 'dwi'],
help="Type of image contrast. Only with method=optic."
)
optional.add_argument(
"-method",
choices=['optic', 'viewer', 'fitseg'],
default='optic',
help=("Method used for extracting the centerline.\n"
" - optic: automatic spinal cord detection method\n"
" - viewer: manual selection a few points followed by interpolation\n"
" - fitseg: fit a regularized centerline on an already-existing cord segmentation. It will "
"interpolate if slices are missing and extrapolate beyond the segmentation boundaries (i.e., every "
"axial slice will exhibit a centerline pixel).")
)
optional.add_argument(
"-centerline-algo",
choices=['polyfit', 'bspline', 'linear', 'nurbs'],
default='bspline',
help="Show this help message and exit."
)
optional.add_argument(
"-centerline-smooth",
metavar=Metavar.int,
type=int,
default=30,
help="Degree of smoothing for centerline fitting. Only for -centerline-algo {bspline, linear}."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help=("File name (without extension) for the centerline output files. By default, output file will be the "
"input with suffix '_centerline'. Example: 'centerline_optic'")
)
optional.add_argument(
"-gap",
metavar=Metavar.float,
type=float,
default=20.0,
help="Gap in mm between manually selected points. Only with method=viewer."
)
optional.add_argument(
"-igt",
metavar=Metavar.file,
help="File name of ground-truth centerline or segmentation (binary nifti)."
)
optional.add_argument(
"-v",
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
"-qc",
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
"-qc-dataset",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
"-qc-subject",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
| def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This function extracts the spinal cord centerline. Three methods are available: OptiC (automatic), "
"Viewer (manual) and Fitseg (applied on segmented image). These functions output (i) a NIFTI file with "
"labels corresponding to the discrete centerline, and (ii) a csv file containing the float (more precise) "
"coordinates of the centerline in the RPI orientation. \n"
"\n"
"Reference: C Gros, B De Leener, et al. Automatic spinal cord localization, robust to MRI contrast using "
"global curve optimization (2017). doi.org/10.1016/j.media.2017.12.001"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
"-c",
choices=['t1', 't2', 't2s', 'dwi'],
help="Type of image contrast. Only with method=optic."
)
optional.add_argument(
"-method",
choices=['optic', 'viewer', 'fitseg'],
default='optic',
help=("Method used for extracting the centerline.\n"
" - optic: automatic spinal cord detection method\n"
" - viewer: manual selection a few points followed by interpolation\n"
" - fitseg: fit a regularized centerline on an already-existing cord segmentation. It will "
"interpolate if slices are missing and extrapolate beyond the segmentation boundaries (i.e., every "
"axial slice will exhibit a centerline pixel).")
)
optional.add_argument(
"-centerline-algo",
choices=['polyfit', 'bspline', 'linear', 'nurbs'],
default='bspline',
help='Algorithm for centerline fitting. Only relevant with -method fitseg'
)
optional.add_argument(
"-centerline-smooth",
metavar=Metavar.int,
type=int,
default=30,
help="Degree of smoothing for centerline fitting. Only for -centerline-algo {bspline, linear}."
)
optional.add_argument(
"-o",
metavar=Metavar.file,
help=("File name (without extension) for the centerline output files. By default, output file will be the "
"input with suffix '_centerline'. Example: 'centerline_optic'")
)
optional.add_argument(
"-gap",
metavar=Metavar.float,
type=float,
default=20.0,
help="Gap in mm between manually selected points. Only with method=viewer."
)
optional.add_argument(
"-igt",
metavar=Metavar.file,
help="File name of ground-truth centerline or segmentation (binary nifti)."
)
optional.add_argument(
"-v",
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
"-qc",
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
"-qc-dataset",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
"-qc-subject",
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
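A self-contained sketch of how the corrected help reads in practice: -centerline-algo only matters together with -method fitseg. The parser below reproduces just those two options plus -i; everything else from the row above is omitted for brevity.
import argparse

parser = argparse.ArgumentParser(prog='sct_get_centerline_sketch')
parser.add_argument('-i', required=True)
parser.add_argument('-method', choices=['optic', 'viewer', 'fitseg'], default='optic')
parser.add_argument('-centerline-algo', dest='centerline_algo',
                    choices=['polyfit', 'bspline', 'linear', 'nurbs'], default='bspline')

args = parser.parse_args(['-i', 't2_seg.nii.gz', '-method', 'fitseg',
                          '-centerline-algo', 'linear'])
print(args.method, args.centerline_algo)                # fitseg linear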
3,534 | def prepare_build(
project,
version=None,
commit=None,
record=True,
force=False,
immutable=True,
):
"""
Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip=True``, the build is not triggered.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``project.get_default_version()``
:param commit: commit sha of the version required for sending build status reports
:param record: whether or not record the build in a new Build object
:param force: build the HTML documentation even if the files haven't changed
:param immutable: whether or not create an immutable Celery signature
:returns: Celery signature of update_docs_task and Build instance
:rtype: tuple
"""
# Avoid circular import
from readthedocs.builds.models import Build
from readthedocs.projects.models import Project
from readthedocs.projects.tasks import (
update_docs_task,
send_external_build_status,
send_notifications,
)
build = None
if not Project.objects.is_active(project):
log.warning(
'Build not triggered because Project is not active: project=%s',
project.slug,
)
return (None, None)
if not version:
default_version = project.get_default_version()
version = project.versions.get(slug=default_version)
kwargs = {
'record': record,
'force': force,
'commit': commit,
}
if record:
build = Build.objects.create(
project=project,
version=version,
type='html',
state=BUILD_STATE_TRIGGERED,
success=True,
commit=commit
)
kwargs['build_pk'] = build.pk
options = {}
if project.build_queue:
options['queue'] = project.build_queue
# Set per-task time limit
time_limit = DOCKER_LIMITS['time']
try:
if project.container_time_limit:
time_limit = int(project.container_time_limit)
except ValueError:
log.warning('Invalid time_limit for project: %s', project.slug)
# Add 20% overhead to task, to ensure the build can timeout and the task
# will cleanly finish.
options['soft_time_limit'] = time_limit
options['time_limit'] = int(time_limit * 1.2)
if build and commit:
# Send pending Build Status using Git Status API for External Builds.
send_external_build_status(
version_type=version.type, build_pk=build.id,
commit=commit, status=BUILD_STATUS_PENDING
)
if build and version.type != EXTERNAL:
# Send Webhook notification for build triggered.
send_notifications.delay(version.pk, build_pk=build.pk, email=False)
if version.type == EXTERNAL:
# External builds should be lower priority.
# We only support [1,2] currently, so set it to 2 (higher number being higher priority)
options['priority'] = 1
return (
update_docs_task.signature(
args=(version.pk,),
kwargs=kwargs,
options=options,
immutable=True,
),
build,
)
| def prepare_build(
project,
version=None,
commit=None,
record=True,
force=False,
immutable=True,
):
"""
Prepare a build in a Celery task for project and version.
If project has a ``build_queue``, execute the task on this build queue. If
project has ``skip=True``, the build is not triggered.
:param project: project's documentation to be built
:param version: version of the project to be built. Default: ``project.get_default_version()``
:param commit: commit sha of the version required for sending build status reports
:param record: whether or not record the build in a new Build object
:param force: build the HTML documentation even if the files haven't changed
:param immutable: whether or not create an immutable Celery signature
:returns: Celery signature of update_docs_task and Build instance
:rtype: tuple
"""
# Avoid circular import
from readthedocs.builds.models import Build
from readthedocs.projects.models import Project
from readthedocs.projects.tasks import (
update_docs_task,
send_external_build_status,
send_notifications,
)
build = None
if not Project.objects.is_active(project):
log.warning(
'Build not triggered because Project is not active: project=%s',
project.slug,
)
return (None, None)
if not version:
default_version = project.get_default_version()
version = project.versions.get(slug=default_version)
kwargs = {
'record': record,
'force': force,
'commit': commit,
}
if record:
build = Build.objects.create(
project=project,
version=version,
type='html',
state=BUILD_STATE_TRIGGERED,
success=True,
commit=commit
)
kwargs['build_pk'] = build.pk
options = {}
if project.build_queue:
options['queue'] = project.build_queue
# Set per-task time limit
time_limit = DOCKER_LIMITS['time']
try:
if project.container_time_limit:
time_limit = int(project.container_time_limit)
except ValueError:
log.warning('Invalid time_limit for project: %s', project.slug)
# Add 20% overhead to task, to ensure the build can timeout and the task
# will cleanly finish.
options['soft_time_limit'] = time_limit
options['time_limit'] = int(time_limit * 1.2)
if build and commit:
# Send pending Build Status using Git Status API for External Builds.
send_external_build_status(
version_type=version.type, build_pk=build.id,
commit=commit, status=BUILD_STATUS_PENDING
)
if build and version.type != EXTERNAL:
# Send Webhook notification for build triggered.
send_notifications.delay(version.pk, build_pk=build.pk, email=False)
if version.type == EXTERNAL:
# External builds should be lower priority.
# We only support [1,2] currently, so set it to 1 (higher number being higher priority)
options['priority'] = 1
return (
update_docs_task.signature(
args=(version.pk,),
kwargs=kwargs,
options=options,
immutable=True,
),
build,
)
|
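A sketch of the object prepare_build returns, assuming a configured Celery app (the broker URL, task body and option values below are made up): the queue, time limits and priority ride along on the signature and only take effect when it is dispatched.
from celery import Celery

app = Celery('builds_sketch', broker='memory://')       # hypothetical in-memory broker

@app.task
def update_docs_sketch(version_pk, **kwargs):
    return version_pk

sig = update_docs_sketch.signature(
    args=(42,),
    kwargs={'record': True, 'force': False, 'commit': 'abc123'},
    options={'queue': 'external-builds', 'priority': 1, 'time_limit': 2700},
    immutable=True,
)
print(sig.options)                                      # the deferred call carries these options
# sig.apply_async() would enqueue it with that queue/priority once a worker is listening.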
29,818 | def get_flink_job_name(flink_job: FlinkJobDetails):
return flink_job["name"].split(".", 2)[-1]
| def get_flink_job_name(flink_job: FlinkJobDetails) -> str:
return flink_job["name"].split(".", 2)[-1]
|
4,558 | def test_check_second_level_input():
from nilearn.glm.second_level.second_level import _check_second_level_input
with pytest.raises(ValueError,
match="A second level model requires a list with at "
"least two first level models or niimgs"):
_check_second_level_input([FirstLevelModel()], pd.DataFrame())
with pytest.raises(ValueError,
match="Model sub_1 at index 0 has not been fit yet"):
_check_second_level_input([FirstLevelModel(subject_label="sub_{}".format(i))
for i in range(1,3)], pd.DataFrame())
with InTemporaryDirectory():
shapes, rk = [(7, 8, 9, 15)], 3
mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)
input_models = [FirstLevelModel(mask_img=mask).fit(fmri_data[0],
design_matrices=design_matrices[0])]
obj = lambda: None
obj.results_ = "foo"
obj.labels_ = "bar"
with pytest.raises(ValueError,
match=" object at idx 1 is <class 'function'> "
"instead of FirstLevelModel object"):
_check_second_level_input(input_models + [obj], pd.DataFrame())
with pytest.raises(ValueError,
match="In case confounds are provided, first level "
"objects need to provide the attribute subject_label"):
_check_second_level_input(input_models * 2, pd.DataFrame(),
confounds=pd.DataFrame())
with pytest.raises(ValueError,
match="List of niimgs as second_level_input "
"require a design matrix to be provided"):
_check_second_level_input(fmri_data * 2, None)
_check_second_level_input(fmri_data[0], pd.DataFrame())
with pytest.raises(ValueError,
match=" object at idx 1 is <class 'int'> instead"):
_check_second_level_input(["foo", 1], pd.DataFrame())
with pytest.raises(ValueError,
match="second_level_input DataFrame must have columns "
"subject_label, map_name and effects_map_path"):
_check_second_level_input(pd.DataFrame(columns=["foo", "bar"]), pd.DataFrame())
with pytest.raises(ValueError,
match="subject_label column must contain only strings"):
_check_second_level_input(pd.DataFrame({"subject_label": [1, 2],
"map_name": ["a", "b"],
"effects_map_path": ["c", "d"]}),
pd.DataFrame())
with pytest.raises(ValueError,
match="List of niimgs as second_level_input "
"require a design matrix to be provided"):
_check_second_level_input("foo", None)
with pytest.raises(ValueError,
match="second_level_input must be a list of"):
_check_second_level_input(1, None)
with pytest.raises(ValueError,
match="second_level_input must be a list"):
_check_second_level_input(1, None, flm_object=False)
| def test_check_second_level_input():
from nilearn.glm.second_level.second_level import _check_second_level_input
with pytest.raises(ValueError,
match="A second level model requires a list with at "
"least two first level models or niimgs"):
_check_second_level_input([FirstLevelModel()], pd.DataFrame())
with pytest.raises(ValueError,
match="Model sub_1 at index 0 has not been fit yet"):
_check_second_level_input([FirstLevelModel(subject_label="sub_{}".format(i))
for i in range(1, 3)], pd.DataFrame())
with InTemporaryDirectory():
shapes, rk = [(7, 8, 9, 15)], 3
mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(shapes, rk)
input_models = [FirstLevelModel(mask_img=mask).fit(fmri_data[0],
design_matrices=design_matrices[0])]
obj = lambda: None
obj.results_ = "foo"
obj.labels_ = "bar"
with pytest.raises(ValueError,
match=" object at idx 1 is <class 'function'> "
"instead of FirstLevelModel object"):
_check_second_level_input(input_models + [obj], pd.DataFrame())
with pytest.raises(ValueError,
match="In case confounds are provided, first level "
"objects need to provide the attribute subject_label"):
_check_second_level_input(input_models * 2, pd.DataFrame(),
confounds=pd.DataFrame())
with pytest.raises(ValueError,
match="List of niimgs as second_level_input "
"require a design matrix to be provided"):
_check_second_level_input(fmri_data * 2, None)
_check_second_level_input(fmri_data[0], pd.DataFrame())
with pytest.raises(ValueError,
match=" object at idx 1 is <class 'int'> instead"):
_check_second_level_input(["foo", 1], pd.DataFrame())
with pytest.raises(ValueError,
match="second_level_input DataFrame must have columns "
"subject_label, map_name and effects_map_path"):
_check_second_level_input(pd.DataFrame(columns=["foo", "bar"]), pd.DataFrame())
with pytest.raises(ValueError,
match="subject_label column must contain only strings"):
_check_second_level_input(pd.DataFrame({"subject_label": [1, 2],
"map_name": ["a", "b"],
"effects_map_path": ["c", "d"]}),
pd.DataFrame())
with pytest.raises(ValueError,
match="List of niimgs as second_level_input "
"require a design matrix to be provided"):
_check_second_level_input("foo", None)
with pytest.raises(ValueError,
match="second_level_input must be a list of"):
_check_second_level_input(1, None)
with pytest.raises(ValueError,
match="second_level_input must be a list"):
_check_second_level_input(1, None, flm_object=False)
|
47,986 | def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> link validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not (file_path.is_file() or file_path.is_dir()):
complain(f'{md_path_rel}: OMZ reference "{file_relative_path}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
| def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> link validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not file_path.exists():
complain(f'{md_path_rel}: OMZ reference "{file_relative_path}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
|
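The behavioural change in this row replaces file_path.is_file() or file_path.is_dir() with file_path.exists(); for regular files, directories and missing paths the two checks agree, as this small pathlib sketch illustrates (paths are temporary):
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    target_dir = Path(tmp) / 'models'
    target_dir.mkdir()
    target_file = target_dir / 'README.md'
    target_file.write_text('# stub')
    for p in (target_dir, target_file, Path(tmp) / 'missing.md'):
        assert p.exists() == (p.is_file() or p.is_dir())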
55,638 | def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
        normalized_homography: whether the homography is normalized.
Return:
Patch sampled at locations from source to destination.
Example_1:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example_2
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
| def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
        normalized_homography: whether the homography is normalized.
Return:
Patch sampled at locations from source to destination.
Example_1:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
|
54,064 | def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True`, will either
drop or keep both edges of an undirected edge.
(default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
        raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
                         f'(got {p})')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = num_nodes[0] or edge_index[0].max().item() + 1
num_dst_nodes = num_nodes[1] or edge_index[1].max().item() + 1
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
| def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True` added edges will be undirected.
            (default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
        raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
                         f'(got {p})')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = num_nodes[0] or edge_index[0].max().item() + 1
num_dst_nodes = num_nodes[1] or edge_index[1].max().item() + 1
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
|
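A small plain-torch sketch of what force_undirected does inside the function above: every sampled edge (row, col) is mirrored so (col, row) is added as well, matching the added_edges shown in the docstring example.
import torch

row = torch.tensor([2, 1, 3])
col = torch.tensor([0, 2, 1])

undirected = torch.stack([torch.cat([row, col]), torch.cat([col, row])], dim=0)
print(undirected)
# tensor([[2, 1, 3, 0, 2, 1],
#         [0, 2, 1, 2, 1, 3]])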
46,590 | def test_supported_chars() -> None:
supported_chars = "abc123_/:-\\+.$%*@"
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
| def test_custom_resolver_param_supported_chars() -> None:
supported_chars = "abc123_/:-\\+.$%*@"
c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})
OmegaConf.register_new_resolver("copy", lambda x: x)
assert c.dir1 == supported_chars
|
23,588 | def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
``pvlib.irradiance.dirint`` by taking into account information from a
clear sky model. It is recommended that ``ghi_clearsky`` be calculated
using the Ineichen clear sky model ``pvlib.clearsky.ineichen`` with
``perez_enhancement=True``.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
ghi_clearsky : array-like
Global horizontal irradiance from clear sky model, in W/m^2.
dni_clearsky : array-like
Direct normal irradiance from clear sky model, in W/m^2.
zenith : array-like
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2.
Notes
-----
DIRINDEX model requires time series data (ie. one of the inputs must
be a vector of length > 2).
References
----------
[1] Perez, R., Ineichen, P., Moore, K., Kmiecik, M., Chain, C., George, R.,
& Vignola, F. (2002). A new operational model for satellite-derived
irradiances: description and validation. Solar Energy, 73(5), 307-317.
"""
dni_dirint = dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew, min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirint_clearsky = dirint(ghi_clearsky, zenith, times,
pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew,
min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirindex = dni_clearsky * dni_dirint / dni_dirint_clearsky
dni_dirindex[dni_dirindex < 0] = 0.
return dni_dirindex
| def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
The DIRINDEX model [1] modifies the DIRINT model implemented in
``pvlib.irradiance.dirint`` by taking into account information from a
clear sky model. It is recommended that ``ghi_clearsky`` be calculated
using the Ineichen clear sky model :py:func:`pvlib.clearsky.ineichen` with
``perez_enhancement=True``.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
ghi_clearsky : array-like
Global horizontal irradiance from clear sky model, in W/m^2.
dni_clearsky : array-like
Direct normal irradiance from clear sky model, in W/m^2.
zenith : array-like
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2.
Notes
-----
DIRINDEX model requires time series data (ie. one of the inputs must
be a vector of length > 2).
References
----------
[1] Perez, R., Ineichen, P., Moore, K., Kmiecik, M., Chain, C., George, R.,
& Vignola, F. (2002). A new operational model for satellite-derived
irradiances: description and validation. Solar Energy, 73(5), 307-317.
"""
dni_dirint = dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew, min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirint_clearsky = dirint(ghi_clearsky, zenith, times,
pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew,
min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirindex = dni_clearsky * dni_dirint / dni_dirint_clearsky
dni_dirindex[dni_dirindex < 0] = 0.
return dni_dirindex
|
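A hedged usage sketch for dirindex (site coordinates, times and the 0.85 haze factor are invented): clear-sky GHI and DNI from the Ineichen model feed the function together with measured GHI and solar zenith.
import pandas as pd
import pvlib

times = pd.date_range('2020-06-01 10:00', periods=3, freq='30min', tz='Etc/GMT+7')
loc = pvlib.location.Location(32.2, -110.9, altitude=700)

solpos = loc.get_solarposition(times)
clearsky = loc.get_clearsky(times, model='ineichen')    # DataFrame with ghi, dni, dhi columns
ghi_measured = clearsky['ghi'] * 0.85                   # pretend a slightly hazy sky

dni = pvlib.irradiance.dirindex(ghi_measured, clearsky['ghi'], clearsky['dni'],
                                solpos['zenith'], times)
print(dni)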
3,368 | def parse_sources(config, filter_appconnect=True):
"""
Parses the given sources in the config string (from JSON).
"""
if not config:
return []
try:
sources = json.loads(config)
except Exception as e:
raise InvalidSourcesError(f"{e}")
try:
jsonschema.validate(sources, SOURCES_SCHEMA)
except jsonschema.ValidationError as e:
raise InvalidSourcesError(f"{e}")
# remove App Store Connect sources (we don't need them in Symbolicator)
if filter_appconnect:
filter(lambda src: src.get("type") != "AppStoreConnect", sources)
ids = set()
for source in sources:
if is_internal_source_id(source["id"]):
raise InvalidSourcesError('Source ids must not start with "sentry:"')
if source["id"] in ids:
raise InvalidSourcesError("Duplicate source id: {}".format(source["id"]))
ids.add(source["id"])
return sources
| def parse_sources(config, filter_appconnect=True):
"""
Parses the given sources in the config string (from JSON).
"""
if not config:
return []
try:
sources = json.loads(config)
except Exception as e:
raise InvalidSourcesError(f"{e}")
try:
jsonschema.validate(sources, SOURCES_SCHEMA)
except jsonschema.ValidationError as e:
raise InvalidSourcesError(f"{e}")
# remove App Store Connect sources (we don't need them in Symbolicator)
if filter_appconnect:
filter(lambda src: src.get("type") != SYMBOL_SOURCE_TYPE_NAME, sources)
ids = set()
for source in sources:
if is_internal_source_id(source["id"]):
raise InvalidSourcesError('Source ids must not start with "sentry:"')
if source["id"] in ids:
raise InvalidSourcesError("Duplicate source id: {}".format(source["id"]))
ids.add(source["id"])
return sources
|
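One caveat worth flagging in both versions of this row: built-in filter() is lazy and its result is never assigned, so the "remove App Store Connect sources" step is effectively a no-op. A sketch of an eager variant; the constant's value here is illustrative only.
SYMBOL_SOURCE_TYPE_NAME = 'appStoreConnect'             # stand-in for the real constant

sources = [
    {'id': 'custom-1', 'type': 'http'},
    {'id': 'asc-1', 'type': SYMBOL_SOURCE_TYPE_NAME},
]
sources = [src for src in sources if src.get('type') != SYMBOL_SOURCE_TYPE_NAME]
print(sources)                                          # [{'id': 'custom-1', 'type': 'http'}]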
29,875 | def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = CombinerConfig.default_branch_factor,
batch_size: int = CombinerConfig.default_batch_size,
target_records: int = CombinerConfig.default_target_records,
import_interval_size: Optional[int] = None,
use_genome_default_intervals: bool = False,
use_exome_default_intervals: bool = False,
overwrite: bool = False,
reference_genome: str = 'default',
contig_recoding: Optional[Dict[str, str]] = None,
key_by_locus_and_alleles: bool = False):
"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table.
**Partitioning**
The partitioning of input GVCFs is determined the four parameters below, one of which must be
passed to this function:
- `intervals` -- User-supplied intervals.
- `import_interval_size` -- Use intervals of this uniform size across the genome.
- `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs.
- `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs.
It is recommended that new users include either `use_genome_default_intervals` or
`use_exome_default_intervals`.
Parameters
----------
sample_paths : :obj:`list` of :obj:`str`
Paths to individual GVCFs.
out_file : :obj:`str`
Path to final combined matrix table.
tmp_path : :obj:`str`
Path for intermediate output.
intervals : list of :class:`.Interval` or None
Partitioning with which to import GVCFs in first phase of combiner.
header : :obj:`str` or None
External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well.
sample_names: list of :obj:`str` or None
Sample names, to be used with `header`.
branch_factor : :obj:`int`
Combiner branch factor.
batch_size : :obj:`int`
Combiner batch size.
target_records : :obj:`int`
Target records per partition in each combiner phase after the first.
import_interval_size : :obj:`int` or None
The target interval size to partition the reference into intervals for
importing GVCFs.
use_genome_default_intervals : :obj:`bool`
The input GVCFs are genomes, if this is false, they are assumed to
be exomes. If `import_interval_size` is not None, this parameter is
ignored.
use_exome_default_intervals : :obj:`bool`
        The input GVCFs are exomes; if this is false, they are assumed to
        be genomes. If `import_interval_size` is not None, this parameter is
ignored.
overwrite : :obj:`bool`
Overwrite output file, if it exists.
reference_genome : :obj:`str`
Reference genome for GVCF import.
contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional
Mapping from contig name in gVCFs to contig name the reference
genome. All contigs must be present in the
`reference_genome`, so this is useful for mapping
differently-formatted data onto known references.
key_by_locus_and_alleles : :obj:`bool`
Key by both locus and alleles in the final output.
Returns
-------
None
"""
tmp_path += f'/combiner-temporary/{uuid.uuid4()}/'
if header is not None:
assert sample_names is not None
assert len(sample_names) == len(sample_paths)
n_partition_args = (int(intervals is not None)
+ int(import_interval_size is not None)
+ int(use_genome_default_intervals)
+ int(use_exome_default_intervals))
if n_partition_args == 0:
raise ValueError("'run_combiner': require one argument from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning")
if n_partition_args > 0:
warning("'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals'."
"\n The argument found first in the list in this warning will be used, and others ignored.")
if intervals is not None:
info(f"Using {len(intervals)} user-supplied intervals as partitioning for GVCF import")
elif import_interval_size is not None:
intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size)
info(f"Using {len(intervals)} intervals with user-supplied size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_genome_default_intervals:
size = CombinerConfig.default_genome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default whole-genome size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_exome_default_intervals:
size = CombinerConfig.default_exome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default exome size"
f" {import_interval_size} as partitioning for GVCF import")
assert intervals is not None
config = CombinerConfig(branch_factor=branch_factor,
batch_size=batch_size,
target_records=target_records)
plan = config.plan(len(sample_paths))
files_to_merge = sample_paths
n_phases = len(plan.phases)
total_ops = len(files_to_merge) * n_phases
total_work_done = 0
for phase_i, phase in enumerate(plan.phases):
phase_i += 1 # used for info messages, 1-indexed for readability
n_jobs = len(phase.jobs)
merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables'
job_str = hl.utils.misc.plural('job', n_jobs)
info(f"Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}.")
if phase_i > 1:
intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(),
config.target_records,
reference_genome=reference_genome)
new_files_to_merge = []
for job_i, job in enumerate(phase.jobs):
job_i += 1 # used for info messages, 1-indexed for readability
n_merges = len(job.merges)
merge_str = hl.utils.misc.plural('file', n_merges)
pct_total = 100 * job.input_total_size / total_ops
info(
f"Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O.")
merge_mts: List[MatrixTable] = []
for merge in job.merges:
inputs = [files_to_merge[i] for i in merge.inputs]
if phase_i == 1:
mts = [transform_gvcf(vcf)
for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False,
_external_header=header,
_external_sample_ids=[sample_names[i] for i in
merge.inputs] if header is not None else None,
reference_genome=reference_genome,
contig_recoding=contig_recoding)]
else:
mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs]
merge_mts.append(combine_gvcfs(mts))
if phase_i == n_phases: # final merge!
assert n_jobs == 1
assert len(merge_mts) == 1
[final_mt] = merge_mts
if key_by_locus_and_alleles:
final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True))
final_mt.write(out_file, overwrite=overwrite)
new_files_to_merge = [out_file]
info(f"Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished.")
break
tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/'
hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True)
pad = len(str(len(merge_mts)))
new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts)))
total_work_done += job.input_total_size
info(
f"Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished.")
info(f"Finished phase {phase_i}/{n_phases}.")
files_to_merge = new_files_to_merge
assert files_to_merge == [out_file]
info("Finished!")
| def run_combiner(sample_paths: List[str],
out_file: str,
tmp_path: str,
intervals: Optional[List[hl.utils.Interval]] = None,
header: Optional[str] = None,
sample_names: Optional[List[str]] = None,
branch_factor: int = CombinerConfig.default_branch_factor,
batch_size: int = CombinerConfig.default_batch_size,
target_records: int = CombinerConfig.default_target_records,
import_interval_size: Optional[int] = None,
use_genome_default_intervals: bool = False,
use_exome_default_intervals: bool = False,
overwrite: bool = False,
reference_genome: str = 'default',
contig_recoding: Optional[Dict[str, str]] = None,
key_by_locus_and_alleles: bool = False):
"""Run the Hail VCF combiner, performing a hierarchical merge to create a combined sparse matrix table.
**Partitioning**
    The partitioning of input GVCFs is determined by the four parameters below, one of which must be
passed to this function:
- `intervals` -- User-supplied intervals.
- `import_interval_size` -- Use intervals of this uniform size across the genome.
- `use_genome_default_intervals` -- Use intervals of typical uniform size for whole genome GVCFs.
- `use_exome_default_intervals` -- Use intervals of typical uniform size for exome GVCFs.
It is recommended that new users include either `use_genome_default_intervals` or
`use_exome_default_intervals`.
Parameters
----------
sample_paths : :obj:`list` of :obj:`str`
Paths to individual GVCFs.
out_file : :obj:`str`
Path to final combined matrix table.
tmp_path : :obj:`str`
Path for intermediate output.
intervals : list of :class:`.Interval` or None
Partitioning with which to import GVCFs in first phase of combiner.
header : :obj:`str` or None
External header file to use as GVCF header for all inputs. If defined, `sample_names` must be defined as well.
sample_names: list of :obj:`str` or None
Sample names, to be used with `header`.
branch_factor : :obj:`int`
Combiner branch factor.
batch_size : :obj:`int`
Combiner batch size.
target_records : :obj:`int`
Target records per partition in each combiner phase after the first.
import_interval_size : :obj:`int` or None
The target interval size to partition the reference into intervals for
importing GVCFs.
use_genome_default_intervals : :obj:`bool`
The input GVCFs are genomes, if this is false, they are assumed to
be exomes. If `import_interval_size` is not None, this parameter is
ignored.
use_exome_default_intervals : :obj:`bool`
        The input GVCFs are exomes, if this is false, they are assumed to
        be genomes. If `import_interval_size` is not None, this parameter is
        ignored.
overwrite : :obj:`bool`
Overwrite output file, if it exists.
reference_genome : :obj:`str`
Reference genome for GVCF import.
contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`), optional
        Mapping from contig name in gVCFs to contig name in the reference
genome. All contigs must be present in the
`reference_genome`, so this is useful for mapping
differently-formatted data onto known references.
key_by_locus_and_alleles : :obj:`bool`
Key by both locus and alleles in the final output.
Returns
-------
None
"""
tmp_path += f'/combiner-temporary/{uuid.uuid4()}/'
if header is not None:
assert sample_names is not None
assert len(sample_names) == len(sample_paths)
n_partition_args = (int(intervals is not None)
+ int(import_interval_size is not None)
+ int(use_genome_default_intervals)
+ int(use_exome_default_intervals))
if n_partition_args == 0:
raise ValueError("'run_combiner': require one argument from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals' to choose GVCF partitioning")
    if n_partition_args > 1:
warning("'run_combiner': multiple colliding arguments found from 'intervals', 'import_interval_size', "
"'use_genome_default_intervals', or 'use_exome_default_intervals'."
"\n The argument found first in the list in this warning will be used, and others ignored.")
if intervals is not None:
info(f"Using {len(intervals)} user-supplied intervals as partitioning for GVCF import")
elif import_interval_size is not None:
intervals = calculate_even_genome_partitioning(reference_genome, import_interval_size)
info(f"Using {len(intervals)} intervals with user-supplied size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_genome_default_intervals:
size = CombinerConfig.default_genome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default whole-genome size"
f" {import_interval_size} as partitioning for GVCF import")
elif use_exome_default_intervals:
size = CombinerConfig.default_exome_interval_size
intervals = calculate_even_genome_partitioning(reference_genome, size)
info(f"Using {len(intervals)} intervals with default exome size"
f" {size} as partitioning for GVCF import")
assert intervals is not None
config = CombinerConfig(branch_factor=branch_factor,
batch_size=batch_size,
target_records=target_records)
plan = config.plan(len(sample_paths))
files_to_merge = sample_paths
n_phases = len(plan.phases)
total_ops = len(files_to_merge) * n_phases
total_work_done = 0
for phase_i, phase in enumerate(plan.phases):
phase_i += 1 # used for info messages, 1-indexed for readability
n_jobs = len(phase.jobs)
merge_str = 'input GVCFs' if phase_i == 1 else 'intermediate sparse matrix tables'
job_str = hl.utils.misc.plural('job', n_jobs)
info(f"Starting phase {phase_i}/{n_phases}, merging {len(files_to_merge)} {merge_str} in {n_jobs} {job_str}.")
if phase_i > 1:
intervals = calculate_new_intervals(hl.read_matrix_table(files_to_merge[0]).rows(),
config.target_records,
reference_genome=reference_genome)
new_files_to_merge = []
for job_i, job in enumerate(phase.jobs):
job_i += 1 # used for info messages, 1-indexed for readability
n_merges = len(job.merges)
merge_str = hl.utils.misc.plural('file', n_merges)
pct_total = 100 * job.input_total_size / total_ops
info(
f"Starting phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)} to create {n_merges} merged {merge_str}, corresponding to ~{pct_total:.1f}% of total I/O.")
merge_mts: List[MatrixTable] = []
for merge in job.merges:
inputs = [files_to_merge[i] for i in merge.inputs]
if phase_i == 1:
mts = [transform_gvcf(vcf)
for vcf in hl.import_gvcfs(inputs, intervals, array_elements_required=False,
_external_header=header,
_external_sample_ids=[sample_names[i] for i in
merge.inputs] if header is not None else None,
reference_genome=reference_genome,
contig_recoding=contig_recoding)]
else:
mts = [hl.read_matrix_table(path, _intervals=intervals) for path in inputs]
merge_mts.append(combine_gvcfs(mts))
if phase_i == n_phases: # final merge!
assert n_jobs == 1
assert len(merge_mts) == 1
[final_mt] = merge_mts
if key_by_locus_and_alleles:
final_mt = MatrixTable(MatrixKeyRowsBy(final_mt._mir, ['locus', 'alleles'], is_sorted=True))
final_mt.write(out_file, overwrite=overwrite)
new_files_to_merge = [out_file]
info(f"Finished phase {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, 100% of total I/O finished.")
break
tmp = f'{tmp_path}_phase{phase_i}_job{job_i}/'
hl.experimental.write_matrix_tables(merge_mts, tmp, overwrite=True)
pad = len(str(len(merge_mts)))
new_files_to_merge.extend(tmp + str(n).zfill(pad) + '.mt' for n in range(len(merge_mts)))
total_work_done += job.input_total_size
info(
f"Finished {phase_i}/{n_phases}, job {job_i}/{len(phase.jobs)}, {100 * total_work_done / total_ops:.1f}% of total I/O finished.")
info(f"Finished phase {phase_i}/{n_phases}.")
files_to_merge = new_files_to_merge
assert files_to_merge == [out_file]
info("Finished!")
|
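The run_combiner pair above selects exactly one of four partitioning options before planning the hierarchical merge. The following is a minimal standalone sketch of that selection logic, with hypothetical names and no Hail dependency, to make the precedence order explicit:

from typing import List, Optional

def choose_partitioning(intervals: Optional[List] = None,
                        import_interval_size: Optional[int] = None,
                        use_genome_default_intervals: bool = False,
                        use_exome_default_intervals: bool = False) -> str:
    # Count how many of the four mutually exclusive options were supplied.
    n_args = (int(intervals is not None)
              + int(import_interval_size is not None)
              + int(use_genome_default_intervals)
              + int(use_exome_default_intervals))
    if n_args == 0:
        raise ValueError("one of 'intervals', 'import_interval_size', "
                         "'use_genome_default_intervals', or "
                         "'use_exome_default_intervals' is required")
    # First match wins, mirroring the if/elif chain above.
    if intervals is not None:
        return "user-supplied intervals"
    if import_interval_size is not None:
        return f"uniform intervals of size {import_interval_size}"
    if use_genome_default_intervals:
        return "default whole-genome interval size"
    return "default exome interval size"

print(choose_partitioning(use_exome_default_intervals=True))
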
54,596 | def blackify(base_branch, black_command):
current_branch = git("branch", "--show-current")
if not current_branch or base_branch == current_branch:
print("You need to check out a feature brach to work on")
return 1
if not os.path.exists(".git"):
print("Run me in the root of your repo")
return 1
merge_base = git("merge-base", "HEAD", base_branch)
if not merge_base:
print("Could not find a common commit for current head and %s" % base_branch)
return 1
commits = git(
"log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base
).split()
for commit in commits:
git("checkout", commit, "-b" "%s-black" % commit)
check_output(black_command, shell=True)
git("commit", "-aqm", "blackify")
git("checkout", base_branch, "-b" "%s-black" % current_branch)
for last_commit, commit in zip(commits, commits[1:]):
allow_empty = (
b"--allow-empty" in run(["git", "apply", "-h"], stdout=PIPE).stdout
)
quiet = (
b"--quiet" in run(["git", "apply", "-h"], stdout=PIPE).stdout
)
git_diff = Popen(
["git", "diff", "--find-copies", "%s-black..%s-black" % (last_commit, commit)],
stdout=PIPE,
)
git_apply = Popen(
[
"git",
"apply",
]
+ (["--quiet"] if quiet else [])
+ [
"-3",
"--intent-to-add",
]
+ (["--allow-empty"] if allow_empty else [])
+ [
"-",
],
stdin=git_diff.stdout,
stdout=PIPE,
)
git_diff.stdout.close()
git_apply.communicate()
git("commit", "--allow-empty", "-aqC", commit)
for commit in commits:
git("branch", "-qD", "%s-black" % commit)
| def blackify(base_branch, black_command):
current_branch = git("branch", "--show-current")
if not current_branch or base_branch == current_branch:
print("You need to check out a feature brach to work on")
return 1
if not os.path.exists(".git"):
print("Run me in the root of your repo")
return 1
merge_base = git("merge-base", "HEAD", base_branch)
if not merge_base:
print("Could not find a common commit for current head and %s" % base_branch)
return 1
commits = git(
"log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base
).split()
for commit in commits:
git("checkout", commit, "-b" "%s-black" % commit)
check_output(black_command, shell=True)
git("commit", "-aqm", "blackify")
import shlex
check_output(shlex.split(black_command))
for last_commit, commit in zip(commits, commits[1:]):
allow_empty = (
b"--allow-empty" in run(["git", "apply", "-h"], stdout=PIPE).stdout
)
quiet = (
b"--quiet" in run(["git", "apply", "-h"], stdout=PIPE).stdout
)
git_diff = Popen(
["git", "diff", "--find-copies", "%s-black..%s-black" % (last_commit, commit)],
stdout=PIPE,
)
git_apply = Popen(
[
"git",
"apply",
]
+ (["--quiet"] if quiet else [])
+ [
"-3",
"--intent-to-add",
]
+ (["--allow-empty"] if allow_empty else [])
+ [
"-",
],
stdin=git_diff.stdout,
stdout=PIPE,
)
git_diff.stdout.close()
git_apply.communicate()
git("commit", "--allow-empty", "-aqC", commit)
for commit in commits:
git("branch", "-qD", "%s-black" % commit)
|
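The blackify pair above runs the user-supplied black command through subprocess. Below is a small stand-alone comparison of the two invocation styles it mixes, shell=True versus shlex.split, assuming a POSIX shell is available and using a harmless echo command:

import shlex
from subprocess import check_output

command = "echo hello world"  # assumed example command

# shell=True hands the whole string to the shell; quoting is the caller's problem.
out_shell = check_output(command, shell=True)

# shlex.split tokenizes the string the way a POSIX shell would, so no shell is spawned.
out_direct = check_output(shlex.split(command))

assert out_shell == out_direct == b"hello world\n"
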
33,325 | def test_run_script(setup_test_data):
""" Test that the populate_reporting_agency_tas script acts as expected """
connection = get_connection(read_only=False)
sql_path = str(settings.APP_DIR / "reporting/management/sql/populate_reporting_agency_tas.sql")
with open(sql_path) as f:
test_sql = f.read()
# Executing the SQL and testing the entry with only one record for the period/fiscal year/tas per table
with connection.cursor() as cursor:
cursor.execute(test_sql)
cursor.execute(
"""
SELECT appropriation_obligated_amount,
object_class_pa_obligated_amount,
diff_approp_ocpa_obligated_amounts
FROM reporting_agency_tas
WHERE fiscal_period = 3 AND fiscal_year = 2019 AND tas_rendering_label = 'tas-1'
"""
)
results = cursor.fetchall()
assert len(results) == 1
for result in results:
assert result[0] == 50
assert result[1] == 20.5
assert result[2] == 29.5
# Testing an entry with multiple rows that roll up into a single period/fiscal year/tas
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT appropriation_obligated_amount,
object_class_pa_obligated_amount,
diff_approp_ocpa_obligated_amounts
FROM reporting_agency_tas
WHERE fiscal_period = 3 AND fiscal_year = 2019 AND tas_rendering_label = 'tas-2'
"""
)
results = cursor.fetchall()
assert len(results) == 1
for result in results:
assert result[0] == 41
assert result[1] == Decimal("42.30")
assert result[2] == Decimal("-1.30")
| def test_run_script(setup_test_data):
""" Test that the populate_reporting_agency_tas script acts as expected """
connection = get_connection(read_only=False)
sql_path = str(settings.APP_DIR / "reporting" / "management" / "sql" / "populate_reporting_agency_tas.sql")
with open(sql_path) as f:
test_sql = f.read()
# Executing the SQL and testing the entry with only one record for the period/fiscal year/tas per table
with connection.cursor() as cursor:
cursor.execute(test_sql)
cursor.execute(
"""
SELECT appropriation_obligated_amount,
object_class_pa_obligated_amount,
diff_approp_ocpa_obligated_amounts
FROM reporting_agency_tas
WHERE fiscal_period = 3 AND fiscal_year = 2019 AND tas_rendering_label = 'tas-1'
"""
)
results = cursor.fetchall()
assert len(results) == 1
for result in results:
assert result[0] == 50
assert result[1] == 20.5
assert result[2] == 29.5
# Testing an entry with multiple rows that roll up into a single period/fiscal year/tas
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT appropriation_obligated_amount,
object_class_pa_obligated_amount,
diff_approp_ocpa_obligated_amounts
FROM reporting_agency_tas
WHERE fiscal_period = 3 AND fiscal_year = 2019 AND tas_rendering_label = 'tas-2'
"""
)
results = cursor.fetchall()
assert len(results) == 1
for result in results:
assert result[0] == 41
assert result[1] == Decimal("42.30")
assert result[2] == Decimal("-1.30")
|
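The only difference between the two test_run_script versions is how sql_path is built. A short illustration of the two pathlib joining styles, using a hypothetical base directory in place of settings.APP_DIR:

from pathlib import Path

app_dir = Path("/opt/usaspending")  # hypothetical stand-in for settings.APP_DIR

# Joining one component at a time with the / operator:
by_parts = app_dir / "reporting" / "management" / "sql" / "populate_reporting_agency_tas.sql"

# Joining a single relative string (the style used in the left-hand version):
by_string = app_dir / "reporting/management/sql/populate_reporting_agency_tas.sql"

# Both resolve to the same path object.
assert by_parts == by_string
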
14,701 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
data = CameraData(hass, conf, home)
if not data.get_camera_names():
return None
except pyatmo.NoDevice:
return None
welcome_sensors = config.get(
CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
presence_sensors = config.get(
CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if camera_type == 'NACamera':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in welcome_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
if camera_type == 'NOC':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in presence_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
for module_name in data.get_module_names(camera_name):
for variable in tag_sensors:
camera_type = None
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
| def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
auth = hass.data[DATA_NETATMO_AUTH]
try:
data = CameraData(hass, conf, home)
if not data.get_camera_names():
return None
except pyatmo.NoDevice:
return None
welcome_sensors = config.get(
CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
presence_sensors = config.get(
CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if camera_type == 'NACamera':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in welcome_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
if camera_type == 'NOC':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in presence_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
for module_name in data.get_module_names(camera_name):
for variable in tag_sensors:
camera_type = None
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
|
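The two setup_platform versions above differ in how they pull shared state out of hass.data. A tiny sketch of the two dictionary-access styles involved, with a made-up store standing in for hass.data:

data_store = {"netatmo_auth": object()}  # made-up stand-in for hass.data

# Indexing raises KeyError when the key was never stored.
auth = data_store["netatmo_auth"]

# .get() with a default never raises and falls back to an empty dict.
conf = data_store.get("netatmo_config", {})

assert conf == {}
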
31,630 | def is_valid_attack_pattern(items):
try:
results = demisto.executeCommand('mitre-get-attack-pattern-value', {'attack_ids': items})
list_contents = results[0]['Contents']
values = [content.get('value') for content in list_contents]
return values if values else False
except ValueError as e:
        if 'verify you have proper integration enabled to support it' in str(e):
demisto.info('Unsupported Command : mitre-get-attack-pattern-value, '
'verify you have proper integration (MITRE ATTACK v2) enabled to support it.')
return False
| def is_valid_attack_pattern(items):
try:
results = demisto.executeCommand('mitre-get-attack-pattern-value', {'attack_ids': items})
list_contents = results[0]['Contents']
values = [content.get('value') for content in list_contents]
return values if values else False
except ValueError as e:
        if 'verify you have proper integration enabled to support it' in str(e):
demisto.info('Unsupported Command : mitre-get-attack-pattern-value, '
'verify you have proper integration (MITRE ATTACK v2) enabled to support it. This Is needed in order to auto extract MITRE IDs and translate them to Attack Pattern IOCs')
return False
|
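is_valid_attack_pattern above decides whether to swallow a ValueError by looking for a known fragment of its message. A minimal sketch of that kind of message matching, converting the exception to text before the substring test:

def has_message(exc: Exception, fragment: str) -> bool:
    # Substring tests belong on the exception's text, so convert with str() first.
    return fragment in str(exc)

try:
    raise ValueError("verify you have proper integration enabled to support it")
except ValueError as err:
    assert has_message(err, "proper integration enabled")
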
58,818 | def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
    using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
| def gels(a, b):
"""Solves over/well/under-determined linear systems.
    Computes the least-squares solution to the equation ``ax = b`` by QR factorization
    using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a = a.copy(order='F')
b = b.copy(order='F')
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
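gels above handles the over-determined case by computing a QR factorization and then solving R * x = Q^T * b. A CPU-only sketch of the same algebra with NumPy (no CuPy or cuSOLVER required), cross-checked against the library least-squares routine:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((6, 3))     # over-determined: more rows than columns
b = rng.standard_normal(6)

q, r = np.linalg.qr(a)              # reduced QR: q is (6, 3), r is (3, 3)
x_qr = np.linalg.solve(r, q.T @ b)  # solve R * x = Q^T * b

# Cross-check against NumPy's own least-squares solver.
x_ref, *_ = np.linalg.lstsq(a, b, rcond=None)
assert np.allclose(x_qr, x_ref)
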
48,868 | def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
path = __lazy_imports.get(name)
if path:
import operator
# Strip of the "airflow." prefix because of how `__import__` works (it always returns the top level
# module)
without_prefix = path.split('.', 1)[-1]
getter = operator.attrgetter(f'{without_prefix}.{name}')
val = getter(__import__(path))
# Store for next time
globals()[name] = val
return val
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
path = __lazy_imports.get(name)
if path:
import operator
# Strip of the "airflow." prefix because of how `__import__` works (it always returns the top level
# module)
without_prefix = path.split('.', 1)[-1]
getter = operator.attrgetter(f'{without_prefix}.{name}')
val = getter(__import__(path))
# Store for next time
globals()[name] = val
return val
|
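The __getattr__ pair above implements PEP 562 lazy module attributes via attrgetter and __import__. Below is a compact sketch of the same pattern using importlib instead, with an example attribute-to-module mapping; the trailing AttributeError matters, because without it unknown names silently resolve to None and hasattr() reports True for everything:

import importlib

_lazy_imports = {"ceil": "math"}  # attribute name -> providing module (example mapping)

def __getattr__(name):
    module_name = _lazy_imports.get(name)
    if module_name:
        value = getattr(importlib.import_module(module_name), name)
        globals()[name] = value  # cache so later lookups bypass __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
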
55,636 | def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image. We are assuming a CFA
    with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/B and a fixed convolution
    for the green pixels. To simplify calculations we expect the Height/Width to be evenly divisible by 2
The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2
for simplicity reasons
Args:
image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`.
cfa: The configuration of the color filter.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> rawinput = torch.randn(2, 1, 4, 6)
        >>> rgb = raw_to_rgb(rawinput, CFA.BG) # 2x3x4x6
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. " f"Got {type(image)}")
if image.dim() < 3 or image.size(-3) != 1:
raise ValueError(f"Input size must have a shape of (*, 1, H, W). " f"Got {image.shape}.")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
dosqueeze = False
    # for compatibility with pytorch functions, make sure we are always 4 dimensions and
# strip the extra at the end, if necessary
if len(image.shape) == 3:
image = image.unsqueeze(0)
dosqueeze = True
# BG is defined as pel 1,1 being blue, that is the top left is actually green. This matches
# opencv naming so makes sense to keep
if cfa == CFA.BG:
r = image[..., :, ::2, ::2]
b = image[..., :, 1::2, 1::2]
rpad = (0, 1, 0, 1)
bpad = (1, 0, 1, 0)
elif cfa == CFA.GB:
r = image[..., :, ::2, 1::2]
b = image[..., :, 1::2, ::2]
rpad = (1, 0, 0, 1)
bpad = (0, 1, 1, 0)
elif cfa == CFA.RG:
r = image[..., :, 1::2, 1::2]
b = image[..., :, ::2, ::2]
rpad = (1, 0, 1, 0)
bpad = (0, 1, 0, 1)
elif cfa == CFA.GR:
r = image[..., :, 1::2, ::2]
b = image[..., :, ::2, 1::2]
rpad = (0, 1, 1, 0)
bpad = (1, 0, 0, 1)
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
# upscaling r and b with bi-linear gives reasonable quality
# Note that depending on where these are sampled we need to pad appropriately
# the bilinear filter will pretty much be based on for example this layout (RG)
# (which needs to be padded bottom right)
# +-+-+
# |B| |
# | | |
# +-+-+
# While in this layout we need to pad with additional B samples top left to
# make sure we interpolate from the correct position
# +-+-+
# | | |
# | |B|
# +-+-+
# For an image like this (3x2 blue pixels)
# +------+
# |B B B |
# | |
# |B B B |
# | |
# +------+
# It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation)
# +-------+
# |B B B b|
# | |
# |B B B b|
# | |
# |b b b b|
# +-------+
# and we crop the area afterwards. This is since the interpolation will be between first and last pixel
    # evenly spaced between them while the B/R samples will be missing in the corners where they are assumed to exist
    # Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that
    # way we get to keep the known blue samples across the whole image
rpadded = torch.nn.functional.pad(r, rpad, 'replicate')
bpadded = torch.nn.functional.pad(b, bpad, 'replicate')
# use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors
# for a 2x2 bayer filter
gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect')
ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
# remove the extra padding
ru = torch.nn.functional.pad(ru, [-x for x in rpad])
bu = torch.nn.functional.pad(bu, [-x for x in bpad])
# all unknown pixels are the average of the nearby green samples
kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]],
dtype=image.dtype, device=image.device)
# This is done on all samples but result for the known green samples is then overwritten by the input
gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid')
# overwrite the already known samples which otherwise have values from r/b
# this depends on the CFA configuration
if cfa == CFA.BG:
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
elif cfa == CFA.GB:
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
elif cfa == CFA.RG:
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
elif cfa == CFA.GR:
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3)
# return possibly missing batch dim
if dosqueeze:
rgb = rgb.squeeze(0)
return rgb
| def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image.
We are assuming a CFA
    with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/B and a fixed convolution
    for the green pixels. To simplify calculations we expect the Height/Width to be evenly divisible by 2
The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2
for simplicity reasons
Args:
image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`.
cfa: The configuration of the color filter.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> rawinput = torch.randn(2, 1, 4, 6)
        >>> rgb = raw_to_rgb(rawinput, CFA.BG) # 2x3x4x6
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. " f"Got {type(image)}")
if image.dim() < 3 or image.size(-3) != 1:
raise ValueError(f"Input size must have a shape of (*, 1, H, W). " f"Got {image.shape}.")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
dosqueeze = False
    # for compatibility with pytorch functions, make sure we are always 4 dimensions and
# strip the extra at the end, if necessary
if len(image.shape) == 3:
image = image.unsqueeze(0)
dosqueeze = True
# BG is defined as pel 1,1 being blue, that is the top left is actually green. This matches
# opencv naming so makes sense to keep
if cfa == CFA.BG:
r = image[..., :, ::2, ::2]
b = image[..., :, 1::2, 1::2]
rpad = (0, 1, 0, 1)
bpad = (1, 0, 1, 0)
elif cfa == CFA.GB:
r = image[..., :, ::2, 1::2]
b = image[..., :, 1::2, ::2]
rpad = (1, 0, 0, 1)
bpad = (0, 1, 1, 0)
elif cfa == CFA.RG:
r = image[..., :, 1::2, 1::2]
b = image[..., :, ::2, ::2]
rpad = (1, 0, 1, 0)
bpad = (0, 1, 0, 1)
elif cfa == CFA.GR:
r = image[..., :, 1::2, ::2]
b = image[..., :, ::2, 1::2]
rpad = (0, 1, 1, 0)
bpad = (1, 0, 0, 1)
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
# upscaling r and b with bi-linear gives reasonable quality
# Note that depending on where these are sampled we need to pad appropriately
# the bilinear filter will pretty much be based on for example this layout (RG)
# (which needs to be padded bottom right)
# +-+-+
# |B| |
# | | |
# +-+-+
# While in this layout we need to pad with additional B samples top left to
# make sure we interpolate from the correct position
# +-+-+
# | | |
# | |B|
# +-+-+
# For an image like this (3x2 blue pixels)
# +------+
# |B B B |
# | |
# |B B B |
# | |
# +------+
# It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation)
# +-------+
# |B B B b|
# | |
# |B B B b|
# | |
# |b b b b|
# +-------+
# and we crop the area afterwards. This is since the interpolation will be between first and last pixel
    # evenly spaced between them while the B/R samples will be missing in the corners where they are assumed to exist
    # Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that
    # way we get to keep the known blue samples across the whole image
rpadded = torch.nn.functional.pad(r, rpad, 'replicate')
bpadded = torch.nn.functional.pad(b, bpad, 'replicate')
# use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors
# for a 2x2 bayer filter
gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect')
ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
# remove the extra padding
ru = torch.nn.functional.pad(ru, [-x for x in rpad])
bu = torch.nn.functional.pad(bu, [-x for x in bpad])
# all unknown pixels are the average of the nearby green samples
kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]],
dtype=image.dtype, device=image.device)
# This is done on all samples but result for the known green samples is then overwritten by the input
gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid')
# overwrite the already known samples which otherwise have values from r/b
# this depends on the CFA configuration
if cfa == CFA.BG:
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
elif cfa == CFA.GB:
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
elif cfa == CFA.RG:
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
elif cfa == CFA.GR:
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3)
# return possibly missing batch dim
if dosqueeze:
rgb = rgb.squeeze(0)
return rgb
|
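raw_to_rgb above recovers the R and B planes by strided slicing of the Bayer mosaic before upsampling them. A small shape-only sketch of that slicing for the CFA.BG layout used above, on a toy 4x4 mosaic:

import torch

raw = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)  # toy 1x1x4x4 mosaic

# For the CFA.BG layout, red samples sit at even row/column positions and
# blue samples at odd row/column positions; the remaining sites are green.
red_sites = raw[..., ::2, ::2]
blue_sites = raw[..., 1::2, 1::2]

# Each colour plane covers a quarter of the mosaic before bilinear upsampling.
assert red_sites.shape == (1, 1, 2, 2)
assert blue_sites.shape == (1, 1, 2, 2)
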
57,676 | def fetch_incidents(client, last_run: Dict[str, int], first_fetch_time: Optional[int]):
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
last_fetch = last_run.get('last_fetch', None)
subscription = demisto.params().get('incident_type', ["event", "alert"])
if last_fetch is None:
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
incidents: List[Dict[str, Any]] = []
args = {
'limit': max_results,
'offset': last_fetch
}
alerts = infinipoint_command(client, args, COMMANDS_CONFIG['infinipoint-get-events'])
if alerts:
for alert in alerts.outputs:
if alert.get("subscription") in subscription:
incident_created_time = int(alert.get('timestamp', '0'))
incident_created_time_ms = incident_created_time * 1000
incident = {
'name': f'Infinipoint {alert.get("name")}',
'type': f'Infinipoint {alert.get("type")}',
'occurred': timestamp_to_datestring(incident_created_time_ms),
'rawJSON': json.dumps(alert.get('rawJSON'))
}
incidents.append(incident)
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time}
demisto.setLastRun(next_run)
demisto.incidents(incidents)
| def fetch_incidents(client, last_run: Dict[str, int], first_fetch_time: Optional[int]):
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
last_fetch = last_run.get('last_fetch', None)
subscription = demisto.params().get('incident_type', ["event", "alert"])
if last_fetch is None:
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
incidents: List[Dict[str, Any]] = []
args = {
'limit': max_results,
'offset': last_fetch
}
alerts = infinipoint_command(client, args, COMMANDS_CONFIG['infinipoint-get-events'])
if alerts:
for alert in alerts.outputs:
if alert.get("subscription") in subscription:
incident_created_time = datetime.fromtimestamp(int(alert.get('timestamp', '0')), timezone.utc)
incident = {
'name': f'Infinipoint {alert.get("name")}',
'type': f'Infinipoint {alert.get("type")}',
'occurred': incident_created_time.isoformat(),
'rawJSON': json.dumps(alert.get('rawJSON'))
}
incidents.append(incident)
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
next_run = {'last_fetch': latest_created_time}
demisto.setLastRun(next_run)
demisto.incidents(incidents)
|
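Both fetch_incidents versions turn an epoch timestamp taken from the alert into an 'occurred' string. A standard-library sketch of that conversion to an ISO-8601 UTC string, with an assumed example timestamp:

from datetime import datetime, timezone

epoch_seconds = 1609459200  # assumed example value (2021-01-01 00:00:00 UTC)

occurred = datetime.fromtimestamp(epoch_seconds, timezone.utc).isoformat()
assert occurred == "2021-01-01T00:00:00+00:00"
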
11,444 | def resolve_tenant(default_tenant, tenant_id=None, **_):
# type: (str, Optional[str], **Any) -> str
"""Returns the correct tenant for a token request given a credential's configuration"""
if tenant_id is None:
return default_tenant
if (default_tenant == "adfs"
or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH)
):
_LOGGER.info("A token was request for a different tenant than was configured on the credential, "
"but the configured value was used since multi tenant authentication has been disabled. "
"Configured TenantId: {}, Requested TenantId {}".format(default_tenant, tenant_id))
return default_tenant
_LOGGER.info("A token was requested for a different tenant than was configured on the credential, "
"and the requested tenant id was used to authenticate. Configured TenantId: {}, "
"Requested TenantId {}".format(default_tenant, tenant_id))
return tenant_id
| def resolve_tenant(default_tenant, tenant_id=None, **_):
# type: (str, Optional[str], **Any) -> str
"""Returns the correct tenant for a token request given a credential's configuration"""
if tenant_id is None:
return default_tenant
if (default_tenant == "adfs"
or os.environ.get(EnvironmentVariables.AZURE_IDENTITY_DISABLE_MULTITENANTAUTH)
):
_LOGGER.info("A token was request for a different tenant than was configured on the credential, "
"but the configured value was used since multi tenant authentication has been disabled. "
"Configured TenantId: {}, Requested TenantId {}".format(default_tenant, tenant_id))
return default_tenant
_LOGGER.info("A token was requested for a different tenant than was configured on the credential, "
"and the requested tenant ID was used to authenticate. Configured TenantId: {}, "
"Requested TenantId {}".format(default_tenant, tenant_id))
return tenant_id
|
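resolve_tenant above gates multi-tenant authentication on an environment variable and an ADFS check. A standalone sketch of that decision, where the environment-variable name mirrors the constant referenced in the code above and is an assumption here:

import os

DISABLE_FLAG = "AZURE_IDENTITY_DISABLE_MULTITENANTAUTH"  # assumed to match the constant above

def pick_tenant(default_tenant, requested_tenant=None):
    # Keep the configured tenant when nothing else was requested, when the
    # credential targets ADFS, or when multi-tenant auth is disabled.
    if requested_tenant is None:
        return default_tenant
    if default_tenant == "adfs" or os.environ.get(DISABLE_FLAG):
        return default_tenant
    return requested_tenant

assert pick_tenant("contoso") == "contoso"
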
41,899 | def _get_edf_plot(studies: List[Study]) -> Axes:
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig, ax = plt.subplots()
ax.set_title("Empirical Distribution Function Plot")
ax.set_xlabel("Objective Value")
ax.set_ylabel("Cumulative Probability")
ax.set_ylim(0, 1)
cmap = plt.get_cmap("tab20") # Use tab20 colormap for multiple line plots.
# Prepare data for plotting.
if len(studies) == 0:
_logger.warning("There are no studies.")
return ax
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return ax
min_x_value = min(trial.value for trial in all_trials)
max_x_value = max(trial.value for trial in all_trials)
x_values = np.linspace(min_x_value, max_x_value, 100)
# Draw multiple line plots.
for i, study in enumerate(studies):
values = np.asarray(
[
trial.value
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
]
)
y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size
ax.plot(x_values, y_values, color=cmap(i), alpha=0.7, label=study.study_name)
return ax
| def _get_edf_plot(studies: List[Study]) -> Axes:
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
ax = plt.subplot()
ax.set_title("Empirical Distribution Function Plot")
ax.set_xlabel("Objective Value")
ax.set_ylabel("Cumulative Probability")
ax.set_ylim(0, 1)
cmap = plt.get_cmap("tab20") # Use tab20 colormap for multiple line plots.
# Prepare data for plotting.
if len(studies) == 0:
_logger.warning("There are no studies.")
return ax
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return ax
min_x_value = min(trial.value for trial in all_trials)
max_x_value = max(trial.value for trial in all_trials)
x_values = np.linspace(min_x_value, max_x_value, 100)
# Draw multiple line plots.
for i, study in enumerate(studies):
values = np.asarray(
[
trial.value
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
]
)
y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size
ax.plot(x_values, y_values, color=cmap(i), alpha=0.7, label=study.study_name)
return ax
|
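_get_edf_plot above computes the empirical distribution function with a single broadcasting expression. The same computation on a tiny array of assumed objective values:

import numpy as np

values = np.array([3.0, 1.0, 2.0, 2.0])                # assumed trial objective values
x_values = np.linspace(values.min(), values.max(), 5)  # [1.0, 1.5, 2.0, 2.5, 3.0]

# Broadcasting (n_trials, 1) against (n_points,) counts, for each x, how many
# trial values fall at or below it; dividing by the trial count gives the EDF.
y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size

assert y_values[0] == 0.25 and y_values[-1] == 1.0
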
52,291 | def get_parser():
parser = SCTArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"then performs a disc search in the superior, then inferior direction, using template disc matching based "
"on mutual information score. The automatic method uses the module implemented in "
"'spinalcordtoolbox/vertebrae/detect_c2c3.py' to detect the C2-C3 disc."
)
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: t2.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Segmentation of the spinal cord. Example: t2_seg.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2'],
required=True,
help="Type of image contrast. 't2': cord dark / CSF bright. 't1': cord bright / CSF dark"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-t',
metavar=Metavar.folder,
default=os.path.join(__data_dir__, "PAM50"),
help="Path to template."
)
optional.add_argument(
'-initz',
metavar=Metavar.list,
type=parse_initz,
help="Initialize using slice number and disc value. Example: 68,4 (slice 68 corresponds to disc C3/C4).\n"
"WARNING: Slice number should correspond to superior-inferior direction (i.e. Z in RPI orientation, but "
"Y in LIP orientation)."
)
optional.add_argument(
'-initcenter',
metavar=Metavar.int,
type=int,
help="Initialize using disc value centered in the rostro-caudal direction. If the spine is curved, then "
"consider the disc that projects onto the cord at the center of the z-FOV."
)
optional.add_argument(
'-initfile',
metavar=Metavar.file,
action=InitFileAction,
dest=argparse.SUPPRESS,
help="Initialize labeling by providing a text file which includes either -initz or -initcenter flag."
)
optional.add_argument(
'-initlabel',
metavar=Metavar.file,
help="Initialize vertebral labeling by providing a nifti file that has a single disc label. An example of "
"such file is a single voxel with value '3', which would be located at the posterior tip of C2-C3 disc. "
"Such label file can be created using: sct_label_utils -i IMAGE_REF -create-viewer 3 ; or by using the "
"Python module 'detect_c2c3' implemented in 'spinalcordtoolbox/vertebrae/detect_c2c3.py'."
)
optional.add_argument(
'-discfile',
metavar=Metavar.file,
help="File with disc labels, which will be used to transform the input segmentation into a vertebral level "
"file. In that case, there will be no disc detection. The convention for disc labels is the following: "
"value=3 -> disc C2/C3, value=4 -> disc C3/C4, etc."
)
optional.add_argument(
'-ofolder',
metavar=Metavar.file,
action=ActionCreateFolder,
default='',
help="Output folder."
)
optional.add_argument(
'-laplacian',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=0,
help="Apply Laplacian filtering. More accurate but could mistake disc depending on anatomy."
)
optional.add_argument(
'-clean-labels',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
help="Clean output labeled segmentation to resemble original segmentation. "
"0: no cleaning, "
"1: remove labels that fall outside the original segmentation, "
"2: also fill in labels to cover the entire original segmentation."
)
optional.add_argument(
'-scale-dist',
metavar=Metavar.float,
type=float,
default=1.,
help="Scaling factor to adjust the average distance between two adjacent intervertebral discs. For example, "
"if you are dealing with images from pediatric population, the distance should be reduced, so you can "
"try a scaling factor of about 0.7."
)
optional.add_argument(
'-param',
metavar=Metavar.list,
type=vertebral_detection_param,
default=','.join(f'{key}={value}' for key, value in param_default.items()),
help='Advanced parameters. Assign value with "="; Separate arguments with ","\n'
' - shift_AP [mm]: AP shift of centerline for disc search\n'
' - size_AP [mm]: AP window size for disc search\n'
' - size_RL [mm]: RL window size for disc search\n'
' - size_IS [mm]: IS window size for disc search\n',
)
optional.add_argument(
'-r',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Remove temporary files."
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
| def get_parser():
parser = SCTArgumentParser(
description=(
"This function takes an anatomical image and its cord segmentation (binary file), and outputs the "
"cord segmentation labeled with vertebral level. The algorithm requires an initialization (first disc) and "
"then performs a disc search in the superior, then inferior direction, using template disc matching based "
"on mutual information score. The automatic method uses the module implemented in "
"'spinalcordtoolbox/vertebrae/detect_c2c3.py' to detect the C2-C3 disc."
)
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: t2.nii.gz"
)
mandatory.add_argument(
'-s',
metavar=Metavar.file,
required=True,
help="Segmentation of the spinal cord. Example: t2_seg.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2'],
required=True,
help="Type of image contrast. 't2': cord dark / CSF bright. 't1': cord bright / CSF dark"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-t',
metavar=Metavar.folder,
default=os.path.join(__data_dir__, "PAM50"),
help="Path to template."
)
optional.add_argument(
'-initz',
metavar=Metavar.list,
type=parse_initz,
help="Initialize using slice number and disc value. Example: 68,4 (slice 68 corresponds to disc C3/C4).\n"
"WARNING: Slice number should correspond to superior-inferior direction (i.e. Z in RPI orientation, but "
"Y in LIP orientation)."
)
optional.add_argument(
'-initcenter',
metavar=Metavar.int,
type=int,
help="Initialize using disc value centered in the rostro-caudal direction. If the spine is curved, then "
"consider the disc that projects onto the cord at the center of the z-FOV."
)
optional.add_argument(
'-initfile',
metavar=Metavar.file,
action=InitFileAction,
dest=argparse.SUPPRESS,
help="Initialize labeling by providing a text file which includes either -initz or -initcenter flag."
)
optional.add_argument(
'-initlabel',
metavar=Metavar.file,
help="Initialize vertebral labeling by providing a nifti file that has a single disc label. An example of "
"such file is a single voxel with value '3', which would be located at the posterior tip of C2-C3 disc. "
"Such label file can be created using: sct_label_utils -i IMAGE_REF -create-viewer 3 ; or by using the "
"Python module 'detect_c2c3' implemented in 'spinalcordtoolbox/vertebrae/detect_c2c3.py'."
)
optional.add_argument(
'-discfile',
metavar=Metavar.file,
help="File with disc labels, which will be used to transform the input segmentation into a vertebral level "
"file. In that case, there will be no disc detection. The convention for disc labels is the following: "
"value=3 -> disc C2/C3, value=4 -> disc C3/C4, etc."
)
optional.add_argument(
'-ofolder',
metavar=Metavar.file,
action=ActionCreateFolder,
default='',
help="Output folder."
)
optional.add_argument(
'-laplacian',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=0,
help="Apply Laplacian filtering. More accurate but could mistake disc depending on anatomy."
)
optional.add_argument(
'-clean-labels',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
help="Clean output labeled segmentation to resemble original segmentation. "
"0: no cleaning, "
"1: remove labeled voxels that fall outside the original segmentation, "
"2: `-clean-labels 1`, plus also fill in voxels so that the labels cover "
"the entire original segmentation."
)
optional.add_argument(
'-scale-dist',
metavar=Metavar.float,
type=float,
default=1.,
help="Scaling factor to adjust the average distance between two adjacent intervertebral discs. For example, "
"if you are dealing with images from pediatric population, the distance should be reduced, so you can "
"try a scaling factor of about 0.7."
)
optional.add_argument(
'-param',
metavar=Metavar.list,
type=vertebral_detection_param,
default=','.join(f'{key}={value}' for key, value in param_default.items()),
help='Advanced parameters. Assign value with "="; Separate arguments with ","\n'
' - shift_AP [mm]: AP shift of centerline for disc search\n'
' - size_AP [mm]: AP window size for disc search\n'
' - size_RL [mm]: RL window size for disc search\n'
' - size_IS [mm]: IS window size for disc search\n',
)
optional.add_argument(
'-r',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Remove temporary files."
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
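get_parser above wires custom type callables such as parse_initz into argparse. A self-contained sketch of that pattern with a hypothetical comma-pair parser standing in for parse_initz:

import argparse

def parse_pair(value):
    # Hypothetical stand-in for parse_initz: "68,4" -> [68, 4].
    return [int(part) for part in value.split(",")]

parser = argparse.ArgumentParser()
parser.add_argument("-initz", type=parse_pair, metavar="LIST",
                    help="slice number and disc value, e.g. 68,4")
parser.add_argument("-laplacian", type=int, choices=[0, 1], default=0)

args = parser.parse_args(["-initz", "68,4"])
assert args.initz == [68, 4] and args.laplacian == 0
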
47,101 | def create_rename_keys(config, base_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
("blocks." + str(i) + ".norm1.weight", "vit.encoder.layer." + str(i) + ".layernorm_before.weight")
)
rename_keys.append(
("blocks." + str(i) + ".norm1.bias", "vit.encoder.layer." + str(i) + ".layernorm_before.bias")
)
rename_keys.append(
(
"blocks." + str(i) + ".attn.proj.weight",
"vit.encoder.layer." + str(i) + ".attention.output.dense.weight",
)
)
rename_keys.append(
("blocks." + str(i) + ".attn.proj.bias", "vit.encoder.layer." + str(i) + ".attention.output.dense.bias")
)
rename_keys.append(
("blocks." + str(i) + ".norm2.weight", "vit.encoder.layer." + str(i) + ".layernorm_after.weight")
)
rename_keys.append(
("blocks." + str(i) + ".norm2.bias", "vit.encoder.layer." + str(i) + ".layernorm_after.bias")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc1.weight", "vit.encoder.layer." + str(i) + ".intermediate.dense.weight")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc1.bias", "vit.encoder.layer." + str(i) + ".intermediate.dense.bias")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc2.weight", "vit.encoder.layer." + str(i) + ".output.dense.weight")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc2.bias", "vit.encoder.layer." + str(i) + ".output.dense.bias")
)
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
]
)
# pooler
if config.use_pooler:
rename_keys.extend(
[
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
]
)
# classification head
rename_keys.extend(
[
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
]
)
# to do: add base model support
# if just the base model, we should remove "vit" from all keys
if base_model:
pass
return rename_keys
| def create_rename_keys(config, base_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")
)
rename_keys.append(
("blocks." + str(i) + ".norm1.bias", "vit.encoder.layer." + str(i) + ".layernorm_before.bias")
)
rename_keys.append(
(
"blocks." + str(i) + ".attn.proj.weight",
"vit.encoder.layer." + str(i) + ".attention.output.dense.weight",
)
)
rename_keys.append(
("blocks." + str(i) + ".attn.proj.bias", "vit.encoder.layer." + str(i) + ".attention.output.dense.bias")
)
rename_keys.append(
("blocks." + str(i) + ".norm2.weight", "vit.encoder.layer." + str(i) + ".layernorm_after.weight")
)
rename_keys.append(
("blocks." + str(i) + ".norm2.bias", "vit.encoder.layer." + str(i) + ".layernorm_after.bias")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc1.weight", "vit.encoder.layer." + str(i) + ".intermediate.dense.weight")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc1.bias", "vit.encoder.layer." + str(i) + ".intermediate.dense.bias")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc2.weight", "vit.encoder.layer." + str(i) + ".output.dense.weight")
)
rename_keys.append(
("blocks." + str(i) + ".mlp.fc2.bias", "vit.encoder.layer." + str(i) + ".output.dense.bias")
)
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
]
)
# pooler
if config.use_pooler:
rename_keys.extend(
[
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
]
)
# classification head
rename_keys.extend(
[
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
]
)
# to do: add base model support
# if just the base model, we should remove "vit" from all keys
if base_model:
pass
return rename_keys
|
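The modified version of this row begins converting the rename-key construction from string concatenation to f-strings. A minimal sketch of the two spellings, using an arbitrary layer index rather than a value from any real config:

# Sketch of the rename-key construction above; the layer index is an
# illustrative example value, not taken from a real config.
i = 3

# string-concatenation form used throughout the original function
concat_key = ("blocks." + str(i) + ".norm1.weight",
              "vit.encoder.layer." + str(i) + ".layernorm_before.weight")

# f-string form introduced in the modified function
fstring_key = (f"blocks.{i}.norm1.weight",
               f"vit.encoder.layer.{i}.layernorm_before.weight")

assert concat_key == fstring_key  # both spellings yield the same key pair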
57,775 | def arduino_set_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix: str = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number: int = int(args.get('pin_number'))
value: int = int(args.get('value'))
result: int = int(server.send_data(f"set:{pin_type}:{pin_number},{value}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Set pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
| def arduino_set_pin_command(server: Server, args: any) -> str:
pin_type: str = args.get('pin_type')
prefix: str = "Arduino.DigitalPins" if pin_type == "digital" else "Arduino.AnalogPins"
pin_number = int(args.get('pin_number'))
value = int(args.get('value'))
result = int(server.send_data(f"set:{pin_type}:{pin_number},{value}"))
results = [{
"PinType": "Digital" if pin_type == "digital" else "Analog",
"PinNumber": pin_number,
"PinValue": result
}]
command_results = CommandResults(
outputs_prefix=prefix,
outputs_key_field=['PinNumber', 'PinType'],
outputs=results,
readable_output=tableToMarkdown(f"Set pin {pin_number} on {server.host}({server.port}):", results)
)
return command_results
|
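The modified version of this row drops the ": int" annotations where the right-hand side is already an explicit int() cast. A small sketch of the two styles, with an illustrative input value:

# Sketch of the annotation change above: an explicit int() cast already makes
# the variable's type clear, so the ": int" annotation adds no information.
raw = "7"                     # illustrative input value
pin_number: int = int(raw)    # original style: annotation restates the cast
pin_number = int(raw)         # modified style: the cast alone conveys the type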
30,483 | def add(num1, num2):
"""
Adds two integers.
Known limitations:
- The sum of both numbers cannot exceed 300.
- May take some time to count.
:param num1: First number.
:param num2: Second number.
:return: result of two numbers
"""
start = time()
sleep(num1)
sleep(num2)
return int(time() - start)
| def add(num1, num2):
"""
Adds two integers.
Known limitations:
- The sum of both numbers cannot exceed 300.
- May take some time to count.
:param num1: First number.
:param num2: Second number.
:return: result of two numbers
"""
start = time()
sleep(num1*num2)
sleep(num2)
return int(time() - start)
|
31,909 | def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url_reputation(url=url)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
url_data = response.get('urlData')
snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data)
ec = {
'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],
'DBotScore': dbot_score_cont,
'URL': url_cont
}
title = 'SlashNext Phishing Incident Response - URL Reputation\n'\
'##### url = {}'.format(url_data.get('url'))
if response.get('normalizeData').get('normalizeStatus') == 1:
title += ' *\n*' + response.get('normalizeData').get('normalizeMessage')
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
| def url_reputation_command():
"""
Execute SlashNext's url/reputation API against the requested url reputation command with the given parameters
@:return: None
"""
# 1. Get input url from Demisto
url = demisto.args().get('url')
# 2. Get the url reputation from SlashNext API
response = url_lookup(url=url)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
url_data = response.get('urlData')
snx_ioc_cont, dbot_score_cont, url_cont = get_snx_url_ioc_context(url_data)
ec = {
'SlashNext.URL(val.Value === obj.Value)': snx_ioc_cont[0],
'DBotScore': dbot_score_cont,
'URL': url_cont
}
title = 'SlashNext Phishing Incident Response - URL Reputation\n'\
'##### url = {}'.format(url_data.get('url'))
if response.get('normalizeData').get('normalizeStatus') == 1:
title += ' *\n*' + response.get('normalizeData').get('normalizeMessage')
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
|
23,020 | def test_pathlib_path(tmpdir):
import pathlib
path = pathlib.Path(tmpdir)
ddf.to_parquet(path)
ddf2 = dd.read_parquet(path)
assert_eq(ddf, ddf2, check_divisions=False, check_index=False)
| def test_pathlib_path(tmpdir):
import pathlib
path = pathlib.Path(tmpdir)
ddf.to_parquet(path, engine=engine)
ddf2 = dd.read_parquet(path)
assert_eq(ddf, ddf2, check_divisions=False, check_index=False)
|
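The modified test passes the parquet engine explicitly. A minimal sketch of the same pattern with Dask, assuming the "pyarrow" engine is installed; in the row above the engine comes from a test fixture, and the output directory name here is only illustrative:

# Sketch of writing and reading parquet through a pathlib.Path with an
# explicit engine, mirroring the test above.
import pathlib
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"a": range(6)})
ddf = dd.from_pandas(df, npartitions=2)
path = pathlib.Path("example_parquet_dir")       # hypothetical output directory
ddf.to_parquet(path, engine="pyarrow")           # engine passed explicitly
ddf2 = dd.read_parquet(path, engine="pyarrow")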
52,939 | def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:return: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_path, 'r') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data.encode('utf-8'))
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
| def read_certificate(cert_path):
"""
Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
:rtype: str
"""
with open(cert_path, 'r') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data.encode('utf-8'))
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
|
30,704 | def get_conversation_by_name(conversation_name: str) -> dict:
"""
Get a slack conversation by its name
:param conversation_name: The conversation name
:return: The slack conversation
"""
integration_context = demisto.getIntegrationContext()
conversation_to_search = conversation_name.lower()
# Find conversation in the cache
conversations = integration_context.get('conversations')
if conversations:
conversations = json.loads(conversations)
conversation_filter = list(
filter(
lambda u: conversation_to_search == u.get('name', '').lower(),
conversations
)
)
if conversation_filter:
return conversation_filter[0]
# If not found in cache, search for it
body = {
'types': 'private_channel,public_channel',
'limit': PAGINATED_COUNT
}
response = send_slack_request_sync(CLIENT, 'conversations.list', http_verb='GET', body=body)
conversation: dict = {}
while True:
conversations = response['channels'] if response and response.get('channels') else []
cursor = response.get('response_metadata', {}).get('next_cursor')
conversation_filter = list(filter(lambda c: c.get('name') == conversation_name, conversations))
if conversation_filter:
break
if not cursor:
break
body = body.copy()
body.update({'cursor': cursor})
response = send_slack_request_sync(CLIENT, 'conversations.list', http_verb='GET', body=body)
if conversation_filter:
conversation = conversation_filter[0]
# Save conversations to cache
set_to_latest_integration_context({'conversations': conversations})
return conversation
| def get_conversation_by_name(conversation_name: str) -> dict:
"""
Get a slack conversation by its name
:param conversation_name: The conversation name
:return: The slack conversation
"""
integration_context = demisto.getIntegrationContext()
conversation_to_search = conversation_name.lower()
# Find conversation in the cache
conversations = integration_context.get('conversations')
if conversations:
conversations = json.loads(conversations)
conversation_filter = list(
filter(
lambda c: conversation_to_search == c.get('name', '').lower(),
conversations
)
)
if conversation_filter:
return conversation_filter[0]
# If not found in cache, search for it
body = {
'types': 'private_channel,public_channel',
'limit': PAGINATED_COUNT
}
response = send_slack_request_sync(CLIENT, 'conversations.list', http_verb='GET', body=body)
conversation: dict = {}
while True:
conversations = response['channels'] if response and response.get('channels') else []
cursor = response.get('response_metadata', {}).get('next_cursor')
conversation_filter = list(filter(lambda c: c.get('name') == conversation_name, conversations))
if conversation_filter:
break
if not cursor:
break
body = body.copy()
body.update({'cursor': cursor})
response = send_slack_request_sync(CLIENT, 'conversations.list', http_verb='GET', body=body)
if conversation_filter:
conversation = conversation_filter[0]
# Save conversations to cache
set_to_latest_integration_context({'conversations': conversations})
return conversation
|
28,980 | def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children']:
if child['character'] < 0 or child['character'] > 255:
raise InvalidProofError("child character not int between 0 and 255")
if previous_child_character:
if previous_child_character >= child['character']:
raise InvalidProofError("children not in increasing order")
previous_child_character = child['character']
to_hash += six.int2byte(child['character'])
if 'nodeHash' in child:
if len(child['nodeHash']) != 64:
raise InvalidProofError("invalid child nodeHash")
to_hash += binascii.unhexlify(child['nodeHash'])[::-1]
else:
if previous_computed_hash is None:
raise InvalidProofError("previous computed hash is None")
if found_child_in_chain is True:
raise InvalidProofError("already found the next child in the chain")
found_child_in_chain = True
reverse_computed_name += chr(child['character'])
to_hash += previous_computed_hash
if not found_child_in_chain:
if i != 0:
raise InvalidProofError("did not find the alleged child")
if i == 0 and 'txhash' in proof and 'nOut' in proof and 'last takeover height' in proof:
if len(proof['txhash']) != 64:
raise InvalidProofError("txhash was invalid: {}".format(proof['txhash']))
if not isinstance(proof['nOut'], (int,)):
raise InvalidProofError("nOut was invalid: {}".format(proof['nOut']))
if not isinstance(proof['last takeover height'], (int,)):
raise InvalidProofError(
'last takeover height was invalid: {}'.format(proof['last takeover height']))
to_hash += get_hash_for_outpoint(
binascii.unhexlify(proof['txhash'])[::-1],
proof['nOut'],
proof['last takeover height']
)
verified_value = True
elif 'valueHash' in node:
if len(node['valueHash']) != 64:
raise InvalidProofError("valueHash was invalid")
to_hash += binascii.unhexlify(node['valueHash'])[::-1]
previous_computed_hash = double_sha256(to_hash)
if previous_computed_hash != binascii.unhexlify(rootHash)[::-1]:
raise InvalidProofError("computed hash does not match roothash")
if 'txhash' in proof and 'nOut' in proof:
if not verified_value:
raise InvalidProofError("mismatch between proof claim and outcome")
if 'txhash' in proof and 'nOut' in proof:
if name != reverse_computed_name[::-1]:
raise InvalidProofError("name did not match proof")
if not name.startswith(reverse_computed_name[::-1]):
raise InvalidProofError("name fragment does not match proof")
return True
| def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children']:
if child['character'] < 0 or child['character'] > 255:
raise InvalidProofError("child character not int between 0 and 255")
if previous_child_character:
if previous_child_character >= child['character']:
raise InvalidProofError("children not in increasing order")
previous_child_character = child['character']
to_hash += six.int2byte(child['character'])
if 'nodeHash' in child:
if len(child['nodeHash']) != 64:
raise InvalidProofError("invalid child nodeHash")
to_hash += binascii.unhexlify(child['nodeHash'])[::-1]
else:
if previous_computed_hash is None:
raise InvalidProofError("previous computed hash is None")
if found_child_in_chain is True:
raise InvalidProofError("already found the next child in the chain")
found_child_in_chain = True
reverse_computed_name += chr(child['character'])
to_hash += previous_computed_hash
if not found_child_in_chain:
if i != 0:
raise InvalidProofError("did not find the alleged child")
if i == 0 and 'txhash' in proof and 'nOut' in proof and 'last takeover height' in proof:
if len(proof['txhash']) != 64:
raise InvalidProofError("txhash was invalid: {}".format(proof['txhash']))
if not isinstance(proof['nOut'], (int,)):
raise InvalidProofError("nOut was invalid: {}".format(proof['nOut']))
if not isinstance(proof['last takeover height'], int):
raise InvalidProofError(
'last takeover height was invalid: {}'.format(proof['last takeover height']))
to_hash += get_hash_for_outpoint(
binascii.unhexlify(proof['txhash'])[::-1],
proof['nOut'],
proof['last takeover height']
)
verified_value = True
elif 'valueHash' in node:
if len(node['valueHash']) != 64:
raise InvalidProofError("valueHash was invalid")
to_hash += binascii.unhexlify(node['valueHash'])[::-1]
previous_computed_hash = double_sha256(to_hash)
if previous_computed_hash != binascii.unhexlify(rootHash)[::-1]:
raise InvalidProofError("computed hash does not match roothash")
if 'txhash' in proof and 'nOut' in proof:
if not verified_value:
raise InvalidProofError("mismatch between proof claim and outcome")
if 'txhash' in proof and 'nOut' in proof:
if name != reverse_computed_name[::-1]:
raise InvalidProofError("name did not match proof")
if not name.startswith(reverse_computed_name[::-1]):
raise InvalidProofError("name fragment does not match proof")
return True
|
44,484 | def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account.
The user-gcp-sa secret is created as part of the kubeflow deployment that
stores the access token for kubeflow user service account.
With this service account, the container has a range of GCP APIs to
access to. This service account is automatically created as part of the
kubeflow deployment.
For the list of the GCP APIs this service account can access to, check
https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18
If you want to call the GCP APIs in a different project, grant the kf-user
service account access permission.
"""
# permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
if secret_file_path_in_volume == None:
secret_file_path_in_volume = '/' + secret_name + '.json'
if volume_name == None:
volume_name = 'gcp-credentials-' + secret_name
else:
import warnings
warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)
def _use_gcp_secret(task):
from kubernetes import client as k8s_client
return (
task
.add_volume(
k8s_client.V1Volume(
name=volume_name,
secret=k8s_client.V1SecretVolumeSource(
secret_name=secret_name,
)
)
)
.add_volume_mount(
k8s_client.V1VolumeMount(
name=volume_name,
mount_path=secret_volume_mount_path,
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='GOOGLE_APPLICATION_CREDENTIALS',
value=secret_volume_mount_path + secret_file_path_in_volume,
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
value=secret_volume_mount_path + secret_file_path_in_volume,
)
) # Set GCloud Credentials by using the env var override.
# TODO: Is there a better way for GCloud to pick up the credential?
)
return _use_gcp_secret
| def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account.
The user-gcp-sa secret is created as part of the kubeflow deployment that
stores the access token for kubeflow user service account.
With this service account, the container has a range of GCP APIs to
access to. This service account is automatically created as part of the
kubeflow deployment.
For the list of the GCP APIs this service account can access to, check
https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18
If you want to call the GCP APIs in a different project, grant the kf-user
service account access permission.
"""
# permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
if secret_file_path_in_volume == None:
secret_file_path_in_volume = '/' + secret_name + '.json'
if volume_name is None:
volume_name = 'gcp-credentials-' + secret_name
else:
import warnings
warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)
def _use_gcp_secret(task):
from kubernetes import client as k8s_client
return (
task
.add_volume(
k8s_client.V1Volume(
name=volume_name,
secret=k8s_client.V1SecretVolumeSource(
secret_name=secret_name,
)
)
)
.add_volume_mount(
k8s_client.V1VolumeMount(
name=volume_name,
mount_path=secret_volume_mount_path,
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='GOOGLE_APPLICATION_CREDENTIALS',
value=secret_volume_mount_path + secret_file_path_in_volume,
)
)
.add_env_variable(
k8s_client.V1EnvVar(
name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
value=secret_volume_mount_path + secret_file_path_in_volume,
)
) # Set GCloud Credentials by using the env var override.
# TODO: Is there a better way for GCloud to pick up the credential?
)
return _use_gcp_secret
|
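The modified version of this row replaces "== None" with the idiomatic "is None" check for volume_name. For context, a hedged sketch of how a modifier like this is typically applied to a task, assuming the KFP v1 SDK layout where the helper is exposed as kfp.gcp.use_gcp_secret; the pipeline name, image, and command are placeholders:

# Sketch of applying the returned modifier to a ContainerOp in a KFP v1 pipeline.
import kfp.dsl as dsl
from kfp.gcp import use_gcp_secret

@dsl.pipeline(name='example-pipeline')
def pipeline():
    op = dsl.ContainerOp(
        name='echo',
        image='alpine:3.12',
        command=['echo', 'hello'],
    )
    # mounts the secret volume and sets the credential env vars on the task
    op.apply(use_gcp_secret('user-gcp-sa'))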
25,079 | def _colorize(agg, color_key, how, span, min_alpha, name):
if cupy and isinstance(agg.data, cupy.ndarray):
from ._cuda_utils import interp
array = cupy.array
else:
interp = np.interp
array = np.array
if not agg.ndim == 3:
raise ValueError("agg must be 3D")
cats = agg.indexes[agg.dims[-1]]
if color_key is None:
raise ValueError("Color key must be provided, with at least as many " +
"colors as there are categorical fields")
if not isinstance(color_key, dict):
color_key = dict(zip(cats, color_key))
if len(color_key) < len(cats):
raise ValueError("Insufficient colors provided ({}) for the categorical fields available ({})"
.format(len(color_key), len(cats)))
if not (0 <= min_alpha <= 255):
raise ValueError("min_alpha ({}) must be between 0 and 255".format(min_alpha))
colors = [rgb(color_key[c]) for c in cats]
rs, gs, bs = map(array, zip(*colors))
# Reorient array (transposing the category dimension first)
agg_t = agg.transpose(*((agg.dims[-1],)+agg.dims[:2]))
data = orient_array(agg_t).transpose([1, 2, 0])
total = data.sum(axis=2)
# zero-count pixels will be 0/0, but it's safe to ignore that when dividing
with np.errstate(divide='ignore', invalid='ignore'):
r = (data.dot(rs)/total).astype(np.uint8)
g = (data.dot(gs)/total).astype(np.uint8)
b = (data.dot(bs)/total).astype(np.uint8)
mask = np.isnan(total)
# if span is provided, use it, otherwise produce it a span based off the
# min/max of the data
if span is None:
# Currently masks out zero or negative values, but will need fixing
offset = np.nanmin(total)
if offset <= 0 and total.dtype.kind == 'u':
mask = mask | (total <= 0)
# If at least one element is not masked, use the minimum as the offset
# otherwise the offset remains at zero
if not np.all(mask):
offset = total[total > 0].min()
a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
norm_span = [np.nanmin(a_scaled).item(), np.nanmax(a_scaled).item()]
else:
if how == 'eq_hist':
# For eq_hist to work with span, we'll need to store the histogram
# from the data and then apply it to the span argument.
raise ValueError("span is not (yet) valid to use with eq_hist")
# even in fixed-span mode cells with 0 should remain fully transparent
# i.e. a 0 will be fully transparent, but any non-zero number will
# be clipped to the span range and have min-alpha applied
offset = np.array(span, dtype=data.dtype)[0]
if offset <= 0 and total.dtype.kind == 'u':
mask = mask | (total <= 0)
a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
norm_span = _normalize_interpolate_how(how)([0, span[1] - span[0]], 0)
# Interpolate the alpha values
a = interp(a_scaled, array(norm_span),
array([min_alpha, 255]), left=0, right=255).astype(np.uint8)
r[mask] = g[mask] = b[mask] = 255
values = np.dstack([r, g, b, a]).view(np.uint32).reshape(a.shape)
if cupy and isinstance(values, cupy.ndarray):
# Convert cupy array to numpy for final image
values = cupy.asnumpy(values)
return Image(values,
dims=agg.dims[:-1],
coords=OrderedDict([
(agg.dims[1], agg.coords[agg.dims[1]]),
(agg.dims[0], agg.coords[agg.dims[0]]),
]),
name=name)
| def _colorize(agg, color_key, how, span, min_alpha, name):
if cupy and isinstance(agg.data, cupy.ndarray):
from ._cuda_utils import interp
array = cupy.array
else:
interp = np.interp
array = np.array
if not agg.ndim == 3:
raise ValueError("agg must be 3D")
cats = agg.indexes[agg.dims[-1]]
if color_key is None:
raise ValueError("Color key must be provided, with at least as many " +
"colors as there are categorical fields")
if not isinstance(color_key, dict):
color_key = dict(zip(cats, color_key))
if len(color_key) < len(cats):
raise ValueError("Insufficient colors provided ({}) for the categorical fields available ({})"
.format(len(color_key), len(cats)))
if not (0 <= min_alpha <= 255):
raise ValueError("min_alpha ({}) must be between 0 and 255".format(min_alpha))
colors = [rgb(color_key[c]) for c in cats]
rs, gs, bs = map(array, zip(*colors))
# Reorient array (transposing the category dimension first)
agg_t = agg.transpose(*((agg.dims[-1],)+agg.dims[:2]))
data = orient_array(agg_t).transpose([1, 2, 0])
total = data.sum(axis=2)
# zero-count pixels will be 0/0, but it's safe to ignore that when dividing
with np.errstate(divide='ignore', invalid='ignore'):
r = (data.dot(rs)/total).astype(np.uint8)
g = (data.dot(gs)/total).astype(np.uint8)
b = (data.dot(bs)/total).astype(np.uint8)
mask = np.isnan(total)
# if span is provided, use it, otherwise produce it a span based off the
# min/max of the data
if span is None:
# Currently masks out zero or negative values, but will need fixing
offset = np.nanmin(total)
if offset == 0 and total.dtype.kind == 'u':
mask = mask | (total <= 0)
# If at least one element is not masked, use the minimum as the offset
# otherwise the offset remains at zero
if not np.all(mask):
offset = total[total > 0].min()
a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
norm_span = [np.nanmin(a_scaled).item(), np.nanmax(a_scaled).item()]
else:
if how == 'eq_hist':
# For eq_hist to work with span, we'll need to store the histogram
# from the data and then apply it to the span argument.
raise ValueError("span is not (yet) valid to use with eq_hist")
# even in fixed-span mode cells with 0 should remain fully transparent
# i.e. a 0 will be fully transparent, but any non-zero number will
# be clipped to the span range and have min-alpha applied
offset = np.array(span, dtype=data.dtype)[0]
if offset <= 0 and total.dtype.kind == 'u':
mask = mask | (total <= 0)
a_scaled = _normalize_interpolate_how(how)(total - offset, mask)
norm_span = _normalize_interpolate_how(how)([0, span[1] - span[0]], 0)
# Interpolate the alpha values
a = interp(a_scaled, array(norm_span),
array([min_alpha, 255]), left=0, right=255).astype(np.uint8)
r[mask] = g[mask] = b[mask] = 255
values = np.dstack([r, g, b, a]).view(np.uint32).reshape(a.shape)
if cupy and isinstance(values, cupy.ndarray):
# Convert cupy array to numpy for final image
values = cupy.asnumpy(values)
return Image(values,
dims=agg.dims[:-1],
coords=OrderedDict([
(agg.dims[1], agg.coords[agg.dims[1]]),
(agg.dims[0], agg.coords[agg.dims[0]]),
]),
name=name)
|
6,793 | def get_contacts(email_strings):
email_addrs = []
for email_string in email_strings:
if email_string:
for email in email_string.split(","):
parsed_email = parseaddr(email)[1]
if parsed_email:
email_addrs.append(parsed_email)
contacts = []
for email in email_addrs:
email = get_email_without_link(email)
contact_name = get_contact_name(email)
if not contact_name and email:
email_parts = email.split("@")
first_name = frappe.unscrub(email_parts[0])
try:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": first_name,
})
contact.add_email(email_id=email, is_primary=True)
contact.name = ('{0}-{1}'.format(first_name, email_parts[1])
if first_name in ['Contact'] else first_name)
contact.insert(ignore_permissions=True)
contact_name = contact.name
except Exception:
traceback = frappe.get_traceback()
frappe.log_error(traceback)
if contact_name:
contacts.append(contact_name)
return contacts
| def get_contacts(email_strings):
email_addrs = []
for email_string in email_strings:
if email_string:
for email in email_string.split(","):
parsed_email = parseaddr(email)[1]
if parsed_email:
email_addrs.append(parsed_email)
contacts = []
for email in email_addrs:
email = get_email_without_link(email)
contact_name = get_contact_name(email)
if not contact_name and email:
email_parts = email.split("@")
first_name = frappe.unscrub(email_parts[0])
try:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": first_name,
})
contact.add_email(email_id=email, is_primary=True)
contact.name = ('{0}-{1}'.format(first_name, email_parts[1])
if first_name.lower() == 'contact' else first_name)
contact.insert(ignore_permissions=True)
contact_name = contact.name
except Exception:
traceback = frappe.get_traceback()
frappe.log_error(traceback)
if contact_name:
contacts.append(contact_name)
return contacts
|
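The modified version of this row swaps the exact membership test for a case-insensitive comparison when deciding whether to disambiguate the contact name. A small sketch of the behavioural difference:

# Sketch of the membership check vs. case-insensitive comparison above.
for first_name in ("Contact", "contact", "CONTACT", "Alice"):
    old_match = first_name in ['Contact']           # original: exact match only
    new_match = first_name.lower() == 'contact'     # modified: case-insensitive
    print(first_name, old_match, new_match)
# "contact" and "CONTACT" match only under the modified check.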
22,282 | def purge_datasets(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets = app.sa_session.query(app.model.Dataset) \
.filter(and_(app.model.Dataset.table.c.deleted == true(),
app.model.Dataset.table.c.purgable == true(),
app.model.Dataset.table.c.update_time < cutoff_time))
else:
datasets = app.sa_session.query(app.model.Dataset) \
.filter(and_(app.model.Dataset.table.c.deleted == true(),
app.model.Dataset.table.c.purgable == true(),
app.model.Dataset.table.c.purged == false(),
app.model.Dataset.table.c.update_time < cutoff_time))
for dataset in datasets:
file_size = dataset.file_size
_purge_dataset(app, dataset, remove_from_disk, info_only=info_only)
dataset_count += 1
try:
disk_space += file_size
except Exception:
pass
stop = time.time()
log.info('Purged %d datasets' % dataset_count)
if remove_from_disk:
log.info('Freed disk space: %s' % disk_space)
log.info("Elapsed time: %f" % (stop - start))
log.info("##########################################")
| def purge_datasets(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets = app.sa_session.query(app.model.Dataset) \
.filter(and_(app.model.Dataset.table.c.deleted == true(),
app.model.Dataset.table.c.purgable == true(),
app.model.Dataset.table.c.update_time < cutoff_time))
else:
datasets = app.sa_session.query(app.model.Dataset) \
.filter(and_(app.model.Dataset.table.c.deleted == true(),
app.model.Dataset.table.c.purgable == true(),
app.model.Dataset.table.c.purged == false(),
app.model.Dataset.table.c.update_time < cutoff_time))
for dataset in datasets:
file_size = dataset.file_size
_purge_dataset(app, dataset, remove_from_disk, info_only=info_only)
dataset_count += 1
try:
disk_space += file_size
except Exception:
pass
stop = time.time()
log.info('Purged %d datasets' % dataset_count)
if remove_from_disk:
log.info("Freed disk space: %d", disk_space)
log.info("Elapsed time: %f" % (stop - start))
log.info("##########################################")
|
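The modified version of this row moves the disk-space message from eager %-interpolation to passing the value as a logging argument. A minimal sketch of both calls; the byte count is an arbitrary example:

# Sketch of the logging change above: passing the value as an argument defers
# string formatting to the logging module, which only formats emitted records.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
disk_space = 123456789

log.info('Freed disk space: %s' % disk_space)   # original: formats eagerly
log.info('Freed disk space: %d', disk_space)    # modified: lazy formatting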
47,936 | def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path, file_idx)
pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_')
formula = preprocess_formula(formula)
if not output_path.exists():
tex_filename = Path(folder_path, pre_name + '.tex')
log_filename = tex_filename.with_name(pre_name + '.log')
aux_filename = tex_filename.with_name(pre_name + '.aux')
with open(str(tex_filename), "w") as w:
w.write(template % formula)
subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)],
check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
for filename in (tex_filename, log_filename, aux_filename):
if filename.exists():
filename.unlink()
pdf_filename = tex_filename.with_name(pre_name + '.pdf')
png_filename = tex_filename.with_name(pre_name + '.png')
if not pdf_filename.exists():
print_info('ERROR: {} cannot compile\n'.format(file_idx))
else:
subprocess.run(['convert', '+profile', '"icc"', '-density', '200', '-quality', '100',
str(pdf_filename), str(png_filename)],
check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
if pdf_filename.exists():
pdf_filename.unlink()
if png_filename.exists():
crop_image(str(png_filename), str(output_path))
png_filename.unlink()
else:
print_info("ERROR: {png_filename} does not exists".format(png_filename=png_filename))
| def render_routine(line):
"""Function for rendering single formula
Args:
line (tuple): formula idx, formula string, path to store rendered image
"""
formula, file_idx, folder_path = line
output_path = Path(folder_path, file_idx)
pre_name = os.path.normcase(output_path).replace('/', '_').replace('.', '_')
formula = preprocess_formula(formula)
if not output_path.exists():
tex_filename = Path(folder_path, pre_name + '.tex')
log_filename = tex_filename.with_name(pre_name + '.log')
aux_filename = tex_filename.with_suffix('.aux')
with open(str(tex_filename), "w") as w:
w.write(template % formula)
subprocess.run(['pdflatex', '-interaction=nonstopmode', '-output-directory', folder_path, str(tex_filename)],
check=False, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
for filename in (tex_filename, log_filename, aux_filename):
if filename.exists():
filename.unlink()
pdf_filename = tex_filename.with_name(pre_name + '.pdf')
png_filename = tex_filename.with_name(pre_name + '.png')
if not pdf_filename.exists():
print_info('ERROR: {} cannot compile\n'.format(file_idx))
else:
subprocess.run(['convert', '+profile', '"icc"', '-density', '200', '-quality', '100',
str(pdf_filename), str(png_filename)],
check=True, stdout=PIPE, stderr=PIPE, shell=os.name == 'nt')
if pdf_filename.exists():
pdf_filename.unlink()
if png_filename.exists():
crop_image(str(png_filename), str(output_path))
png_filename.unlink()
else:
print_info("ERROR: {png_filename} does not exists".format(png_filename=png_filename))
|
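The modified version of this row builds the .aux path with with_suffix instead of with_name. A small sketch showing the two calls agree for paths shaped like the ones above, where the stem contains no embedded dots; the names are illustrative:

# Sketch of the pathlib change above: for "<folder>/<pre_name>.tex",
# with_suffix('.aux') and with_name(pre_name + '.aux') give the same path.
from pathlib import Path

pre_name = 'formula_0001'                       # illustrative stem, no dots
tex_filename = Path('renders', pre_name + '.tex')

assert tex_filename.with_suffix('.aux') == tex_filename.with_name(pre_name + '.aux')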